tunnel-manager 1.0.0-py3-none-any.whl → 1.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tunnel-manager might be problematic.
- tests/test_tunnel.py +3 -4
- tunnel_manager/tunnel_manager.py +138 -39
- tunnel_manager/tunnel_manager_mcp.py +680 -181
- {tunnel_manager-1.0.0.dist-info → tunnel_manager-1.0.1.dist-info}/METADATA +106 -57
- tunnel_manager-1.0.1.dist-info/RECORD +11 -0
- tunnel_manager-1.0.0.dist-info/RECORD +0 -11
- {tunnel_manager-1.0.0.dist-info → tunnel_manager-1.0.1.dist-info}/WHEEL +0 -0
- {tunnel_manager-1.0.0.dist-info → tunnel_manager-1.0.1.dist-info}/entry_points.txt +0 -0
- {tunnel_manager-1.0.0.dist-info → tunnel_manager-1.0.1.dist-info}/licenses/LICENSE +0 -0
- {tunnel_manager-1.0.0.dist-info → tunnel_manager-1.0.1.dist-info}/top_level.txt +0 -0
@@ -39,26 +39,38 @@ def to_boolean(string: Union[str, bool] = None) -> bool:
raise ValueError(f"Cannot convert '{string}' to boolean")


+ def to_integer(string: Union[str, int] = None) -> int:
+ if isinstance(string, int):
+ return string
+ if not string:
+ return 0
+ try:
+ return int(string.strip())
+ except ValueError:
+ raise ValueError(f"Cannot convert '{string}' to integer")
+
+
class ResponseBuilder:
@staticmethod
def build(
status: int,
msg: str,
details: Dict,
-
+ error: str = "",
+ stdout: str = "",  # Add this
files: List = None,
-
+ locations: List = None,
errors: List = None,
) -> Dict:
return {
"status_code": status,
"message": msg,
- "stdout":
- "stderr":
+ "stdout": stdout,  # Use the parameter
+ "stderr": error,
"files_copied": files or [],
- "locations_copied_to":
+ "locations_copied_to": locations or [],
"details": details,
- "errors": errors or ([
+ "errors": errors or ([error] if error else []),
}


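The hunk above introduces a `to_integer` helper (used later for environment-derived defaults such as `TUNNEL_REMOTE_PORT` and `TUNNEL_MAX_THREADS`) and extends `ResponseBuilder.build` so that `stdout` and `locations` are passed explicitly. A minimal, self-contained sketch of the same behaviour, not taken from the package; `build_response` is an illustrative free-function stand-in for the static method:

# Sketch of the 1.0.1 helpers; reproduces the logic shown in the hunk above.
from typing import Dict, List, Union


def to_integer(string: Union[str, int] = None) -> int:
    if isinstance(string, int):
        return string
    if not string:
        return 0
    try:
        return int(string.strip())
    except ValueError:
        raise ValueError(f"Cannot convert '{string}' to integer")


def build_response(status: int, msg: str, details: Dict, error: str = "",
                   stdout: str = "", files: List = None,
                   locations: List = None, errors: List = None) -> Dict:
    # Mirrors ResponseBuilder.build after the change: stdout/stderr and
    # locations_copied_to now come from explicit parameters.
    return {
        "status_code": status,
        "message": msg,
        "stdout": stdout,
        "stderr": error,
        "files_copied": files or [],
        "locations_copied_to": locations or [],
        "details": details,
        "errors": errors or ([error] if error else []),
    }


print(to_integer("22"))  # 22
print(to_integer(None))  # 0
print(build_response(200, "ok", {}, stdout="hello", locations=["/tmp/x"]))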
@@ -80,10 +92,10 @@ def setup_logging(log_file: Optional[str], logger: logging.Logger) -> Dict:


def load_inventory(
-
+ inventory: str, group: str, logger: logging.Logger
) -> tuple[List[Dict], Dict]:
try:
- with open(
+ with open(inventory, "r") as f:
inv = yaml.safe_load(f)
hosts = []
if group in inv and isinstance(inv[group], dict) and "hosts" in inv[group]:
@@ -102,14 +114,14 @@ def load_inventory(
return [], ResponseBuilder.build(
400,
f"Group '{group}' invalid",
- {"
+ {"inventory": inventory, "group": group},
errors=[f"Group '{group}' invalid"],
)
if not hosts:
return [], ResponseBuilder.build(
400,
f"No hosts in group '{group}'",
- {"
+ {"inventory": inventory, "group": group},
errors=[f"No hosts in group '{group}'"],
)
return hosts, {}
@@ -118,21 +130,21 @@ def load_inventory(
return [], ResponseBuilder.build(
500,
f"Load inv fail: {e}",
- {"
+ {"inventory": inventory, "group": group},
str(e),
)


@mcp.tool(
annotations={
- "title": "Run Remote
+ "title": "Run Command on Remote Host",
"readOnlyHint": True,
"destructiveHint": True,
"idempotentHint": False,
},
tags={"remote_access"},
)
- async def
+ async def run_command_on_remote_host(
host: str = Field(
description="Remote host.", default=os.environ.get("TUNNEL_REMOTE_HOST", None)
),
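These hunks change `load_inventory` to take the inventory path explicitly and to return a `(hosts, error)` pair, where `error` is an empty dict on success and a ready-made response payload on failure. A small sketch of that calling contract; the stub loader and the host data below are illustrative, not the real YAML-backed implementation:

# Sketch of the (hosts, error) contract that load_inventory follows in 1.0.1.
import logging
from typing import Dict, List, Tuple

logger = logging.getLogger("TunnelServer")


def load_inventory_stub(inventory: str, group: str,
                        logger: logging.Logger) -> Tuple[List[Dict], Dict]:
    if not inventory:
        # Failure: empty host list plus a ResponseBuilder-style error payload.
        return [], {"status_code": 400, "message": f"Group '{group}' invalid"}
    # Success: host dicts plus an empty error dict.
    return [{"hostname": "node1", "username": "admin"}], {}


hosts, error = load_inventory_stub("inventory.yaml", "all", logger)
if error:                     # callers return the error payload unchanged
    raise SystemExit(error["message"])
for h in hosts:               # each entry carries at least hostname/username
    print(h["hostname"], h.get("username"))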
@@ -143,7 +155,8 @@ async def run_remote_command(
description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
cmd: str = Field(description="Shell command.", default=None),
id_file: Optional[str] = Field(
@@ -168,8 +181,8 @@ async def run_remote_command(
) -> Dict:
"""Run shell command on remote host. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
+ if error := setup_logging(log, logger):
+ return error
logger.debug(f"Run cmd: host={host}, cmd={cmd}")
if not host or not cmd:
logger.error("Need host, cmd")
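From here on, every tool replaces its logging guard with the walrus form `if error := setup_logging(log, logger): return error`. A sketch of the pattern, assuming (as the diff implies) that `setup_logging` returns a falsy value on success and an error payload on failure; `configure_file_logging` below is a hypothetical stand-in, not the module's function:

# Sketch of the walrus-operator guard used throughout the 1.0.1 tools.
import logging
from typing import Dict, Optional


def configure_file_logging(log_file: Optional[str], logger: logging.Logger) -> Dict:
    if not log_file:
        return {}  # nothing to configure, no error
    try:
        logger.addHandler(logging.FileHandler(log_file))
        return {}
    except OSError as e:
        return {"status_code": 500, "message": f"Log setup fail: {e}"}


def tool_body(log: Optional[str]) -> Dict:
    logger = logging.getLogger("TunnelServer")
    if error := configure_file_logging(log, logger):
        return error          # short-circuit exactly like the diffed tools
    return {"status_code": 200, "message": "ok"}


print(tool_body(None))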
@@ -191,18 +204,19 @@ async def run_remote_command(
await ctx.report_progress(progress=0, total=100)
logger.debug("Progress: 0/100")
t.connect()
- out,
+ out, error = t.run_command(cmd)
if ctx:
await ctx.report_progress(progress=100, total=100)
logger.debug("Progress: 100/100")
- logger.debug(f"Cmd out: {out},
+ logger.debug(f"Cmd out: {out}, error: {error}")
return ResponseBuilder.build(
200,
f"Cmd '{cmd}' done on {host}",
{"host": host, "cmd": cmd},
-
-
- [],
+ error,
+ stdout=out,
+ files=[],
+ locations=[],
errors=[],
)
except Exception as e:
@@ -217,14 +231,14 @@ async def run_remote_command(

@mcp.tool(
annotations={
- "title": "
+ "title": "Send File from Remote Host",
"readOnlyHint": False,
"destructiveHint": True,
"idempotentHint": False,
},
tags={"remote_access"},
)
- async def
+ async def send_file_to_remote_host(
host: str = Field(
description="Remote host.", default=os.environ.get("TUNNEL_REMOTE_HOST", None)
),
@@ -235,7 +249,8 @@ async def upload_file(
description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
lpath: str = Field(description="Local file path.", default=None),
rpath: str = Field(description="Remote path.", default=None),
@@ -261,8 +276,15 @@ async def upload_file(
) -> Dict:
"""Upload file to remote host. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
-
-
+ logger.debug(f"Upload: host={host}, local={lpath}, remote={rpath}")
+ lpath = os.path.abspath(os.path.expanduser(lpath))  # Normalize to absolute
+ rpath = os.path.expanduser(rpath)  # Handle ~ on remote
+ logger.debug(
+ f"Normalized: lpath={lpath} (exists={os.path.exists(lpath)}, isfile={os.path.isfile(lpath)}), rpath={rpath}, CWD={os.getcwd()}"
+ )
+
+ if error := setup_logging(log, logger):
+ return error
logger.debug(f"Upload: host={host}, local={lpath}, remote={rpath}")
if not host or not lpath or not rpath:
logger.error("Need host, lpath, rpath")
@@ -272,14 +294,17 @@ async def upload_file(
{"host": host, "lpath": lpath, "rpath": rpath},
errors=["Need host, lpath, rpath"],
)
- if not os.path.exists(lpath):
- logger.error(
+ if not os.path.exists(lpath) or not os.path.isfile(lpath):
+ logger.error(
+ f"Invalid file: {lpath} (exists={os.path.exists(lpath)}, isfile={os.path.isfile(lpath)})"
+ )
return ResponseBuilder.build(
400,
- f"
+ f"Invalid file: {lpath}",
{"host": host, "lpath": lpath, "rpath": rpath},
- errors=[f"
+ errors=[f"Invalid file: {lpath}"],
)
+ lpath = os.path.abspath(os.path.expanduser(lpath))
try:
t = Tunnel(
remote_host=host,
@@ -305,9 +330,6 @@ async def upload_file(
asyncio.ensure_future(ctx.report_progress(progress=transf, total=total))

sftp.put(lpath, rpath, callback=progress_callback)
- if ctx:
- await ctx.report_progress(progress=100, total=100)
- logger.debug("Progress: 100/100")
sftp.close()
logger.debug(f"Uploaded: {lpath} -> {rpath}")
return ResponseBuilder.build(
@@ -315,16 +337,17 @@ async def upload_file(
f"Uploaded to {rpath}",
{"host": host, "lpath": lpath, "rpath": rpath},
files=[lpath],
-
+ locations=[rpath],
errors=[],
)
except Exception as e:
- logger.error(f"
+ logger.error(f"Unexpected error during file transfer: {str(e)}")
return ResponseBuilder.build(
500,
- f"Upload fail: {e}",
+ f"Upload fail: {str(e)}",
{"host": host, "lpath": lpath, "rpath": rpath},
str(e),
+ errors=[f"Unexpected error: {str(e)}"],
)
finally:
if "t" in locals():
@@ -333,14 +356,14 @@ async def upload_file(

@mcp.tool(
annotations={
- "title": "
+ "title": "Receive File from Remote Host",
"readOnlyHint": False,
"destructiveHint": False,
"idempotentHint": True,
},
tags={"remote_access"},
)
- async def
+ async def receive_file_from_remote_host(
host: str = Field(
description="Remote host.", default=os.environ.get("TUNNEL_REMOTE_HOST", None)
),
@@ -351,10 +374,11 @@ async def download_file(
description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
rpath: str = Field(description="Remote file path.", default=None),
- lpath: str = Field(description="Local path.", default=None),
+ lpath: str = Field(description="Local file path.", default=None),
id_file: Optional[str] = Field(
description="Private key path.",
default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
@@ -377,8 +401,9 @@ async def download_file(
) -> Dict:
"""Download file from remote host. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
-
-
+ lpath = os.path.abspath(os.path.expanduser(lpath))
+ if error := setup_logging(log, logger):
+ return error
logger.debug(f"Download: host={host}, remote={rpath}, local={lpath}")
if not host or not rpath or not lpath:
logger.error("Need host, rpath, lpath")
@@ -424,7 +449,7 @@ async def download_file(
f"Downloaded to {lpath}",
{"host": host, "rpath": rpath, "lpath": lpath},
files=[rpath],
-
+ locations=[lpath],
errors=[],
)
except Exception as e:
@@ -460,7 +485,8 @@ async def check_ssh_server(
description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
id_file: Optional[str] = Field(
description="Private key path.",
@@ -484,8 +510,8 @@ async def check_ssh_server(
) -> Dict:
"""Check SSH server status. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
+ if error := setup_logging(log, logger):
+ return error
logger.debug(f"Check SSH: host={host}")
if not host:
logger.error("Need host")
@@ -516,7 +542,7 @@ async def check_ssh_server(
f"SSH check: {msg}",
{"host": host, "success": success},
files=[],
-
+ locations=[],
errors=[] if success else [msg],
)
except Exception as e:
@@ -548,7 +574,8 @@ async def test_key_auth(
default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
cfg: str = Field(
description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
@@ -560,8 +587,8 @@ async def test_key_auth(
) -> Dict:
"""Test key-based auth. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
+ if error := setup_logging(log, logger):
+ return error
logger.debug(f"Test key: host={host}, key={key}")
if not host or not key:
logger.error("Need host, key")
@@ -583,7 +610,7 @@ async def test_key_auth(
f"Key test: {msg}",
{"host": host, "key": key, "success": success},
files=[],
-
+ locations=[],
errors=[] if success else [msg],
)
except Exception as e:
@@ -613,11 +640,15 @@ async def setup_passwordless_ssh(
description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
key: str = Field(
description="Private key path.", default=os.path.expanduser("~/.ssh/id_rsa")
),
+ key_type: str = Field(
+ description="Key type to generate (rsa or ed25519).", default="ed25519"
+ ),
cfg: str = Field(
description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
),
@@ -628,17 +659,25 @@ async def setup_passwordless_ssh(
) -> Dict:
"""Setup passwordless SSH. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
- logger.debug(f"Setup SSH: host={host}, key={key}")
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(f"Setup SSH: host={host}, key={key}, key_type={key_type}")
if not host or not password:
logger.error("Need host, password")
return ResponseBuilder.build(
400,
"Need host, password",
- {"host": host, "key": key},
+ {"host": host, "key": key, "key_type": key_type},
errors=["Need host, password"],
)
+ if key_type not in ["rsa", "ed25519"]:
+ logger.error(f"Invalid key_type: {key_type}")
+ return ResponseBuilder.build(
+ 400,
+ f"Invalid key_type: {key_type}",
+ {"host": host, "key": key, "key_type": key_type},
+ errors=["key_type must be 'rsa' or 'ed25519'"],
+ )
try:
t = Tunnel(
remote_host=host,
@@ -653,9 +692,12 @@ async def setup_passwordless_ssh(
key = os.path.expanduser(key)
pub_key = key + ".pub"
if not os.path.exists(pub_key):
-
-
-
+ if key_type == "rsa":
+ os.system(f"ssh-keygen -t rsa -b 4096 -f {key} -N ''")
+ else:  # ed25519
+ os.system(f"ssh-keygen -t ed25519 -f {key} -N ''")
+ logger.info(f"Generated {key_type} key: {key}, {pub_key}")
+ t.setup_passwordless_ssh(local_key_path=key, key_type=key_type)
if ctx:
await ctx.report_progress(progress=100, total=100)
logger.debug("Progress: 100/100")
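The `key_type` parameter added in 1.0.1 defaults to `ed25519` and only accepts `rsa` or `ed25519`; when the key pair is missing, `ssh-keygen` is invoked with `-t rsa -b 4096` or `-t ed25519` accordingly. A standalone sketch of that branching; the wrapper name `generate_keypair` is illustrative, and it shells out the same way the diffed code does:

# Sketch of the key_type validation and ssh-keygen branching shown above.
import os


def generate_keypair(key_path: str, key_type: str = "ed25519") -> str:
    if key_type not in ("rsa", "ed25519"):
        raise ValueError("key_type must be 'rsa' or 'ed25519'")
    key_path = os.path.expanduser(key_path)
    if not os.path.exists(key_path + ".pub"):
        if key_type == "rsa":
            os.system(f"ssh-keygen -t rsa -b 4096 -f {key_path} -N ''")
        else:  # ed25519 (the 1.0.1 default)
            os.system(f"ssh-keygen -t ed25519 -f {key_path} -N ''")
    return key_path + ".pub"


# e.g. generate_keypair("~/.ssh/id_demo")  # creates id_demo / id_demo.pub if missing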
@@ -663,15 +705,18 @@ async def setup_passwordless_ssh(
return ResponseBuilder.build(
200,
f"SSH setup for {user}@{host}",
- {"host": host, "key": key, "user": user},
+ {"host": host, "key": key, "user": user, "key_type": key_type},
files=[pub_key],
-
+ locations=[f"~/.ssh/authorized_keys on {host}"],
errors=[],
)
except Exception as e:
logger.error(f"SSH setup fail: {e}")
return ResponseBuilder.build(
- 500,
+ 500,
+ f"SSH setup fail: {e}",
+ {"host": host, "key": key, "key_type": key_type},
+ str(e),
)
finally:
if "t" in locals():
@@ -698,7 +743,8 @@ async def copy_ssh_config(
description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
lcfg: str = Field(description="Local SSH config.", default=None),
rcfg: str = Field(
@@ -726,8 +772,8 @@ async def copy_ssh_config(
) -> Dict:
"""Copy SSH config to remote host. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
+ if error := setup_logging(log, logger):
+ return error
logger.debug(f"Copy cfg: host={host}, local={lcfg}, remote={rcfg}")
if not host or not lcfg:
logger.error("Need host, lcfg")
@@ -761,7 +807,7 @@ async def copy_ssh_config(
f"Copied cfg to {rcfg} on {host}",
{"host": host, "lcfg": lcfg, "rcfg": rcfg},
files=[lcfg],
-
+ locations=[rcfg],
errors=[],
)
except Exception as e:
@@ -797,9 +843,13 @@ async def rotate_ssh_key(
description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
),
port: int = Field(
- description="Port.",
+ description="Port.",
+ default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
),
new_key: str = Field(description="New private key path.", default=None),
+ key_type: str = Field(
+ description="Key type to generate (rsa or ed25519).", default="ed25519"
+ ),
id_file: Optional[str] = Field(
description="Current key path.",
default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
@@ -822,17 +872,25 @@ async def rotate_ssh_key(
) -> Dict:
"""Rotate SSH key on remote host. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
- logger.debug(f"Rotate key: host={host}, new_key={new_key}")
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(f"Rotate key: host={host}, new_key={new_key}, key_type={key_type}")
if not host or not new_key:
logger.error("Need host, new_key")
return ResponseBuilder.build(
400,
"Need host, new_key",
- {"host": host, "new_key": new_key},
+ {"host": host, "new_key": new_key, "key_type": key_type},
errors=["Need host, new_key"],
)
+ if key_type not in ["rsa", "ed25519"]:
+ logger.error(f"Invalid key_type: {key_type}")
+ return ResponseBuilder.build(
+ 400,
+ f"Invalid key_type: {key_type}",
+ {"host": host, "new_key": new_key, "key_type": key_type},
+ errors=["key_type must be 'rsa' or 'ed25519'"],
+ )
try:
t = Tunnel(
remote_host=host,
@@ -850,25 +908,36 @@ async def rotate_ssh_key(
new_key = os.path.expanduser(new_key)
new_public_key = new_key + ".pub"
if not os.path.exists(new_key):
-
-
-
+ if key_type == "rsa":
+ os.system(f"ssh-keygen -t rsa -b 4096 -f {new_key} -N ''")
+ else:  # ed25519
+ os.system(f"ssh-keygen -t ed25519 -f {new_key} -N ''")
+ logger.info(f"Generated {key_type} key: {new_key}")
+ t.rotate_ssh_key(new_key, key_type=key_type)
if ctx:
await ctx.report_progress(progress=100, total=100)
logger.debug("Progress: 100/100")
- logger.debug(f"Rotated key to {new_key} on {host}")
+ logger.debug(f"Rotated {key_type} key to {new_key} on {host}")
return ResponseBuilder.build(
200,
- f"Rotated key to {new_key} on {host}",
- {
+ f"Rotated {key_type} key to {new_key} on {host}",
+ {
+ "host": host,
+ "new_key": new_key,
+ "old_key": id_file,
+ "key_type": key_type,
+ },
files=[new_public_key],
-
+ locations=[f"~/.ssh/authorized_keys on {host}"],
errors=[],
)
except Exception as e:
logger.error(f"Rotate fail: {e}")
return ResponseBuilder.build(
- 500,
+ 500,
+ f"Rotate fail: {e}",
+ {"host": host, "new_key": new_key, "key_type": key_type},
+ str(e),
)
finally:
if "t" in locals():
@@ -899,8 +968,8 @@ async def remove_host_key(
) -> Dict:
"""Remove host key from known_hosts. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
+ if error := setup_logging(log, logger):
+ return error
logger.debug(f"Remove key: host={host}, known_hosts={known_hosts}")
if not host:
logger.error("Need host")
@@ -926,7 +995,7 @@ async def remove_host_key(
msg,
{"host": host, "known_hosts": known_hosts},
files=[],
-
+ locations=[],
errors=[] if "Removed" in msg else [msg],
)
except Exception as e:
@@ -945,8 +1014,8 @@ async def remove_host_key(
},
tags={"remote_access"},
)
- async def
-
+ async def configure_key_auth_on_inventory(
+ inventory: str = Field(
description="YAML inventory path.",
default=os.environ.get("TUNNEL_INVENTORY", None),
),
@@ -956,6 +1025,9 @@ async def setup_all_passwordless_ssh(
"TUNNEL_IDENTITY_FILE", os.path.expanduser("~/.ssh/id_shared")
),
),
+ key_type: str = Field(
+ description="Key type to generate (rsa or ed25519).", default="ed25519"
+ ),
group: str = Field(
description="Target group.",
default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
@@ -965,35 +1037,47 @@ async def setup_all_passwordless_ssh(
default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
),
max_threads: int = Field(
- description="Max threads.",
+ description="Max threads.",
+ default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
),
log: Optional[str] = Field(description="Log file.", default=None),
ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
"""Setup passwordless SSH for all hosts in group. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
- logger.debug(f"Setup SSH all: inv={
- if not
- logger.error("Need
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(f"Setup SSH all: inv={inventory}, group={group}, key_type={key_type}")
+ if not inventory:
+ logger.error("Need inventory")
+ return ResponseBuilder.build(
+ 400,
+ "Need inventory",
+ {"inventory": inventory, "group": group, "key_type": key_type},
+ errors=["Need inventory"],
+ )
+ if key_type not in ["rsa", "ed25519"]:
+ logger.error(f"Invalid key_type: {key_type}")
return ResponseBuilder.build(
400,
- "
- {"
- errors=["
+ f"Invalid key_type: {key_type}",
+ {"inventory": inventory, "group": group, "key_type": key_type},
+ errors=["key_type must be 'rsa' or 'ed25519'"],
)
try:
key = os.path.expanduser(key)
pub_key = key + ".pub"
if not os.path.exists(key):
-
-
+ if key_type == "rsa":
+ os.system(f"ssh-keygen -t rsa -b 4096 -f {key} -N ''")
+ else:  # ed25519
+ os.system(f"ssh-keygen -t ed25519 -f {key} -N ''")
+ logger.info(f"Generated {key_type} key: {key}, {pub_key}")
with open(pub_key, "r") as f:
pub = f.read().strip()
- hosts,
- if
- return
+ hosts, error = load_inventory(inventory, group, logger)
+ if error:
+ return error
total = len(hosts)
if ctx:
await ctx.report_progress(progress=0, total=total)
@@ -1006,16 +1090,16 @@ async def setup_all_passwordless_ssh(
try:
t = Tunnel(remote_host=host, username=user, password=password)
t.remove_host_key()
- t.setup_passwordless_ssh(local_key_path=kpath)
+ t.setup_passwordless_ssh(local_key_path=kpath, key_type=key_type)
t.connect()
t.run_command(f"echo '{pub}' >> ~/.ssh/authorized_keys")
t.run_command("chmod 600 ~/.ssh/authorized_keys")
- logger.info(f"Added key to {user}@{host}")
- res, msg = t.test_key_auth(
+ logger.info(f"Added {key_type} key to {user}@{host}")
+ res, msg = t.test_key_auth(kpath)
return {
"hostname": host,
"status": "success",
- "message": f"SSH setup for {user}@{host}",
+ "message": f"SSH setup for {user}@{host} with {key_type} key",
"errors": [] if res else [msg],
}
except Exception as e:
@@ -1030,7 +1114,7 @@ async def setup_all_passwordless_ssh(
if "t" in locals():
t.close()

- results, files,
+ results, files, locations, errors = [], [], [], []
if parallel:
with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as ex:
futures = [
@@ -1043,19 +1127,21 @@ async def setup_all_passwordless_ssh(
results.append(r)
if r["status"] == "success":
files.append(pub_key)
-
+ locations.append(
+ f"~/.ssh/authorized_keys on {r['hostname']}"
+ )
else:
errors.extend(r["errors"])
if ctx:
await ctx.report_progress(progress=i, total=total)
logger.debug(f"Progress: {i}/{total}")
except Exception as e:
- logger.error(f"Parallel
+ logger.error(f"Parallel error: {e}")
results.append(
{
"hostname": "unknown",
"status": "failed",
- "message": f"Parallel
+ "message": f"Parallel error: {e}",
"errors": [str(e)],
}
)
@@ -1066,7 +1152,7 @@ async def setup_all_passwordless_ssh(
results.append(r)
if r["status"] == "success":
files.append(pub_key)
-
+ locations.append(f"~/.ssh/authorized_keys on {r['hostname']}")
else:
errors.extend(r["errors"])
if ctx:
@@ -1081,10 +1167,15 @@ async def setup_all_passwordless_ssh(
return ResponseBuilder.build(
200 if not errors else 500,
msg,
- {
+ {
+ "inventory": inventory,
+ "group": group,
+ "key_type": key_type,
+ "host_results": results,
+ },
"; ".join(errors),
files,
-
+ locations,
errors,
)
except Exception as e:
@@ -1092,7 +1183,7 @@ async def setup_all_passwordless_ssh(
return ResponseBuilder.build(
500,
f"Setup all fail: {e}",
- {"
+ {"inventory": inventory, "group": group, "key_type": key_type},
str(e),
)

@@ -1106,8 +1197,8 @@ async def setup_all_passwordless_ssh(
},
tags={"remote_access"},
)
- async def
-
+ async def run_command_on_inventory(
+ inventory: str = Field(
description="YAML inventory path.",
default=os.environ.get("TUNNEL_INVENTORY", None),
),
@@ -1121,28 +1212,29 @@ async def run_command_on_all(
default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
),
max_threads: int = Field(
- description="Max threads.",
+ description="Max threads.",
+ default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
),
log: Optional[str] = Field(description="Log file.", default=None),
ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
"""Run command on all hosts in group. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
- logger.debug(f"Run cmd all: inv={
- if not
- logger.error("Need
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(f"Run cmd all: inv={inventory}, group={group}, cmd={cmd}")
+ if not inventory or not cmd:
+ logger.error("Need inventory, cmd")
return ResponseBuilder.build(
400,
- "Need
- {"
- errors=["Need
+ "Need inventory, cmd",
+ {"inventory": inventory, "group": group, "cmd": cmd},
+ errors=["Need inventory, cmd"],
)
try:
- hosts,
- if
- return
+ hosts, error = load_inventory(inventory, group, logger)
+ if error:
+ return error
total = len(hosts)
if ctx:
await ctx.report_progress(progress=0, total=total)
@@ -1157,14 +1249,14 @@ async def run_command_on_all(
password=h.get("password"),
identity_file=h.get("key_path"),
)
- out,
- logger.info(f"Host {host}: Out: {out}, Err: {
+ out, error = t.run_command(cmd)
+ logger.info(f"Host {host}: Out: {out}, Err: {error}")
return {
"hostname": host,
"status": "success",
"message": f"Cmd '{cmd}' done on {host}",
"stdout": out,
- "stderr":
+ "stderr": error,
"errors": [],
}
except Exception as e:
@@ -1196,12 +1288,12 @@ async def run_command_on_all(
await ctx.report_progress(progress=i, total=total)
logger.debug(f"Progress: {i}/{total}")
except Exception as e:
- logger.error(f"Parallel
+ logger.error(f"Parallel error: {e}")
results.append(
{
"hostname": "unknown",
"status": "failed",
- "message": f"Parallel
+ "message": f"Parallel error: {e}",
"stdout": "",
"stderr": str(e),
"errors": [str(e)],
@@ -1226,7 +1318,7 @@ async def run_command_on_all(
200 if not errors else 500,
msg,
{
- "
+ "inventory": inventory,
"group": group,
"cmd": cmd,
"host_results": results,
@@ -1241,7 +1333,7 @@ async def run_command_on_all(
return ResponseBuilder.build(
500,
f"Cmd all fail: {e}",
- {"
+ {"inventory": inventory, "group": group, "cmd": cmd},
str(e),
)

@@ -1255,8 +1347,8 @@ async def run_command_on_all(
},
tags={"remote_access"},
)
- async def
-
+ async def copy_ssh_config_on_inventory(
+ inventory: str = Field(
description="YAML inventory path.",
default=os.environ.get("TUNNEL_INVENTORY", None),
),
@@ -1273,7 +1365,8 @@ async def copy_ssh_config_on_all(
default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
),
max_threads: int = Field(
- description="Max threads.",
+ description="Max threads.",
+ default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
),
log: Optional[str] = Field(
description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
@@ -1282,22 +1375,22 @@ async def copy_ssh_config_on_all(
) -> Dict:
"""Copy SSH config to all hosts in YAML group. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
- logger.debug(f"Copy SSH config: inv={
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(f"Copy SSH config: inv={inventory}, group={group}")

- if not
- logger.error("Need
+ if not inventory or not cfg:
+ logger.error("Need inventory, cfg")
return ResponseBuilder.build(
400,
- "Need
+ "Need inventory, cfg",
{
- "
+ "inventory": inventory,
"group": group,
"cfg": cfg,
"rmt_cfg": rmt_cfg,
},
- errors=["Need
+ errors=["Need inventory, cfg"],
)

if not os.path.exists(cfg):
@@ -1306,7 +1399,7 @@ async def copy_ssh_config_on_all(
400,
f"No cfg file: {cfg}",
{
- "
+ "inventory": inventory,
"group": group,
"cfg": cfg,
"rmt_cfg": rmt_cfg,
@@ -1315,16 +1408,16 @@ async def copy_ssh_config_on_all(
)

try:
- hosts,
- if
- return
+ hosts, error = load_inventory(inventory, group, logger)
+ if error:
+ return error

total = len(hosts)
if ctx:
await ctx.report_progress(progress=0, total=total)
logger.debug(f"Progress: 0/{total}")

- results, files,
+ results, files, locations, errors = [], [], [], []

async def copy_host(h: Dict) -> Dict:
try:
@@ -1365,19 +1458,19 @@ async def copy_ssh_config_on_all(
results.append(r)
if r["status"] == "success":
files.append(cfg)
-
+ locations.append(f"{rmt_cfg} on {r['hostname']}")
else:
errors.extend(r["errors"])
if ctx:
await ctx.report_progress(progress=i, total=total)
logger.debug(f"Progress: {i}/{total}")
except Exception as e:
- logger.error(f"Parallel
+ logger.error(f"Parallel error: {e}")
results.append(
{
"hostname": "unknown",
"status": "failed",
- "message": f"Parallel
+ "message": f"Parallel error: {e}",
"errors": [str(e)],
}
)
@@ -1388,7 +1481,7 @@ async def copy_ssh_config_on_all(
results.append(r)
if r["status"] == "success":
files.append(cfg)
-
+ locations.append(f"{rmt_cfg} on {r['hostname']}")
else:
errors.extend(r["errors"])
if ctx:
@@ -1405,7 +1498,7 @@ async def copy_ssh_config_on_all(
200 if not errors else 500,
msg,
{
- "
+ "inventory": inventory,
"group": group,
"cfg": cfg,
"rmt_cfg": rmt_cfg,
@@ -1413,7 +1506,7 @@ async def copy_ssh_config_on_all(
},
"; ".join(errors),
files,
-
+ locations,
errors,
)

@@ -1423,7 +1516,7 @@ async def copy_ssh_config_on_all(
500,
f"Copy all fail: {e}",
{
- "
+ "inventory": inventory,
"group": group,
"cfg": cfg,
"rmt_cfg": rmt_cfg,
@@ -1441,14 +1534,17 @@ async def copy_ssh_config_on_all(
},
tags={"remote_access"},
)
- async def
-
+ async def rotate_ssh_key_on_inventory(
+ inventory: str = Field(
description="YAML inventory path.",
default=os.environ.get("TUNNEL_INVENTORY", None),
),
key_pfx: str = Field(
description="Prefix for new keys.", default=os.path.expanduser("~/.ssh/id_")
),
+ key_type: str = Field(
+ description="Key type to generate (rsa or ed25519).", default="ed25519"
+ ),
group: str = Field(
description="Target group.",
default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
@@ -1458,7 +1554,8 @@ async def rotate_ssh_key_on_all(
default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
),
max_threads: int = Field(
- description="Max threads.",
+ description="Max threads.",
+ default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
),
log: Optional[str] = Field(
description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
@@ -1467,30 +1564,50 @@ async def rotate_ssh_key_on_all(
) -> Dict:
"""Rotate SSH keys for all hosts in YAML group. Expected return object type: dict"""
logger = logging.getLogger("TunnelServer")
- if
- return
- logger.debug(
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(
+ f"Rotate SSH keys: inv={inventory}, group={group}, key_type={key_type}"
+ )

- if not
- logger.error("Need
+ if not inventory:
+ logger.error("Need inventory")
return ResponseBuilder.build(
400,
- "Need
- {
-
+ "Need inventory",
+ {
+ "inventory": inventory,
+ "group": group,
+ "key_pfx": key_pfx,
+ "key_type": key_type,
+ },
+ errors=["Need inventory"],
+ )
+ if key_type not in ["rsa", "ed25519"]:
+ logger.error(f"Invalid key_type: {key_type}")
+ return ResponseBuilder.build(
+ 400,
+ f"Invalid key_type: {key_type}",
+ {
+ "inventory": inventory,
+ "group": group,
+ "key_pfx": key_pfx,
+ "key_type": key_type,
+ },
+ errors=["key_type must be 'rsa' or 'ed25519'"],
)

try:
- hosts,
- if
- return
+ hosts, error = load_inventory(inventory, group, logger)
+ if error:
+ return error

total = len(hosts)
if ctx:
await ctx.report_progress(progress=0, total=total)
logger.debug(f"Progress: 0/{total}")

- results, files,
+ results, files, locations, errors = [], [], [], []

async def rotate_host(h: Dict) -> Dict:
key = os.path.expanduser(key_pfx + h["hostname"])
@@ -1501,12 +1618,12 @@ async def rotate_ssh_key_on_all(
password=h.get("password"),
identity_file=h.get("key_path"),
)
- t.rotate_ssh_key(key)
- logger.info(f"Rotated key for {h['hostname']}: {key}")
+ t.rotate_ssh_key(key, key_type=key_type)
+ logger.info(f"Rotated {key_type} key for {h['hostname']}: {key}")
return {
"hostname": h["hostname"],
"status": "success",
- "message": f"Rotated key to {key}",
+ "message": f"Rotated {key_type} key to {key}",
"errors": [],
"new_key_path": key,
}
@@ -1528,25 +1645,27 @@ async def rotate_ssh_key_on_all(
futures = [
ex.submit(lambda h: asyncio.run(rotate_host(h)), h) for h in hosts
]
- for i, f in enumerate(concurrent.
+ for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
try:
r = f.result()
results.append(r)
if r["status"] == "success":
files.append(r["new_key_path"] + ".pub")
-
+ locations.append(
+ f"~/.ssh/authorized_keys on {r['hostname']}"
+ )
else:
errors.extend(r["errors"])
if ctx:
await ctx.report_progress(progress=i, total=total)
logger.debug(f"Progress: {i}/{total}")
except Exception as e:
- logger.error(f"Parallel
+ logger.error(f"Parallel error: {e}")
results.append(
{
"hostname": "unknown",
"status": "failed",
- "message": f"Parallel
+ "message": f"Parallel error: {e}",
"errors": [str(e)],
"new_key_path": None,
}
@@ -1558,7 +1677,7 @@ async def rotate_ssh_key_on_all(
results.append(r)
if r["status"] == "success":
files.append(r["new_key_path"] + ".pub")
-
+ locations.append(f"~/.ssh/authorized_keys on {r['hostname']}")
else:
errors.extend(r["errors"])
if ctx:
@@ -1567,7 +1686,7 @@ async def rotate_ssh_key_on_all(

logger.debug(f"Done SSH key rotate for {group}")
msg = (
- f"Rotated keys for {group}"
+ f"Rotated {key_type} keys for {group}"
if not errors
else f"Rotate failed for some in {group}"
)
@@ -1575,14 +1694,15 @@ async def rotate_ssh_key_on_all(
200 if not errors else 500,
msg,
{
- "
+ "inventory": inventory,
"group": group,
"key_pfx": key_pfx,
+ "key_type": key_type,
"host_results": results,
},
"; ".join(errors),
files,
-
+ locations,
errors,
)

@@ -1591,7 +1711,386 @@ async def rotate_ssh_key_on_all(
return ResponseBuilder.build(
500,
f"Rotate all fail: {e}",
- {
+ {
+ "inventory": inventory,
+ "group": group,
+ "key_pfx": key_pfx,
+ "key_type": key_type,
+ },
+ str(e),
+ )
+
+
+ @mcp.tool(
+ annotations={
+ "title": "Upload File to All Hosts",
+ "readOnlyHint": False,
+ "destructiveHint": True,
+ "idempotentHint": False,
+ },
+ tags={"remote_access"},
+ )
+ async def send_file_to_inventory(
+ inventory: str = Field(
+ description="YAML inventory path.",
+ default=os.environ.get("TUNNEL_INVENTORY", None),
+ ),
+ lpath: str = Field(description="Local file path.", default=None),
+ rpath: str = Field(description="Remote destination path.", default=None),
+ group: str = Field(
+ description="Target group.",
+ default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
+ ),
+ parallel: bool = Field(
+ description="Run parallel.",
+ default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
+ ),
+ max_threads: int = Field(
+ description="Max threads.",
+ default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "5")),
+ ),
+ log: Optional[str] = Field(
+ description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
+ ),
+ ctx: Context = Field(description="MCP context.", default=None),
+ ) -> Dict:
+ """Upload a file to all hosts in the specified inventory group. Expected return object type: dict"""
+ logger = logging.getLogger("TunnelServer")
+ lpath = os.path.abspath(os.path.expanduser(lpath))  # Normalize
+ rpath = os.path.expanduser(rpath)
+ logger.debug(
+ f"Normalized: lpath={lpath} (exists={os.path.exists(lpath)}, isfile={os.path.isfile(lpath)}), rpath={rpath}, CWD={os.getcwd()}"
+ )
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(
+ f"Upload file all: inv={inventory}, group={group}, local={lpath}, remote={rpath}"
+ )
+ if not inventory or not lpath or not rpath:
+ logger.error("Need inventory, lpath, rpath")
+ return ResponseBuilder.build(
+ 400,
+ "Need inventory, lpath, rpath",
+ {"inventory": inventory, "group": group, "lpath": lpath, "rpath": rpath},
+ errors=["Need inventory, lpath, rpath"],
+ )
+ if not os.path.exists(lpath) or not os.path.isfile(lpath):
+ logger.error(f"Invalid file: {lpath}")
+ return ResponseBuilder.build(
+ 400,
+ f"Invalid file: {lpath}",
+ {"inventory": inventory, "group": group, "lpath": lpath, "rpath": rpath},
+ errors=[f"Invalid file: {lpath}"],
+ )
+ try:
+ hosts, error = load_inventory(inventory, group, logger)
+ if error:
+ return error
+ total = len(hosts)
+ if ctx:
+ await ctx.report_progress(progress=0, total=total)
+ logger.debug(f"Progress: 0/{total}")
+
+ async def send_host(h: Dict) -> Dict:
+ host = h["hostname"]
+ try:
+ t = Tunnel(
+ remote_host=host,
+ username=h["username"],
+ password=h.get("password"),
+ identity_file=h.get("key_path"),
+ )
+ t.connect()
+ sftp = t.ssh_client.open_sftp()
+ transferred = 0
+
+ def progress_callback(transf, total):
+ nonlocal transferred
+ transferred = transf
+ if ctx:
+ asyncio.ensure_future(
+ ctx.report_progress(progress=transf, total=total)
+ )
+
+ sftp.put(lpath, rpath, callback=progress_callback)
+ sftp.close()
+ logger.info(f"Host {host}: Uploaded {lpath} to {rpath}")
+ return {
+ "hostname": host,
+ "status": "success",
+ "message": f"Uploaded {lpath} to {rpath}",
+ "errors": [],
+ }
+ except Exception as e:
+ logger.error(f"Upload fail {host}: {e}")
+ return {
+ "hostname": host,
+ "status": "failed",
+ "message": f"Upload fail: {e}",
+ "errors": [str(e)],
+ }
+ finally:
+ if "t" in locals():
+ t.close()
+
+ results, files, locations, errors = [], [lpath], [], []
+ if parallel:
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as ex:
+ futures = [
+ ex.submit(lambda h: asyncio.run(send_host(h)), h) for h in hosts
+ ]
+ for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
+ try:
+ r = f.result()
+ results.append(r)
+ if r["status"] == "success":
+ locations.append(f"{rpath} on {r['hostname']}")
+ else:
+ errors.extend(r["errors"])
+ if ctx:
+ await ctx.report_progress(progress=i, total=total)
+ logger.debug(f"Progress: {i}/{total}")
+ except Exception as e:
+ logger.error(f"Parallel error: {e}")
+ results.append(
+ {
+ "hostname": "unknown",
+ "status": "failed",
+ "message": f"Parallel error: {e}",
+ "errors": [str(e)],
+ }
+ )
+ errors.append(str(e))
+ else:
+ for i, h in enumerate(hosts, 1):
+ r = await send_host(h)
+ results.append(r)
+ if r["status"] == "success":
+ locations.append(f"{rpath} on {r['hostname']}")
+ else:
+ errors.extend(r["errors"])
+ if ctx:
+ await ctx.report_progress(progress=i, total=total)
+ logger.debug(f"Progress: {i}/{total}")
+
+ logger.debug(f"Done file upload for {group}")
+ msg = (
+ f"Uploaded {lpath} to {group}"
+ if not errors
+ else f"Upload failed for some in {group}"
+ )
+ return ResponseBuilder.build(
+ 200 if not errors else 500,
+ msg,
+ {
+ "inventory": inventory,
+ "group": group,
+ "lpath": lpath,
+ "rpath": rpath,
+ "host_results": results,
+ },
+ "; ".join(errors),
+ files,
+ locations,
+ errors,
+ )
+ except Exception as e:
+ logger.error(f"Upload all fail: {e}")
+ return ResponseBuilder.build(
+ 500,
+ f"Upload all fail: {e}",
+ {"inventory": inventory, "group": group, "lpath": lpath, "rpath": rpath},
+ str(e),
+ )
+
+
+ @mcp.tool(
+ annotations={
+ "title": "Download File from All Hosts",
+ "readOnlyHint": False,
+ "destructiveHint": False,
+ "idempotentHint": True,
+ },
+ tags={"remote_access"},
+ )
+ async def receive_file_from_inventory(
+ inventory: str = Field(
+ description="YAML inventory path.",
+ default=os.environ.get("TUNNEL_INVENTORY", None),
+ ),
+ rpath: str = Field(description="Remote file path to download.", default=None),
+ lpath_prefix: str = Field(
+ description="Local directory path prefix to save files.", default=None
+ ),
+ group: str = Field(
+ description="Target group.",
+ default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
+ ),
+ parallel: bool = Field(
+ description="Run parallel.",
+ default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
+ ),
+ max_threads: int = Field(
+ description="Max threads.",
+ default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "5")),
+ ),
+ log: Optional[str] = Field(
+ description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
+ ),
+ ctx: Context = Field(description="MCP context.", default=None),
+ ) -> Dict:
+ """Download a file from all hosts in the specified inventory group. Expected return object type: dict"""
+ logger = logging.getLogger("TunnelServer")
+ if error := setup_logging(log, logger):
+ return error
+ logger.debug(
+ f"Download file all: inv={inventory}, group={group}, remote={rpath}, local_prefix={lpath_prefix}"
+ )
+ if not inventory or not rpath or not lpath_prefix:
+ logger.error("Need inventory, rpath, lpath_prefix")
+ return ResponseBuilder.build(
+ 400,
+ "Need inventory, rpath, lpath_prefix",
+ {
+ "inventory": inventory,
+ "group": group,
+ "rpath": rpath,
+ "lpath_prefix": lpath_prefix,
+ },
+ errors=["Need inventory, rpath, lpath_prefix"],
+ )
+ try:
+ os.makedirs(lpath_prefix, exist_ok=True)
+ hosts, error = load_inventory(inventory, group, logger)
+ if error:
+ return error
+ total = len(hosts)
+ if ctx:
+ await ctx.report_progress(progress=0, total=total)
+ logger.debug(f"Progress: 0/{total}")
+
+ async def receive_host(h: Dict) -> Dict:
+ host = h["hostname"]
+ lpath = os.path.join(lpath_prefix, host, os.path.basename(rpath))
+ os.makedirs(os.path.dirname(lpath), exist_ok=True)
+ try:
+ t = Tunnel(
+ remote_host=host,
+ username=h["username"],
+ password=h.get("password"),
+ identity_file=h.get("key_path"),
+ )
+ t.connect()
+ sftp = t.ssh_client.open_sftp()
+ sftp.stat(rpath)
+ transferred = 0
+
+ def progress_callback(transf, total):
+ nonlocal transferred
+ transferred = transf
+ if ctx:
+ asyncio.ensure_future(
+ ctx.report_progress(progress=transf, total=total)
+ )
+
+ sftp.get(rpath, lpath, callback=progress_callback)
+ sftp.close()
+ logger.info(f"Host {host}: Downloaded {rpath} to {lpath}")
+ return {
+ "hostname": host,
+ "status": "success",
+ "message": f"Downloaded {rpath} to {lpath}",
+ "errors": [],
+ "local_path": lpath,
+ }
+ except Exception as e:
+ logger.error(f"Download fail {host}: {e}")
+ return {
+ "hostname": host,
+ "status": "failed",
+ "message": f"Download fail: {e}",
+ "errors": [str(e)],
+ "local_path": lpath,
+ }
+ finally:
+ if "t" in locals():
+ t.close()
+
+ results, files, locations, errors = [], [], [], []
+ if parallel:
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as ex:
+ futures = [
+ ex.submit(lambda h: asyncio.run(receive_host(h)), h) for h in hosts
+ ]
+ for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
+ try:
+ r = f.result()
+ results.append(r)
+ if r["status"] == "success":
+ files.append(rpath)
+ locations.append(r["local_path"])
+ else:
+ errors.extend(r["errors"])
+ if ctx:
+ await ctx.report_progress(progress=i, total=total)
+ logger.debug(f"Progress: {i}/{total}")
+ except Exception as e:
+ logger.error(f"Parallel error: {e}")
+ results.append(
+ {
+ "hostname": "unknown",
+ "status": "failed",
+ "message": f"Parallel error: {e}",
+ "errors": [str(e)],
+ "local_path": None,
+ }
+ )
+ errors.append(str(e))
+ else:
+ for i, h in enumerate(hosts, 1):
+ r = await receive_host(h)
+ results.append(r)
+ if r["status"] == "success":
+ files.append(rpath)
+ locations.append(r["local_path"])
+ else:
+ errors.extend(r["errors"])
+ if ctx:
+ await ctx.report_progress(progress=i, total=total)
+ logger.debug(f"Progress: {i}/{total}")
+
+ logger.debug(f"Done file download for {group}")
+ msg = (
+ f"Downloaded {rpath} from {group}"
+ if not errors
+ else f"Download failed for some in {group}"
+ )
+ return ResponseBuilder.build(
+ 200 if not errors else 500,
+ msg,
+ {
+ "inventory": inventory,
+ "group": group,
+ "rpath": rpath,
+ "lpath_prefix": lpath_prefix,
+ "host_results": results,
+ },
+ "; ".join(errors),
+ files,
+ locations,
+ errors,
+ )
+ except Exception as e:
+ logger.error(f"Download all fail: {e}")
+ return ResponseBuilder.build(
+ 500,
+ f"Download all fail: {e}",
+ {
+ "inventory": inventory,
+ "group": group,
+ "rpath": rpath,
+ "lpath_prefix": lpath_prefix,
+ },
str(e),
)

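The new `receive_file_from_inventory` tool saves each host's copy under `lpath_prefix/<hostname>/<basename(rpath)>`. A short sketch of the resulting local layout; the prefix, remote path, and hostnames below are illustrative:

# Sketch of the per-host download layout used by receive_file_from_inventory.
import os

lpath_prefix = "./downloads"     # hypothetical local prefix
rpath = "/var/log/syslog"        # hypothetical remote file
hosts = ["node1", "node2"]       # hypothetical inventory hostnames

for host in hosts:
    # The real tool also calls os.makedirs(os.path.dirname(lpath), exist_ok=True)
    # before fetching the file over SFTP.
    lpath = os.path.join(lpath_prefix, host, os.path.basename(rpath))
    print(lpath)   # ./downloads/node1/syslog, ./downloads/node2/syslog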