tunnel-manager 1.0.9 (tunnel_manager-1.0.9-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scripts/validate_a2a_agent.py +148 -0
- scripts/validate_agent.py +67 -0
- tests/test_tunnel.py +76 -0
- tunnel_manager/__init__.py +66 -0
- tunnel_manager/__main__.py +6 -0
- tunnel_manager/mcp_config.json +8 -0
- tunnel_manager/middlewares.py +53 -0
- tunnel_manager/skills/tunnel-manager-remote-access/SKILL.md +51 -0
- tunnel_manager/tunnel_manager.py +990 -0
- tunnel_manager/tunnel_manager_agent.py +350 -0
- tunnel_manager/tunnel_manager_mcp.py +2600 -0
- tunnel_manager/utils.py +110 -0
- tunnel_manager-1.0.9.dist-info/METADATA +565 -0
- tunnel_manager-1.0.9.dist-info/RECORD +18 -0
- tunnel_manager-1.0.9.dist-info/WHEEL +5 -0
- tunnel_manager-1.0.9.dist-info/entry_points.txt +4 -0
- tunnel_manager-1.0.9.dist-info/licenses/LICENSE +20 -0
- tunnel_manager-1.0.9.dist-info/top_level.txt +3 -0
tunnel_manager/tunnel_manager_agent.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# coding: utf-8
+import os
+import argparse
+import logging
+import uvicorn
+from typing import Optional, Any
+from contextlib import asynccontextmanager
+import json
+
+from fastmcp import Client
+from pydantic_ai import Agent, ModelSettings
+from pydantic_ai.mcp import load_mcp_servers
+from pydantic_ai.toolsets.fastmcp import FastMCPToolset
+from pydantic_ai_skills import SkillsToolset
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.models.anthropic import AnthropicModel
+from pydantic_ai.models.google import GoogleModel
+from pydantic_ai.models.huggingface import HuggingFaceModel
+from fasta2a import Skill
+from tunnel_manager.utils import (
+    to_boolean,
+    to_integer,
+    get_mcp_config_path,
+    get_skills_path,
+    load_skills_from_directory,
+)
+
+from fastapi import FastAPI, Request
+from starlette.responses import Response, StreamingResponse
+from pydantic import ValidationError
+from pydantic_ai.ui import SSE_CONTENT_TYPE
+from pydantic_ai.ui.ag_ui import AGUIAdapter
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+    handlers=[logging.StreamHandler()],
+)
+logging.getLogger("pydantic_ai").setLevel(logging.INFO)
+logging.getLogger("fastmcp").setLevel(logging.INFO)
+logging.getLogger("httpx").setLevel(logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Default Configuration
+DEFAULT_HOST = os.getenv("HOST", "0.0.0.0")
+DEFAULT_PORT = to_integer(os.getenv("PORT", "9000"))
+DEFAULT_DEBUG = to_boolean(os.getenv("DEBUG", "False"))
+DEFAULT_PROVIDER = os.getenv("PROVIDER", "openai")
+DEFAULT_MODEL_ID = os.getenv("MODEL_ID", "qwen/qwen3-8b")
+DEFAULT_OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "http://127.0.0.1:1234/v1")
+DEFAULT_OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "ollama")
+DEFAULT_MCP_URL = os.getenv("MCP_URL", None)
+DEFAULT_MCP_CONFIG = os.getenv("MCP_CONFIG", get_mcp_config_path())
+# Calculate default skills directory relative to this file
+DEFAULT_SKILLS_DIRECTORY = os.getenv("SKILLS_DIRECTORY", get_skills_path())
+DEFAULT_ENABLE_WEB_UI = to_boolean(os.getenv("ENABLE_WEB_UI", "False"))
+
+AGENT_NAME = "Tunnel Manager Agent"
+AGENT_DESCRIPTION = "A specialist agent for managing remote servers via SSH tunnels."
+
+AGENT_SYSTEM_PROMPT = (
+    "You are an SSH Specialist Agent responsible for managing remote servers via SSH tunnels.\n"
+    "You have access to tools for running commands, transferring files, and managing SSH keys on remote hosts.\n"
+    "Your responsibilities:\n"
+    "1. Analyze the user's request for remote server management.\n"
+    "2. Use the 'run_command_on_remote_host' tools to execute commands on the remote host.\n"
+    "3. Ensure secure practices when handling SSH keys and credentials.\n"
+    "4. If a connection fails, diagnose the issue (e.g., check SSH server status) and suggest fixes.\n"
+    "5. Always confirm destructive actions (like file overwrites) unless explicitly instructed otherwise.\n"
+    "6. Provide clear output of command execution results.\n"
+)
+
+
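All of the DEFAULT_* values above are read from the environment when the module is imported. A minimal configuration sketch (the variable names are the ones used above; the values are illustrative only):

# Illustrative only: names match the os.getenv() calls above. Set them
# before the module is imported, since the DEFAULT_* values are computed
# at import time.
import os

os.environ["PROVIDER"] = "openai"
os.environ["MODEL_ID"] = "qwen/qwen3-8b"
os.environ["OPENAI_BASE_URL"] = "http://127.0.0.1:1234/v1"
os.environ["PORT"] = "9000"

import tunnel_manager.tunnel_manager_agent as tma  # picks up the values above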
+def create_model(
+    provider: str = DEFAULT_PROVIDER,
+    model_id: str = DEFAULT_MODEL_ID,
+    base_url: Optional[str] = DEFAULT_OPENAI_BASE_URL,
+    api_key: Optional[str] = DEFAULT_OPENAI_API_KEY,
+):
+    if provider == "openai":
+        target_base_url = base_url or DEFAULT_OPENAI_BASE_URL
+        target_api_key = api_key or DEFAULT_OPENAI_API_KEY
+        if target_base_url:
+            os.environ["OPENAI_BASE_URL"] = target_base_url
+        if target_api_key:
+            os.environ["OPENAI_API_KEY"] = target_api_key
+        return OpenAIChatModel(model_id, provider="openai")
+
+    elif provider == "anthropic":
+        if api_key:
+            os.environ["ANTHROPIC_API_KEY"] = api_key
+        return AnthropicModel(model_id)
+
+    elif provider == "google":
+        if api_key:
+            os.environ["GEMINI_API_KEY"] = api_key
+            os.environ["GOOGLE_API_KEY"] = api_key
+        return GoogleModel(model_id)
+
+    elif provider == "huggingface":
+        if api_key:
+            os.environ["HF_TOKEN"] = api_key
+        return HuggingFaceModel(model_id)
+
+
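For reference, a minimal sketch of calling create_model directly, mirroring the defaults above. Note the function has no else branch, so an unrecognized provider name falls through and returns None:

from tunnel_manager.tunnel_manager_agent import create_model

# "openai" branch: exports OPENAI_BASE_URL / OPENAI_API_KEY, then builds
# an OpenAIChatModel for the given model id.
model = create_model(
    provider="openai",
    model_id="qwen/qwen3-8b",
    base_url="http://127.0.0.1:1234/v1",  # any OpenAI-compatible endpoint
    api_key="ollama",
)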
+def create_agent(
+    provider: str = DEFAULT_PROVIDER,
+    model_id: str = DEFAULT_MODEL_ID,
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+    mcp_url: Optional[str] = DEFAULT_MCP_URL,  # DEFAULT_MCP_URL may be None
+    mcp_config: str = DEFAULT_MCP_CONFIG,
+    skills_directory: Optional[str] = DEFAULT_SKILLS_DIRECTORY,
+) -> Agent:
+    agent_toolsets = []
+
+    if mcp_config and os.path.exists(mcp_config):
+        mcp_toolset = load_mcp_servers(mcp_config)
+        agent_toolsets.extend(mcp_toolset)
+        logger.info(f"Connected to MCP Config JSON: {mcp_toolset}")
+    elif mcp_url:
+        fastmcp_toolset = FastMCPToolset(Client[Any](mcp_url, timeout=3600))
+        agent_toolsets.append(fastmcp_toolset)
+        logger.info(f"Connected to MCP Server: {mcp_url}")
+
+    if skills_directory and os.path.exists(skills_directory):
+        logger.debug(f"Loading skills {skills_directory}")
+        skills = SkillsToolset(directories=[str(skills_directory)])
+        agent_toolsets.append(skills)
+        logger.info(f"Loaded Skills at {skills_directory}")
+
+    # Create the Model
+    model = create_model(provider, model_id, base_url, api_key)
+
+    logger.info("Initializing Agent...")
+
+    settings = ModelSettings(timeout=3600.0)
+
+    return Agent(
+        model=model,
+        system_prompt=AGENT_SYSTEM_PROMPT,
+        name=AGENT_NAME,
+        toolsets=agent_toolsets,
+        deps_type=Any,
+        model_settings=settings,
+    )
+
+
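A sketch of building the agent programmatically. Note the precedence in create_agent: if mcp_config points at an existing file it wins, and mcp_url is only consulted otherwise:

from tunnel_manager.tunnel_manager_agent import create_agent

# Uses the packaged mcp_config.json and skills directory by default;
# pass mcp_url to attach a remote MCP server when no config file exists.
agent = create_agent(provider="openai", model_id="qwen/qwen3-8b")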
+def create_agent_server(
+    provider: str = DEFAULT_PROVIDER,
+    model_id: str = DEFAULT_MODEL_ID,
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+    mcp_url: Optional[str] = DEFAULT_MCP_URL,  # DEFAULT_MCP_URL may be None
+    mcp_config: str = DEFAULT_MCP_CONFIG,
+    skills_directory: Optional[str] = DEFAULT_SKILLS_DIRECTORY,
+    debug: Optional[bool] = DEFAULT_DEBUG,
+    host: Optional[str] = DEFAULT_HOST,
+    port: Optional[int] = DEFAULT_PORT,
+    enable_web_ui: bool = DEFAULT_ENABLE_WEB_UI,
+):
+    print(
+        f"Starting {AGENT_NAME} with provider={provider}, model={model_id}, mcp={mcp_url} | {mcp_config}"
+    )
+    agent = create_agent(
+        provider=provider,
+        model_id=model_id,
+        base_url=base_url,
+        api_key=api_key,
+        mcp_url=mcp_url,
+        mcp_config=mcp_config,
+        skills_directory=skills_directory,
+    )
+
+    # Define Skills for Agent Card
+    if skills_directory and os.path.exists(skills_directory):
+        skills = load_skills_from_directory(skills_directory)
+        logger.info(f"Loaded {len(skills)} skills from {skills_directory}")
+    else:
+        # Fallback if no skills directory
+        skills = [
+            Skill(
+                id="tunnel_manager_agent",
+                name="Tunnel Manager Agent",
+                description="General access to Tunnel Manager tools",
+                tags=["sshtunnel", "remote"],
+                input_modes=["text"],
+                output_modes=["text"],
+            )
+        ]
+
+    # Create A2A app explicitly before main app to bind lifespan
+    a2a_app = agent.to_a2a(
+        name=AGENT_NAME,
+        description=AGENT_DESCRIPTION,
+        version="1.0.0",
+        skills=skills,
+        debug=debug,
+    )
+
+    @asynccontextmanager
+    async def lifespan(app: FastAPI):
+        # Trigger A2A (sub-app) startup/shutdown events
+        # This is critical for TaskManager initialization in A2A
+        if hasattr(a2a_app, "router"):
+            async with a2a_app.router.lifespan_context(a2a_app):
+                yield
+        else:
+            yield
+
+    # Create main FastAPI app
+    app = FastAPI(
+        title=f"{AGENT_NAME} - A2A + AG-UI Server",
+        description=AGENT_DESCRIPTION,
+        debug=debug,
+        lifespan=lifespan,
+    )
+
+    # Mount A2A as sub-app at /a2a
+    app.mount("/a2a", a2a_app)
+
+    # Add AG-UI endpoint (POST to /ag-ui)
+    @app.post("/ag-ui")
+    async def ag_ui_endpoint(request: Request) -> Response:
+        accept = request.headers.get("accept", SSE_CONTENT_TYPE)
+        try:
+            # Parse incoming AG-UI RunAgentInput from request body
+            run_input = AGUIAdapter.build_run_input(await request.body())
+        except ValidationError as e:
+            return Response(
+                content=e.json(),  # e.json() is already a JSON string; no second dumps
+                media_type="application/json",
+                status_code=422,
+            )
+
+        # Create adapter and run the agent → stream AG-UI events
+        adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept)
+        event_stream = adapter.run_stream()  # Runs agent, yields events
+        sse_stream = adapter.encode_stream(event_stream)  # Encodes to SSE
+
+        return StreamingResponse(
+            sse_stream,
+            media_type=accept,
+        )
+
+    # Mount Web UI if enabled
+    if enable_web_ui:
+        web_ui = agent.to_web(instructions=AGENT_SYSTEM_PROMPT)
+        app.mount("/", web_ui)
+    logger.info(
+        "Starting server on %s:%s (A2A at /a2a, AG-UI at /ag-ui, Web UI: %s)",
+        host,
+        port,
+        "Enabled at /" if enable_web_ui else "Disabled",
+    )
+
+    uvicorn.run(
+        app,
+        host=host,
+        port=port,
+        timeout_keep_alive=1800,  # 30 minute timeout
+        timeout_graceful_shutdown=60,
+        log_level="debug" if debug else "info",
+    )
+
+
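create_agent_server ends in uvicorn.run, so it blocks until shutdown; a minimal programmatic launch sketch:

from tunnel_manager.tunnel_manager_agent import create_agent_server

# Serves A2A under /a2a and AG-UI at POST /ag-ui; blocks until shutdown.
create_agent_server(host="127.0.0.1", port=9000, enable_web_ui=False)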
+def agent_server():
+    parser = argparse.ArgumentParser(
+        description=f"Run the {AGENT_NAME} A2A + AG-UI Server"
+    )
+    parser.add_argument(
+        "--host", default=DEFAULT_HOST, help="Host to bind the server to"
+    )
+    parser.add_argument(
+        "--port", type=int, default=DEFAULT_PORT, help="Port to bind the server to"
+    )
+    # type=bool would treat any non-empty string (even "False") as True
+    parser.add_argument(
+        "--debug", type=to_boolean, default=DEFAULT_DEBUG, help="Debug mode"
+    )
+    parser.add_argument("--reload", action="store_true", help="Enable auto-reload")
+
+    parser.add_argument(
+        "--provider",
+        default=DEFAULT_PROVIDER,
+        choices=["openai", "anthropic", "google", "huggingface"],
+        help="LLM Provider",
+    )
+    parser.add_argument("--model-id", default=DEFAULT_MODEL_ID, help="LLM Model ID")
+    parser.add_argument(
+        "--base-url",
+        default=DEFAULT_OPENAI_BASE_URL,
+        help="LLM Base URL (for OpenAI compatible providers)",
+    )
+    parser.add_argument("--api-key", default=DEFAULT_OPENAI_API_KEY, help="LLM API Key")
+    parser.add_argument("--mcp-url", default=DEFAULT_MCP_URL, help="MCP Server URL")
+    parser.add_argument(
+        "--mcp-config", default=DEFAULT_MCP_CONFIG, help="MCP Server Config"
+    )
+    parser.add_argument(
+        "--skills-directory",
+        default=DEFAULT_SKILLS_DIRECTORY,
+        help="Directory containing agent skills",
+    )
+
+    parser.add_argument(
+        "--web",
+        action="store_true",
+        default=DEFAULT_ENABLE_WEB_UI,
+        help="Enable Pydantic AI Web UI",
+    )
+
+    args = parser.parse_args()
+
+    if args.debug:
+        # Force reconfiguration of logging
+        for handler in logging.root.handlers[:]:
+            logging.root.removeHandler(handler)
+
+        logging.basicConfig(
+            level=logging.DEBUG,
+            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+            handlers=[logging.StreamHandler()],
+            force=True,
+        )
+        logging.getLogger("pydantic_ai").setLevel(logging.DEBUG)
+        logging.getLogger("fastmcp").setLevel(logging.DEBUG)
+        logging.getLogger("httpcore").setLevel(logging.DEBUG)
+        logging.getLogger("httpx").setLevel(logging.DEBUG)
+        logger.setLevel(logging.DEBUG)
+        logger.debug("Debug mode enabled")
+
+    # Create the agent with CLI args
+    create_agent_server(
+        provider=args.provider,
+        model_id=args.model_id,
+        base_url=args.base_url,
+        api_key=args.api_key,
+        mcp_url=args.mcp_url,
+        mcp_config=args.mcp_config,
+        skills_directory=args.skills_directory,
+        debug=args.debug,
+        host=args.host,
+        port=args.port,
+        enable_web_ui=args.web,
+    )
+
+
+if __name__ == "__main__":
+    agent_server()
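Once the server is up, the A2A sub-app is mounted at /a2a and AG-UI requests go to POST /ag-ui. A client-side sketch with httpx; the agent-card path is an assumption based on the usual A2A well-known convention, not something this diff shows:

import httpx

# Assumed discovery path: fasta2a apps conventionally expose the agent card
# at /.well-known/agent.json, which lands under the /a2a mount here.
card = httpx.get("http://127.0.0.1:9000/a2a/.well-known/agent.json").json()
print(card.get("name"), [skill.get("id") for skill in card.get("skills", [])])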