container-manager-mcp 1.0.3-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- container_manager_mcp/__init__.py +23 -17
- container_manager_mcp/__main__.py +2 -2
- container_manager_mcp/container_manager.py +555 -441
- container_manager_mcp/container_manager_a2a.py +339 -0
- container_manager_mcp/container_manager_mcp.py +2055 -1323
- container_manager_mcp/mcp_config.json +7 -0
- container_manager_mcp/skills/container-manager-compose/SKILL.md +25 -0
- container_manager_mcp/skills/container-manager-containers/SKILL.md +28 -0
- container_manager_mcp/skills/container-manager-containers/troubleshoot.md +5 -0
- container_manager_mcp/skills/container-manager-images/SKILL.md +25 -0
- container_manager_mcp/skills/container-manager-info/SKILL.md +23 -0
- container_manager_mcp/skills/container-manager-logs/SKILL.md +22 -0
- container_manager_mcp/skills/container-manager-networks/SKILL.md +22 -0
- container_manager_mcp/skills/container-manager-swarm/SKILL.md +28 -0
- container_manager_mcp/skills/container-manager-swarm/orchestrate.md +4 -0
- container_manager_mcp/skills/container-manager-system/SKILL.md +19 -0
- container_manager_mcp/skills/container-manager-volumes/SKILL.md +23 -0
- container_manager_mcp/utils.py +31 -0
- container_manager_mcp-1.2.0.dist-info/METADATA +371 -0
- container_manager_mcp-1.2.0.dist-info/RECORD +26 -0
- container_manager_mcp-1.2.0.dist-info/entry_points.txt +4 -0
- {container_manager_mcp-1.0.3.dist-info → container_manager_mcp-1.2.0.dist-info}/top_level.txt +1 -0
- scripts/validate_a2a_agent.py +150 -0
- scripts/validate_agent.py +67 -0
- container_manager_mcp-1.0.3.dist-info/METADATA +0 -243
- container_manager_mcp-1.0.3.dist-info/RECORD +0 -10
- container_manager_mcp-1.0.3.dist-info/entry_points.txt +0 -3
- {container_manager_mcp-1.0.3.dist-info → container_manager_mcp-1.2.0.dist-info}/WHEEL +0 -0
- {container_manager_mcp-1.0.3.dist-info → container_manager_mcp-1.2.0.dist-info}/licenses/LICENSE +0 -0
container_manager_mcp/container_manager_a2a.py (new file)

```diff
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# coding: utf-8
+import os
+import argparse
+import logging
+import uvicorn
+from typing import Optional, Any, List
+from pathlib import Path
+import yaml
+
+from fastmcp import Client
+from pydantic_ai import Agent
+from pydantic_ai.mcp import load_mcp_servers
+from pydantic_ai.toolsets.fastmcp import FastMCPToolset
+from pydantic_ai_skills import SkillsToolset
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.models.anthropic import AnthropicModel
+from pydantic_ai.models.google import GoogleModel
+from pydantic_ai.models.huggingface import HuggingFaceModel
+from fasta2a import Skill
+from container_manager_mcp.utils import to_integer, to_boolean
+from importlib.resources import files, as_file
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+    handlers=[logging.StreamHandler()],  # Output to console
+)
+logging.getLogger("pydantic_ai").setLevel(logging.INFO)
+logging.getLogger("fastmcp").setLevel(logging.INFO)
+logging.getLogger("httpx").setLevel(logging.INFO)
+logger = logging.getLogger(__name__)
+
+mcp_config_file = files("container_manager_mcp") / "mcp_config.json"
+with as_file(mcp_config_file) as path:
+    mcp_config_path = str(path)
+
+skills_dir = files("container_manager_mcp") / "skills"
+with as_file(skills_dir) as path:
+    skills_path = str(path)
+
+DEFAULT_HOST = os.getenv("HOST", "0.0.0.0")
+DEFAULT_PORT = to_integer(string=os.getenv("PORT", "9000"))
+DEFAULT_DEBUG = to_boolean(string=os.getenv("DEBUG", "False"))
+DEFAULT_PROVIDER = os.getenv("PROVIDER", "openai")
+DEFAULT_MODEL_ID = os.getenv("MODEL_ID", "qwen/qwen3-8b")
+DEFAULT_OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "http://127.0.0.1:1234/v1")
+DEFAULT_OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "ollama")
+DEFAULT_MCP_URL = os.getenv("MCP_URL", None)
+DEFAULT_MCP_CONFIG = os.getenv("MCP_CONFIG", mcp_config_path)
+DEFAULT_SKILLS_DIRECTORY = os.getenv("SKILLS_DIRECTORY", skills_path)
+
+AGENT_NAME = "ContainerManagerOrchestrator"
+AGENT_DESCRIPTION = (
+    "A multi-agent system for managing container tasks via delegated specialists."
+)
+
+
+def create_model(
+    provider: str = DEFAULT_PROVIDER,
+    model_id: str = DEFAULT_MODEL_ID,
+    base_url: Optional[str] = DEFAULT_OPENAI_BASE_URL,
+    api_key: Optional[str] = DEFAULT_OPENAI_API_KEY,
+):
+    if provider == "openai":
+        target_base_url = base_url or DEFAULT_OPENAI_BASE_URL
+        target_api_key = api_key or DEFAULT_OPENAI_API_KEY
+        if target_base_url:
+            os.environ["OPENAI_BASE_URL"] = target_base_url
+        if target_api_key:
+            os.environ["OPENAI_API_KEY"] = target_api_key
+        return OpenAIChatModel(model_id, provider="openai")
+
+    elif provider == "anthropic":
+        if api_key:
+            os.environ["ANTHROPIC_API_KEY"] = api_key
+        return AnthropicModel(model_id)
+
+    elif provider == "google":
+        if api_key:
+            os.environ["GEMINI_API_KEY"] = api_key
+            os.environ["GOOGLE_API_KEY"] = api_key
+        return GoogleModel(model_id)
+
+    elif provider == "huggingface":
+        if api_key:
+            os.environ["HF_TOKEN"] = api_key
+        return HuggingFaceModel(model_id)
+
+
+def create_agent(
+    provider: str = DEFAULT_PROVIDER,
+    model_id: str = DEFAULT_MODEL_ID,
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+    mcp_url: str = DEFAULT_MCP_URL,
+    mcp_config: str = DEFAULT_MCP_CONFIG,
+    skills_directory: Optional[str] = DEFAULT_SKILLS_DIRECTORY,
+) -> Agent:
+    agent_toolsets = []
+
+    if mcp_config:
+        mcp_toolset = load_mcp_servers(mcp_config)
+        agent_toolsets.extend(mcp_toolset)
+        logger.info(f"Connected to MCP Config JSON: {mcp_toolset}")
+    elif mcp_url:
+        fastmcp_toolset = FastMCPToolset(Client[Any](mcp_url, timeout=3600))
+        agent_toolsets.append(fastmcp_toolset)
+        logger.info(f"Connected to MCP Server: {mcp_url}")
+
+    if skills_directory and os.path.exists(skills_directory):
+        logger.debug(f"Loading skills {skills_directory}")
+        skills = SkillsToolset(directories=[str(skills_directory)])
+        agent_toolsets.append(skills)
+        logger.info(f"Loaded Skills at {skills_directory}")
+
+    # Create the Model
+    model = create_model(provider, model_id, base_url, api_key)
+
+    logger.info("Initializing Agent...")
+
+    return Agent(
+        model=model,
+        system_prompt=(
+            "You are the Container Manager Agent.\n"
+            "You have access to all skills and toolsets to interact with the Docker/Podman API.\n"
+            "Your responsibilities:\n"
+            "1. Analyze the user's request.\n"
+            "2. Identify the domain (e.g., logs, volumes, images, run, compose, etc) and select the appropriate skills.\n"
+            "4. If a complicated task requires multiple skills (e.g. 'verify the logs of container XXXX and bring down my compose services'), "
+            " orchestrate them sequentially: call the Log skill, then the Compose skill.\n"
+            "5. Always be warm, professional, and helpful.\n"
+            "6. Explain your plan in detail before executing."
+        ),
+        name="Container Manager Agent",
+        toolsets=agent_toolsets,
+        deps_type=Any,
+    )
+
+
+async def chat(agent: Agent, prompt: str):
+    result = await agent.run(prompt)
+    print(f"Response:\n\n{result.output}")
+
+
+async def node_chat(agent: Agent, prompt: str) -> List:
+    nodes = []
+    async with agent.iter(prompt) as agent_run:
+        async for node in agent_run:
+            nodes.append(node)
+            print(node)
+    return nodes
+
+
+async def stream_chat(agent: Agent, prompt: str) -> None:
+    # Option A: Easiest & most common - just stream the final text output
+    async with agent.run_stream(prompt) as result:
+        async for text_chunk in result.stream_text(
+            delta=True
+        ):  # ← streams partial text deltas
+            print(text_chunk, end="", flush=True)
+    print("\nDone!")  # optional
+
+
+def load_skills_from_directory(directory: str) -> List[Skill]:
+    skills = []
+    base_path = Path(directory)
+
+    if not base_path.exists():
+        logger.warning(f"Skills directory not found: {directory}")
+        return skills
+
+    for item in base_path.iterdir():
+        if item.is_dir():
+            skill_file = item / "SKILL.md"
+            if skill_file.exists():
+                try:
+                    with open(skill_file, "r") as f:
+                        # Extract frontmatter
+                        content = f.read()
+                        if content.startswith("---"):
+                            _, frontmatter, _ = content.split("---", 2)
+                            data = yaml.safe_load(frontmatter)
+
+                            skill_id = item.name
+                            skill_name = data.get("name", skill_id)
+                            skill_desc = data.get(
+                                "description", f"Access to {skill_name} tools"
+                            )
+
+                            # Generate tags: "container-manager" + folder name without "container-manager-" prefix
+                            tag_name = skill_id.replace("container-manager-", "")
+                            tags = ["container-manager", tag_name]
+
+                            skills.append(
+                                Skill(
+                                    id=skill_id,
+                                    name=skill_name,
+                                    description=skill_desc,
+                                    tags=tags,
+                                    input_modes=["text"],
+                                    output_modes=["text"],
+                                )
+                            )
+                except Exception as e:
+                    logger.error(f"Error loading skill from {skill_file}: {e}")
+
+    return skills
+
+
+def create_a2a_server(
+    provider: str = DEFAULT_PROVIDER,
+    model_id: str = DEFAULT_MODEL_ID,
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+    mcp_url: str = DEFAULT_MCP_URL,
+    mcp_config: str = DEFAULT_MCP_CONFIG,
+    skills_directory: Optional[str] = DEFAULT_SKILLS_DIRECTORY,
+    debug: Optional[bool] = DEFAULT_DEBUG,
+    host: Optional[str] = DEFAULT_HOST,
+    port: Optional[int] = DEFAULT_PORT,
+):
+    print(
+        f"Starting {AGENT_NAME} with provider={provider}, model={model_id}, mcp={mcp_url} | {mcp_config}"
+    )
+    agent = create_agent(
+        provider=provider,
+        model_id=model_id,
+        base_url=base_url,
+        api_key=api_key,
+        mcp_url=mcp_url,
+        mcp_config=mcp_config,
+        skills_directory=skills_directory,
+    )
+
+    # Define Skills for Agent Card (High-level capabilities)
+    if skills_directory and os.path.exists(skills_directory):
+        skills = load_skills_from_directory(skills_directory)
+        logger.info(f"Loaded {len(skills)} skills from {skills_directory}")
+    else:
+        skills = [
+            Skill(
+                id="container_manager_agent",
+                name="Container Manager Agent",
+                description="This Container Manager skill grants access to the host or a remote docker API host to manager the host's container service through an MCP Server",
+                tags=["container_manager"],
+                input_modes=["text"],
+                output_modes=["text"],
+            )
+        ]
+    # Create A2A App
+    app = agent.to_a2a(
+        name=AGENT_NAME,
+        description=AGENT_DESCRIPTION,
+        version="1.2.0",
+        skills=skills,
+        debug=debug,
+    )
+
+    logger.info(
+        "Starting A2A server with provider=%s, model=%s, mcp_url=%s, mcp_config=%s",
+        provider,
+        model_id,
+        mcp_url,
+        mcp_config,
+    )
+
+    uvicorn.run(
+        app,
+        host=host,
+        port=port,
+        log_level="debug" if debug else "info",
+    )
+
+
+def agent_server():
+    parser = argparse.ArgumentParser(description=f"Run the {AGENT_NAME} A2A Server")
+    parser.add_argument(
+        "--host", default=DEFAULT_HOST, help="Host to bind the server to"
+    )
+    parser.add_argument(
+        "--port", type=int, default=DEFAULT_PORT, help="Port to bind the server to"
+    )
+    parser.add_argument("--debug", type=bool, default=DEFAULT_DEBUG, help="Debug mode")
+    parser.add_argument("--reload", action="store_true", help="Enable auto-reload")
+
+    parser.add_argument(
+        "--provider",
+        default=DEFAULT_PROVIDER,
+        choices=["openai", "anthropic", "google", "huggingface"],
+        help="LLM Provider",
+    )
+    parser.add_argument("--model-id", default=DEFAULT_MODEL_ID, help="LLM Model ID")
+    parser.add_argument(
+        "--base-url",
+        default=DEFAULT_OPENAI_BASE_URL,
+        help="LLM Base URL (for OpenAI compatible providers)",
+    )
+    parser.add_argument("--api-key", default=DEFAULT_OPENAI_API_KEY, help="LLM API Key")
+    parser.add_argument("--mcp-url", default=DEFAULT_MCP_URL, help="MCP Server URL")
+    parser.add_argument(
+        "--mcp-config", default=DEFAULT_MCP_CONFIG, help="MCP Server Config"
+    )
+    args = parser.parse_args()
+
+    if args.debug:
+        # Force reconfiguration of logging
+        for handler in logging.root.handlers[:]:
+            logging.root.removeHandler(handler)
+
+        logging.basicConfig(
+            level=logging.DEBUG,
+            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+            handlers=[logging.StreamHandler()],  # Output to console
+            force=True,
+        )
+        logging.getLogger("pydantic_ai").setLevel(logging.DEBUG)
+        logging.getLogger("fastmcp").setLevel(logging.DEBUG)
+        logging.getLogger("httpcore").setLevel(logging.DEBUG)
+        logging.getLogger("httpx").setLevel(logging.DEBUG)
+        logger.setLevel(logging.DEBUG)
+        logger.debug("Debug mode enabled")
+
+    # Create the agent with CLI args
+    create_a2a_server(
+        provider=args.provider,
+        model_id=args.model_id,
+        base_url=args.base_url,
+        api_key=args.api_key,
+        mcp_url=args.mcp_url,
+        mcp_config=args.mcp_config,
+        debug=args.debug,
+        host=args.host,
+        port=args.port,
+    )
+
+
+if __name__ == "__main__":
+    agent_server()
```
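
The new module exposes the package's MCP tooling as an A2A (agent-to-agent) HTTP service. A minimal sketch of starting it in-process, assuming the package's bundled defaults (the built-in `mcp_config.json` and `skills/` directory); the host/port values are illustrative, and `create_a2a_server` blocks inside `uvicorn.run()`:

```python
# Minimal sketch: start the A2A server in-process, equivalent to the
# agent_server() CLI entry point. Host/port here are illustrative;
# this call does not return until uvicorn.run() exits.
from container_manager_mcp.container_manager_a2a import create_a2a_server

create_a2a_server(host="127.0.0.1", port=9000, debug=True)
```

Note that `create_agent` checks `mcp_config` before `mcp_url`, and `DEFAULT_MCP_CONFIG` falls back to the bundled `mcp_config.json`, so a `--mcp-url` value only takes effect when the config path is explicitly emptied.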
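
For one-off use without the HTTP server, the module's helpers can be driven directly. A sketch assuming an OpenAI-compatible endpoint at the module's default `http://127.0.0.1:1234/v1`; the prompt and container name are illustrative:

```python
# Sketch: run a single prompt through the agent without the A2A server.
# Assumes an OpenAI-compatible endpoint at the module default
# http://127.0.0.1:1234/v1; the prompt and container name are made up.
import asyncio

from container_manager_mcp.container_manager_a2a import create_agent, stream_chat


async def main() -> None:
    agent = create_agent(provider="openai", model_id="qwen/qwen3-8b")
    # stream_chat prints partial text deltas as the model responds
    await stream_chat(agent, "Show the last 50 log lines for container web-1")


asyncio.run(main())
```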
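
`load_skills_from_directory` builds the agent-card `Skill` entries from the YAML frontmatter of each `SKILL.md` (the text between the first two `---` markers, parsed with `yaml.safe_load`). A sketch of the layout it expects, with a hypothetical directory and field values:

```python
# Sketch of the SKILL.md layout load_skills_from_directory() parses.
# Only the YAML between the first two "---" markers is read; the folder
# name becomes the Skill id, and the tag is derived by stripping the
# "container-manager-" prefix. All names/values here are hypothetical.
from pathlib import Path
from tempfile import TemporaryDirectory

from container_manager_mcp.container_manager_a2a import load_skills_from_directory

SKILL_MD = """---
name: container-manager-logs
description: Fetch and inspect container logs
---
Skill body (instructions for the model) goes here.
"""

with TemporaryDirectory() as tmp:
    skill_dir = Path(tmp) / "container-manager-logs"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(SKILL_MD)

    # Yields one Skill tagged ["container-manager", "logs"]
    print(load_skills_from_directory(tmp))
```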