kubectl_mcp_server-1.12.0-py3-none-any.whl
This diff shows the content of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- kubectl_mcp_server-1.12.0.dist-info/METADATA +711 -0
- kubectl_mcp_server-1.12.0.dist-info/RECORD +45 -0
- kubectl_mcp_server-1.12.0.dist-info/WHEEL +5 -0
- kubectl_mcp_server-1.12.0.dist-info/entry_points.txt +3 -0
- kubectl_mcp_server-1.12.0.dist-info/licenses/LICENSE +21 -0
- kubectl_mcp_server-1.12.0.dist-info/top_level.txt +2 -0
- kubectl_mcp_tool/__init__.py +21 -0
- kubectl_mcp_tool/__main__.py +46 -0
- kubectl_mcp_tool/auth/__init__.py +13 -0
- kubectl_mcp_tool/auth/config.py +71 -0
- kubectl_mcp_tool/auth/scopes.py +148 -0
- kubectl_mcp_tool/auth/verifier.py +82 -0
- kubectl_mcp_tool/cli/__init__.py +9 -0
- kubectl_mcp_tool/cli/__main__.py +10 -0
- kubectl_mcp_tool/cli/cli.py +111 -0
- kubectl_mcp_tool/diagnostics.py +355 -0
- kubectl_mcp_tool/k8s_config.py +289 -0
- kubectl_mcp_tool/mcp_server.py +530 -0
- kubectl_mcp_tool/prompts/__init__.py +5 -0
- kubectl_mcp_tool/prompts/prompts.py +823 -0
- kubectl_mcp_tool/resources/__init__.py +5 -0
- kubectl_mcp_tool/resources/resources.py +305 -0
- kubectl_mcp_tool/tools/__init__.py +28 -0
- kubectl_mcp_tool/tools/browser.py +371 -0
- kubectl_mcp_tool/tools/cluster.py +315 -0
- kubectl_mcp_tool/tools/core.py +421 -0
- kubectl_mcp_tool/tools/cost.py +680 -0
- kubectl_mcp_tool/tools/deployments.py +381 -0
- kubectl_mcp_tool/tools/diagnostics.py +174 -0
- kubectl_mcp_tool/tools/helm.py +1561 -0
- kubectl_mcp_tool/tools/networking.py +296 -0
- kubectl_mcp_tool/tools/operations.py +501 -0
- kubectl_mcp_tool/tools/pods.py +582 -0
- kubectl_mcp_tool/tools/security.py +333 -0
- kubectl_mcp_tool/tools/storage.py +133 -0
- kubectl_mcp_tool/utils/__init__.py +17 -0
- kubectl_mcp_tool/utils/helpers.py +80 -0
- tests/__init__.py +9 -0
- tests/conftest.py +379 -0
- tests/test_auth.py +256 -0
- tests/test_browser.py +349 -0
- tests/test_prompts.py +536 -0
- tests/test_resources.py +343 -0
- tests/test_server.py +384 -0
- tests/test_tools.py +659 -0
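
For orientation, the tool modules listed above all follow the registration pattern shown in the two files below. A minimal wiring sketch, assuming a FastMCP-style server from the MCP Python SDK; the FastMCP construction and run() call are assumptions, since only the register_*_tools functions appear in this diff:

# Hypothetical wiring sketch; the server construction is assumed, not taken from this package.
from mcp.server.fastmcp import FastMCP

from kubectl_mcp_tool.tools.deployments import register_deployment_tools
from kubectl_mcp_tool.tools.diagnostics import register_diagnostics_tools

server = FastMCP("kubectl-mcp-server")                    # assumed server object
register_deployment_tools(server, non_destructive=True)   # True blocks mutating tools
register_diagnostics_tools(server, non_destructive=True)
server.run()                                              # assumed entry point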
kubectl_mcp_tool/tools/deployments.py
@@ -0,0 +1,381 @@

import logging
import subprocess
from typing import Any, Dict, Optional

from mcp.types import ToolAnnotations

logger = logging.getLogger("mcp-server")


def register_deployment_tools(server, non_destructive: bool):
    """Register deployment and workload management tools."""

    @server.tool(
        annotations=ToolAnnotations(
            title="Get Deployments",
            readOnlyHint=True,
        ),
    )
    def get_deployments(namespace: Optional[str] = None) -> Dict[str, Any]:
        """Get all deployments in the specified namespace."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            apps = client.AppsV1Api()

            if namespace:
                deployments = apps.list_namespaced_deployment(namespace)
            else:
                deployments = apps.list_deployment_for_all_namespaces()

            return {
                "success": True,
                "deployments": [
                    {
                        "name": d.metadata.name,
                        "namespace": d.metadata.namespace,
                        "replicas": d.spec.replicas,
                        "ready": d.status.ready_replicas or 0,
                        "available": d.status.available_replicas or 0
                    }
                    for d in deployments.items
                ]
            }
        except Exception as e:
            logger.error(f"Error getting deployments: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Create Deployment",
            destructiveHint=True,
        ),
    )
    def create_deployment(name: str, image: str, replicas: int, namespace: Optional[str] = "default") -> Dict[str, Any]:
        """Create a new deployment."""
        if non_destructive:
            return {"success": False, "error": "Blocked: non-destructive mode"}
        try:
            from kubernetes import client, config
            config.load_kube_config()
            apps = client.AppsV1Api()

            deployment = client.V1Deployment(
                metadata=client.V1ObjectMeta(name=name),
                spec=client.V1DeploymentSpec(
                    replicas=replicas,
                    selector=client.V1LabelSelector(
                        match_labels={"app": name}
                    ),
                    template=client.V1PodTemplateSpec(
                        metadata=client.V1ObjectMeta(labels={"app": name}),
                        spec=client.V1PodSpec(
                            containers=[
                                client.V1Container(
                                    name=name,
                                    image=image
                                )
                            ]
                        )
                    )
                )
            )

            apps.create_namespaced_deployment(namespace, deployment)
            return {"success": True, "message": f"Deployment {name} created"}
        except Exception as e:
            logger.error(f"Error creating deployment: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Scale Deployment",
            destructiveHint=True,
        ),
    )
    def scale_deployment(name: str, replicas: int, namespace: Optional[str] = "default") -> Dict[str, Any]:
        """Scale a deployment to a specified number of replicas."""
        if non_destructive:
            return {"success": False, "error": "Blocked: non-destructive mode"}
        try:
            result = subprocess.run(
                ["kubectl", "scale", "deployment", name, f"--replicas={replicas}", "-n", namespace],
                capture_output=True, text=True, timeout=30
            )
            if result.returncode == 0:
                return {"success": True, "message": f"Deployment {name} scaled to {replicas} replicas"}
            return {"success": False, "error": result.stderr}
        except Exception as e:
            logger.error(f"Error scaling deployment: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Restart Deployment",
            destructiveHint=True,
        ),
    )
    def restart_deployment(name: str, namespace: str = "default") -> Dict[str, Any]:
        """Restart a deployment by triggering a rolling update."""
        if non_destructive:
            return {"success": False, "error": "Blocked: non-destructive mode"}
        try:
            result = subprocess.run(
                ["kubectl", "rollout", "restart", "deployment", name, "-n", namespace],
                capture_output=True, text=True, timeout=30
            )
            if result.returncode == 0:
                return {"success": True, "message": f"Deployment {name} restarted"}
            return {"success": False, "error": result.stderr}
        except Exception as e:
            logger.error(f"Error restarting deployment: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get ReplicaSets",
            readOnlyHint=True,
        ),
    )
    def get_replicasets(namespace: Optional[str] = None) -> Dict[str, Any]:
        """Get ReplicaSets in a namespace or cluster-wide."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            apps = client.AppsV1Api()

            if namespace:
                rs_list = apps.list_namespaced_replica_set(namespace)
            else:
                rs_list = apps.list_replica_set_for_all_namespaces()

            return {
                "success": True,
                "replicaSets": [
                    {
                        "name": rs.metadata.name,
                        "namespace": rs.metadata.namespace,
                        "desired": rs.spec.replicas,
                        "ready": rs.status.ready_replicas or 0,
                        "available": rs.status.available_replicas or 0,
                        "ownerReferences": [
                            {"kind": ref.kind, "name": ref.name}
                            for ref in (rs.metadata.owner_references or [])
                        ]
                    }
                    for rs in rs_list.items
                ]
            }
        except Exception as e:
            logger.error(f"Error getting ReplicaSets: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get StatefulSets",
            readOnlyHint=True,
        ),
    )
    def get_statefulsets(namespace: Optional[str] = None) -> Dict[str, Any]:
        """Get StatefulSets in a namespace or cluster-wide."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            apps = client.AppsV1Api()

            if namespace:
                sts_list = apps.list_namespaced_stateful_set(namespace)
            else:
                sts_list = apps.list_stateful_set_for_all_namespaces()

            return {
                "success": True,
                "statefulSets": [
                    {
                        "name": sts.metadata.name,
                        "namespace": sts.metadata.namespace,
                        "replicas": sts.spec.replicas,
                        "ready": sts.status.ready_replicas or 0,
                        "currentReplicas": sts.status.current_replicas or 0,
                        "serviceName": sts.spec.service_name,
                        "updateStrategy": sts.spec.update_strategy.type if sts.spec.update_strategy else None
                    }
                    for sts in sts_list.items
                ]
            }
        except Exception as e:
            logger.error(f"Error getting StatefulSets: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get DaemonSets",
            readOnlyHint=True,
        ),
    )
    def get_daemonsets(namespace: Optional[str] = None) -> Dict[str, Any]:
        """Get DaemonSets in a namespace or cluster-wide."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            apps = client.AppsV1Api()

            if namespace:
                ds_list = apps.list_namespaced_daemon_set(namespace)
            else:
                ds_list = apps.list_daemon_set_for_all_namespaces()

            return {
                "success": True,
                "daemonSets": [
                    {
                        "name": ds.metadata.name,
                        "namespace": ds.metadata.namespace,
                        "desired": ds.status.desired_number_scheduled,
                        "current": ds.status.current_number_scheduled,
                        "ready": ds.status.number_ready,
                        "available": ds.status.number_available or 0,
                        "nodeSelector": ds.spec.template.spec.node_selector
                    }
                    for ds in ds_list.items
                ]
            }
        except Exception as e:
            logger.error(f"Error getting DaemonSets: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get Jobs and CronJobs",
            readOnlyHint=True,
        ),
    )
    def get_jobs(namespace: Optional[str] = None, include_cronjobs: bool = True) -> Dict[str, Any]:
        """Get Jobs and optionally CronJobs."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            batch = client.BatchV1Api()

            if namespace:
                jobs = batch.list_namespaced_job(namespace)
            else:
                jobs = batch.list_job_for_all_namespaces()

            result = {
                "success": True,
                "jobs": [
                    {
                        "name": job.metadata.name,
                        "namespace": job.metadata.namespace,
                        "completions": job.spec.completions,
                        "succeeded": job.status.succeeded or 0,
                        "failed": job.status.failed or 0,
                        "active": job.status.active or 0,
                        "startTime": job.status.start_time.isoformat() if job.status.start_time else None,
                        "completionTime": job.status.completion_time.isoformat() if job.status.completion_time else None
                    }
                    for job in jobs.items
                ]
            }

            if include_cronjobs:
                if namespace:
                    cronjobs = batch.list_namespaced_cron_job(namespace)
                else:
                    cronjobs = batch.list_cron_job_for_all_namespaces()

                result["cronJobs"] = [
                    {
                        "name": cj.metadata.name,
                        "namespace": cj.metadata.namespace,
                        "schedule": cj.spec.schedule,
                        "suspend": cj.spec.suspend,
                        "lastScheduleTime": cj.status.last_schedule_time.isoformat() if cj.status.last_schedule_time else None,
                        "activeJobs": len(cj.status.active or [])
                    }
                    for cj in cronjobs.items
                ]

            return result
        except Exception as e:
            logger.error(f"Error getting Jobs: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get HorizontalPodAutoscalers",
            readOnlyHint=True,
        ),
    )
    def get_hpa(namespace: Optional[str] = None) -> Dict[str, Any]:
        """Get HorizontalPodAutoscalers in a namespace or cluster-wide."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            autoscaling = client.AutoscalingV2Api()

            if namespace:
                hpas = autoscaling.list_namespaced_horizontal_pod_autoscaler(namespace)
            else:
                hpas = autoscaling.list_horizontal_pod_autoscaler_for_all_namespaces()

            return {
                "success": True,
                "hpas": [
                    {
                        "name": hpa.metadata.name,
                        "namespace": hpa.metadata.namespace,
                        "targetRef": {
                            "kind": hpa.spec.scale_target_ref.kind,
                            "name": hpa.spec.scale_target_ref.name
                        },
                        "minReplicas": hpa.spec.min_replicas,
                        "maxReplicas": hpa.spec.max_replicas,
                        "currentReplicas": hpa.status.current_replicas,
                        "desiredReplicas": hpa.status.desired_replicas
                    }
                    for hpa in hpas.items
                ]
            }
        except Exception as e:
            logger.error(f"Error getting HPAs: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get PodDisruptionBudgets",
            readOnlyHint=True,
        ),
    )
    def get_pdb(namespace: Optional[str] = None) -> Dict[str, Any]:
        """Get PodDisruptionBudgets in a namespace or cluster-wide."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            policy = client.PolicyV1Api()

            if namespace:
                pdbs = policy.list_namespaced_pod_disruption_budget(namespace)
            else:
                pdbs = policy.list_pod_disruption_budget_for_all_namespaces()

            return {
                "success": True,
                "pdbs": [
                    {
                        "name": pdb.metadata.name,
                        "namespace": pdb.metadata.namespace,
                        "minAvailable": str(pdb.spec.min_available) if pdb.spec.min_available else None,
                        "maxUnavailable": str(pdb.spec.max_unavailable) if pdb.spec.max_unavailable else None,
                        "currentHealthy": pdb.status.current_healthy,
                        "desiredHealthy": pdb.status.desired_healthy,
                        "disruptionsAllowed": pdb.status.disruptions_allowed,
                        "expectedPods": pdb.status.expected_pods
                    }
                    for pdb in pdbs.items
                ]
            }
        except Exception as e:
            logger.error(f"Error getting PDBs: {e}")
            return {"success": False, "error": str(e)}
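
For reference, a sketch of the response shape a successful get_deployments call produces; the names and counts below are invented, but the keys mirror the dictionary built above:

# Illustrative get_deployments(namespace="default") return value (invented data).
{
    "success": True,
    "deployments": [
        {"name": "web", "namespace": "default", "replicas": 3, "ready": 3, "available": 2}
    ]
}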
kubectl_mcp_tool/tools/diagnostics.py
@@ -0,0 +1,174 @@

import logging
import subprocess
from typing import Any, Dict, Optional

from mcp.types import ToolAnnotations

logger = logging.getLogger("mcp-server")


def register_diagnostics_tools(server, non_destructive: bool):
    """Register diagnostic and troubleshooting tools.

    Note: Pod-specific diagnostic tools (diagnose_pod_crash, detect_pending_pods,
    get_evicted_pods, get_pod_conditions, get_previous_logs) are in pods.py.
    This module contains additional diagnostic tools for namespace comparison and metrics.
    """

    @server.tool(
        annotations=ToolAnnotations(
            title="Compare Namespaces",
            readOnlyHint=True,
        ),
    )
    def compare_namespaces(namespace1: str, namespace2: str, resource_type: str = "deployment") -> Dict[str, Any]:
        """Compare resources between two namespaces."""
        try:
            from kubernetes import client, config
            config.load_kube_config()
            apps = client.AppsV1Api()
            v1 = client.CoreV1Api()

            def get_resources(ns, res_type):
                if res_type == "deployment":
                    items = apps.list_namespaced_deployment(ns).items
                    return {d.metadata.name: d.spec.replicas for d in items}
                elif res_type == "service":
                    items = v1.list_namespaced_service(ns).items
                    return {s.metadata.name: s.spec.type for s in items}
                elif res_type == "configmap":
                    items = v1.list_namespaced_config_map(ns).items
                    return {c.metadata.name: len(c.data or {}) for c in items}
                elif res_type == "secret":
                    items = v1.list_namespaced_secret(ns).items
                    return {s.metadata.name: s.type for s in items}
                else:
                    return {}

            res1 = get_resources(namespace1, resource_type)
            res2 = get_resources(namespace2, resource_type)

            only_in_ns1 = [name for name in res1 if name not in res2]
            only_in_ns2 = [name for name in res2 if name not in res1]
            in_both = [name for name in res1 if name in res2]

            differences = []
            for name in in_both:
                if res1[name] != res2[name]:
                    differences.append({
                        "name": name,
                        f"{namespace1}": res1[name],
                        f"{namespace2}": res2[name]
                    })

            return {
                "success": True,
                "resourceType": resource_type,
                "namespaces": [namespace1, namespace2],
                "summary": {
                    f"onlyIn_{namespace1}": len(only_in_ns1),
                    f"onlyIn_{namespace2}": len(only_in_ns2),
                    "inBoth": len(in_both),
                    "withDifferences": len(differences)
                },
                f"onlyIn_{namespace1}": only_in_ns1,
                f"onlyIn_{namespace2}": only_in_ns2,
                "differences": differences
            }
        except Exception as e:
            logger.error(f"Error comparing namespaces: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get Pod Metrics",
            readOnlyHint=True,
        ),
    )
    def get_pod_metrics(namespace: Optional[str] = None, pod_name: Optional[str] = None) -> Dict[str, Any]:
        """Get pod resource usage metrics (requires metrics-server)."""
        try:
            cmd = ["kubectl", "top", "pods", "--no-headers"]
            if namespace:
                cmd.extend(["-n", namespace])
            else:
                cmd.append("-A")

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)

            if result.returncode != 0:
                return {"success": False, "error": result.stderr.strip() or "Metrics server not available"}

            metrics = []
            for line in result.stdout.strip().split("\n"):
                if not line.strip():
                    continue
                parts = line.split()
                if namespace and len(parts) >= 3:
                    name, cpu, memory = parts[0], parts[1], parts[2]
                    ns = namespace
                elif len(parts) >= 4:
                    ns, name, cpu, memory = parts[0], parts[1], parts[2], parts[3]
                else:
                    continue

                if pod_name and name != pod_name:
                    continue

                metrics.append({
                    "namespace": ns,
                    "pod": name,
                    "cpuUsage": cpu,
                    "memoryUsage": memory
                })

            return {
                "success": True,
                "count": len(metrics),
                "metrics": metrics
            }
        except subprocess.TimeoutExpired:
            return {"success": False, "error": "Metrics retrieval timed out"}
        except Exception as e:
            logger.error(f"Error getting pod metrics: {e}")
            return {"success": False, "error": str(e)}

    @server.tool(
        annotations=ToolAnnotations(
            title="Get Node Metrics",
            readOnlyHint=True,
        ),
    )
    def get_node_metrics() -> Dict[str, Any]:
        """Get node resource usage metrics (requires metrics-server)."""
        try:
            cmd = ["kubectl", "top", "nodes", "--no-headers"]
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)

            if result.returncode != 0:
                return {"success": False, "error": result.stderr.strip() or "Metrics server not available"}

            metrics = []
            for line in result.stdout.strip().split("\n"):
                if not line.strip():
                    continue
                parts = line.split()
                if len(parts) >= 5:
                    metrics.append({
                        "node": parts[0],
                        "cpuUsage": parts[1],
                        "cpuPercent": parts[2],
                        "memoryUsage": parts[3],
                        "memoryPercent": parts[4]
                    })

            return {
                "success": True,
                "count": len(metrics),
                "metrics": metrics
            }
        except subprocess.TimeoutExpired:
            return {"success": False, "error": "Metrics retrieval timed out"}
        except Exception as e:
            logger.error(f"Error getting node metrics: {e}")
            return {"success": False, "error": str(e)}
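
The parsing in get_pod_metrics depends on the `kubectl top` column layout: three columns (NAME, CPU, MEMORY) when a namespace is given via -n, four (NAMESPACE, NAME, CPU, MEMORY) with -A. A minimal standalone sketch of that same parsing, testable without a cluster; the sample output below is invented:

# Standalone restatement of get_pod_metrics' line parsing (sample values are made up).
sample = (
    "default      web-7d4b9c5f6-abcde    12m   34Mi\n"
    "kube-system  coredns-5dd5756b68-xk  3m    12Mi\n"
)

def parse_top_pods(stdout, namespace=None):
    rows = []
    for line in stdout.strip().split("\n"):
        parts = line.split()
        if namespace and len(parts) >= 3:
            # Namespaced output: namespace comes from the caller, not the line.
            ns, name, cpu, memory = namespace, parts[0], parts[1], parts[2]
        elif len(parts) >= 4:
            # Cluster-wide (-A) output: namespace is the first column.
            ns, name, cpu, memory = parts[0], parts[1], parts[2], parts[3]
        else:
            continue
        rows.append({"namespace": ns, "pod": name, "cpuUsage": cpu, "memoryUsage": memory})
    return rows

print(parse_top_pods(sample))  # two rows, namespaces taken from the first column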