kubectl-mcp-server 1.18.0__py3-none-any.whl → 1.19.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,6 +19,10 @@ from kubectl_mcp_tool.k8s_config import (
     get_admissionregistration_client,
     list_contexts,
     get_active_context,
+    enable_kubeconfig_watch,
+    disable_kubeconfig_watch,
+    is_stateless_mode,
+    set_stateless_mode,
 )
 
 logger = logging.getLogger("mcp-server")
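The four new imports correspond one-to-one to the new server-configuration tools added later in this diff. Their call shapes can be read off this file's own usage; a minimal sketch of driving the helpers directly (any behavior beyond these exact calls is an assumption):

import logging

# Hedged sketch: exercises the new k8s_config helpers exactly as this file
# calls them; semantics beyond these call shapes are assumptions.
from kubectl_mcp_tool.k8s_config import (
    enable_kubeconfig_watch,
    disable_kubeconfig_watch,
    is_stateless_mode,
    set_stateless_mode,
)

logging.basicConfig(level=logging.INFO)

set_stateless_mode(True)             # assumed: reload config on every request
assert is_stateless_mode() is True

enable_kubeconfig_watch(check_interval=5.0)  # poll kubeconfig every 5 seconds
disable_kubeconfig_watch()                   # stop the watcher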
@@ -59,8 +63,6 @@ def _validate_node_name(name: str) -> tuple:
 def register_cluster_tools(server, non_destructive: bool):
     """Register cluster and context management tools."""
 
-    # ========== Config Toolset ==========
-
     @server.tool(
         annotations=ToolAnnotations(
             title="List Contexts",
@@ -234,8 +236,6 @@ def register_cluster_tools(server, non_destructive: bool):
             logger.error(f"Error setting namespace: {e}")
             return {"success": False, "error": str(e)}
 
-    # ========== Cluster Info Tools ==========
-
     @server.tool(
         annotations=ToolAnnotations(
             title="Get Cluster Info",
@@ -588,7 +588,6 @@ def register_cluster_tools(server, non_destructive: bool):
                     }
                 }
 
-                # Get node status
                 for condition in (node.status.conditions or []):
                     if condition.type == "Ready":
                         node_info["status"] = "Ready" if condition.status == "True" else "NotReady"
@@ -597,7 +596,6 @@ def register_cluster_tools(server, non_destructive: bool):
                         else:
                             summary["notReady"] += 1
 
-                # Get node roles
                 for label, value in (node.metadata.labels or {}).items():
                     if label.startswith("node-role.kubernetes.io/"):
                         role = label.split("/")[1]
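The two loops above follow standard Kubernetes conventions: a node is Ready when its `Ready` condition reports `"True"`, and roles come from `node-role.kubernetes.io/<role>` labels. A standalone illustration of the label parsing:

# Standalone illustration of the node-role label parsing used above.
labels = {
    "node-role.kubernetes.io/control-plane": "",
    "kubernetes.io/hostname": "node-1",
}
roles = [
    label.split("/")[1]
    for label in labels
    if label.startswith("node-role.kubernetes.io/")
]
print(roles)  # ['control-plane']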
@@ -614,7 +612,140 @@ def register_cluster_tools(server, non_destructive: bool):
             logger.error(f"Error getting nodes summary: {e}")
             return {"success": False, "error": str(e)}
 
-    # ========== Node Kubelet Tools ==========
+    @server.tool(
+        annotations=ToolAnnotations(
+            title="Get Server Config Status",
+            readOnlyHint=True,
+        ),
+    )
+    def get_server_config_status() -> Dict[str, Any]:
+        """Get current server configuration status.
+
+        Returns information about:
+        - Stateless mode (whether API clients are cached)
+        - Kubeconfig watching (whether auto-reload is enabled)
+        - Available contexts
+        - Current active context
+        """
+        try:
+            from kubectl_mcp_tool.k8s_config import _kubeconfig_watcher
+
+            contexts = list_contexts()
+            active = get_active_context()
+
+            return {
+                "success": True,
+                "config": {
+                    "statelessMode": is_stateless_mode(),
+                    "kubeconfigWatching": _kubeconfig_watcher is not None,
+                },
+                "contexts": {
+                    "active": active,
+                    "available": [c.get("name") for c in contexts],
+                    "total": len(contexts)
+                }
+            }
+        except Exception as e:
+            logger.error(f"Error getting config status: {e}")
+            return {"success": False, "error": str(e)}
+
+    @server.tool(
+        annotations=ToolAnnotations(
+            title="Enable Kubeconfig Watching",
+            destructiveHint=True,
+        ),
+    )
+    def enable_kubeconfig_watching(
+        check_interval: float = 5.0
+    ) -> Dict[str, Any]:
+        """Enable automatic kubeconfig file watching.
+
+        When enabled, the server will automatically detect changes to kubeconfig
+        files and reload the configuration. This is useful when:
+        - Cloud provider CLIs update credentials (aws, gcloud, az)
+        - Users switch contexts using external tools
+        - Kubeconfig files are mounted dynamically
+
+        Args:
+            check_interval: How often to check for changes (seconds). Default: 5.0
+
+        Returns:
+            Status of the kubeconfig watcher.
+
+        Raises:
+            TypeError: If check_interval is not a number (int or float).
+            ValueError: If check_interval is not positive.
+        """
+        if not isinstance(check_interval, (int, float)):
+            raise TypeError(f"check_interval must be a number (int or float), got {type(check_interval).__name__}")
+        if check_interval <= 0:
+            raise ValueError(f"check_interval must be positive, got {check_interval}")
+
+        try:
+            enable_kubeconfig_watch(check_interval=check_interval)
+            return {
+                "success": True,
+                "message": f"Kubeconfig watching enabled (interval: {check_interval}s)",
+                "checkInterval": check_interval
+            }
+        except Exception as e:
+            logger.error(f"Error enabling kubeconfig watch: {e}")
+            return {"success": False, "error": str(e)}
+
+    @server.tool(
+        annotations=ToolAnnotations(
+            title="Disable Kubeconfig Watching",
+            destructiveHint=True,
+        ),
+    )
+    def disable_kubeconfig_watching() -> Dict[str, Any]:
+        """Disable automatic kubeconfig file watching.
+
+        Stops monitoring kubeconfig files for changes.
+        """
+        try:
+            disable_kubeconfig_watch()
+            return {
+                "success": True,
+                "message": "Kubeconfig watching disabled"
+            }
+        except Exception as e:
+            logger.error(f"Error disabling kubeconfig watch: {e}")
+            return {"success": False, "error": str(e)}
+
+    @server.tool(
+        annotations=ToolAnnotations(
+            title="Set Stateless Mode",
+            destructiveHint=True,
+        ),
+    )
+    def set_server_stateless_mode(
+        enabled: bool
+    ) -> Dict[str, Any]:
+        """Enable or disable stateless mode.
+
+        In stateless mode:
+        - API clients are not cached
+        - Configuration is reloaded on each request
+        - Useful for serverless/Lambda environments
+        - Useful when credentials may change frequently
+
+        Args:
+            enabled: True to enable stateless mode, False to disable
+
+        Returns:
+            New stateless mode status.
+        """
+        try:
+            set_stateless_mode(enabled)
+            return {
+                "success": True,
+                "statelessMode": enabled,
+                "message": f"Stateless mode {'enabled' if enabled else 'disabled'}"
+            }
+        except Exception as e:
+            logger.error(f"Error setting stateless mode: {e}")
+            return {"success": False, "error": str(e)}
 
     @server.tool(
         annotations=ToolAnnotations(
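This hunk only shows the tool-facing side; `_kubeconfig_watcher` itself lives in `kubectl_mcp_tool.k8s_config`, whose implementation is not part of this diff. Given the `check_interval` polling parameter, a watcher of roughly this shape is plausible (a sketch under stated assumptions, not the package's actual code; the single-path `KUBECONFIG` handling and the reload callback are assumptions):

import os
import threading
from pathlib import Path

class KubeconfigWatcherSketch:
    """Plausible shape for a polling kubeconfig watcher (assumption only)."""

    def __init__(self, on_change, check_interval: float = 5.0):
        self._on_change = on_change    # e.g. re-run kubernetes.config.load_kube_config
        self._interval = check_interval
        self._stop = threading.Event()
        # Assumes a single kubeconfig path; real KUBECONFIG may list several.
        self._path = Path(os.environ.get("KUBECONFIG", Path.home() / ".kube" / "config"))
        self._mtime = self._path.stat().st_mtime if self._path.exists() else None

    def start(self):
        threading.Thread(target=self._run, daemon=True).start()

    def stop(self):
        self._stop.set()

    def _run(self):
        # Event.wait returns True once stop() is called, ending the loop.
        while not self._stop.wait(self._interval):
            mtime = self._path.stat().st_mtime if self._path.exists() else None
            if mtime != self._mtime:   # file rewritten or replaced
                self._mtime = mtime
                self._on_change()

Polling on mtime rather than using inotify keeps the watcher portable across platforms, which fits the cloud-CLI credential-refresh use case the docstring describes.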
@@ -908,8 +1039,6 @@ def register_cluster_tools(server, non_destructive: bool):
             return {"success": False, "error": str(e)}
 
 
-# Helper functions for formatting kubelet stats
-
 def _format_cpu_stats(cpu: Dict) -> Dict[str, Any]:
     """Format CPU statistics from kubelet stats."""
     if not cpu:
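The hunk above only deletes a section comment; for context, `_format_cpu_stats` and its siblings consume the kubelet `/stats/summary` payload. A hedged sketch of what such a formatter typically does with that payload's CPU block (field names come from the kubelet summary API; the exact fields this package emits are not shown in the diff):

from typing import Any, Dict

def format_cpu_stats_sketch(cpu: Dict[str, Any]) -> Dict[str, Any]:
    """Sketch of a kubelet /stats/summary CPU formatter (not the package's code)."""
    if not cpu:
        return {}
    nano_cores = cpu.get("usageNanoCores", 0)
    return {
        "time": cpu.get("time"),
        "usageMillicores": round(nano_cores / 1_000_000, 2),  # 1 millicore = 1e6 nanocores
    }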
@@ -971,3 +1100,354 @@ def _format_runtime_stats(runtime: Dict) -> Dict[str, Any]:
     return {
         "imageFs": _format_fs_stats(runtime.get("imageFs", {})),
     }
+
+
+def register_multicluster_tools(server, non_destructive: bool):
+    """Register multi-cluster simultaneous access tools."""
+
+    @server.tool(
+        annotations=ToolAnnotations(
+            title="Multi-Cluster Query",
+            readOnlyHint=True,
+        ),
+    )
+    def multi_cluster_query(
+        contexts: List[str],
+        resource: str = "pods",
+        namespace: Optional[str] = None,
+        label_selector: str = "",
+        field_selector: str = ""
+    ) -> Dict[str, Any]:
+        """Query resources across multiple Kubernetes clusters simultaneously.
+
+        This tool allows you to query the same resource type across multiple clusters
+        in a single request, returning aggregated results from all clusters.
+
+        Args:
+            contexts: List of context names to query (e.g., ["prod-us", "prod-eu", "staging"])
+            resource: Resource type to query (pods, deployments, services, nodes, namespaces)
+            namespace: Namespace to query (optional, all namespaces if not specified)
+            label_selector: Label selector to filter resources (e.g., "app=nginx")
+            field_selector: Field selector to filter resources (e.g., "status.phase=Running")
+
+        Returns:
+            Aggregated results from all specified clusters with per-cluster breakdown.
+
+        Examples:
+            - Get all pods across prod clusters: multi_cluster_query(contexts=["prod-us", "prod-eu"], resource="pods")
+            - Get nginx deployments: multi_cluster_query(contexts=["dev", "staging"], resource="deployments", label_selector="app=nginx")
+        """
+        import concurrent.futures
+
+        if not contexts:
+            return {"success": False, "error": "At least one context must be specified"}
+
+        valid_resources = ["pods", "deployments", "services", "nodes", "namespaces",
+                           "configmaps", "secrets", "ingresses", "statefulsets",
+                           "daemonsets", "jobs", "cronjobs", "pvcs"]
+        if resource not in valid_resources:
+            return {
+                "success": False,
+                "error": f"Invalid resource '{resource}'. Must be one of: {valid_resources}"
+            }
+
+        def query_cluster(ctx: str) -> Dict[str, Any]:
+            """Query a single cluster."""
+            try:
+                ctx_args = _get_kubectl_context_args(ctx)
+                cmd = ["kubectl"] + ctx_args + ["get", resource, "-o", "json"]
+
+                if namespace:
+                    cmd.extend(["-n", namespace])
+                else:
+                    cmd.append("--all-namespaces")
+
+                if label_selector:
+                    cmd.extend(["-l", label_selector])
+
+                if field_selector:
+                    cmd.extend(["--field-selector", field_selector])
+
+                result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
+
+                if result.returncode != 0:
+                    return {
+                        "context": ctx,
+                        "success": False,
+                        "error": result.stderr.strip()
+                    }
+
+                data = json.loads(result.stdout)
+                items = data.get("items", [])
+
+                # Extract summary info based on resource type
+                summaries = []
+                for item in items:
+                    metadata = item.get("metadata", {})
+                    summary = {
+                        "name": metadata.get("name"),
+                        "namespace": metadata.get("namespace"),
+                    }
+
+                    # Add resource-specific fields
+                    if resource == "pods":
+                        status = item.get("status", {})
+                        summary["phase"] = status.get("phase")
+                        summary["podIP"] = status.get("podIP")
+                        summary["nodeName"] = item.get("spec", {}).get("nodeName")
+                    elif resource == "deployments":
+                        status = item.get("status", {})
+                        summary["replicas"] = status.get("replicas", 0)
+                        summary["readyReplicas"] = status.get("readyReplicas", 0)
+                        summary["availableReplicas"] = status.get("availableReplicas", 0)
+                    elif resource == "services":
+                        spec = item.get("spec", {})
+                        summary["type"] = spec.get("type")
+                        summary["clusterIP"] = spec.get("clusterIP")
+                        summary["ports"] = [
+                            {"port": p.get("port"), "targetPort": p.get("targetPort")}
+                            for p in spec.get("ports", [])[:5]  # Limit ports
+                        ]
+                    elif resource == "nodes":
+                        summary["namespace"] = None  # Nodes are cluster-scoped
+                        conditions = item.get("status", {}).get("conditions", [])
+                        for c in conditions:
+                            if c.get("type") == "Ready":
+                                summary["ready"] = c.get("status") == "True"
+                        node_info = item.get("status", {}).get("nodeInfo", {})
+                        summary["kubeletVersion"] = node_info.get("kubeletVersion")
+                    elif resource == "namespaces":
+                        summary["namespace"] = None
+                        summary["phase"] = item.get("status", {}).get("phase")
+
+                    summaries.append(summary)
+
+                return {
+                    "context": ctx,
+                    "success": True,
+                    "count": len(items),
+                    "items": summaries
+                }
+            except subprocess.TimeoutExpired:
+                return {"context": ctx, "success": False, "error": "Query timed out"}
+            except json.JSONDecodeError as e:
+                return {"context": ctx, "success": False, "error": f"Invalid JSON: {e}"}
+            except Exception as e:
+                return {"context": ctx, "success": False, "error": str(e)}
+
+        results = []
+        with concurrent.futures.ThreadPoolExecutor(max_workers=min(len(contexts), 10)) as executor:
+            future_to_ctx = {executor.submit(query_cluster, ctx): ctx for ctx in contexts}
+            for future in concurrent.futures.as_completed(future_to_ctx):
+                results.append(future.result())
+
+        results.sort(key=lambda x: x.get("context", ""))
+        total_items = sum(r.get("count", 0) for r in results if r.get("success"))
+        successful_clusters = sum(1 for r in results if r.get("success"))
+        failed_clusters = len(results) - successful_clusters
+
+        return {
+            "success": successful_clusters > 0,
+            "query": {
+                "resource": resource,
+                "namespace": namespace or "all",
+                "labelSelector": label_selector or None,
+                "fieldSelector": field_selector or None,
+            },
+            "summary": {
+                "totalClusters": len(contexts),
+                "successfulClusters": successful_clusters,
+                "failedClusters": failed_clusters,
+                "totalItems": total_items
+            },
+            "clusters": results
+        }
+
+    @server.tool(
+        annotations=ToolAnnotations(
+            title="Multi-Cluster Health Check",
+            readOnlyHint=True,
+        ),
+    )
+    def multi_cluster_health(
+        contexts: Optional[List[str]] = None
+    ) -> Dict[str, Any]:
+        """Check health status across multiple Kubernetes clusters.
+
+        Performs connectivity and version checks across specified clusters or all
+        available contexts if none specified.
+
+        Args:
+            contexts: List of context names to check (optional, uses all contexts if not specified)
+
+        Returns:
+            Health status for each cluster including version, node count, and connectivity.
+        """
+        import concurrent.futures
+
+        if not contexts:
+            try:
+                all_contexts = list_contexts()
+                contexts = [c.get("name") for c in all_contexts if c.get("name")]
+            except Exception as e:
+                return {"success": False, "error": f"Failed to list contexts: {e}"}
+
+        if not contexts:
+            return {"success": False, "error": "No contexts available"}
+
+        def check_cluster(ctx: str) -> Dict[str, Any]:
+            """Check health of a single cluster."""
+            cluster_health = {
+                "context": ctx,
+                "reachable": False,
+                "version": None,
+                "nodeCount": None,
+                "readyNodes": None,
+                "error": None
+            }
+
+            try:
+                # Check version (tests API connectivity)
+                version_api = get_version_client(ctx)
+                version_info = version_api.get_code()
+                cluster_health["reachable"] = True
+                cluster_health["version"] = version_info.git_version
+
+                # Get node count
+                v1 = get_k8s_client(ctx)
+                nodes = v1.list_node()
+                cluster_health["nodeCount"] = len(nodes.items)
+
+                ready_count = 0
+                for node in nodes.items:
+                    for condition in (node.status.conditions or []):
+                        if condition.type == "Ready" and condition.status == "True":
+                            ready_count += 1
+                            break
+                cluster_health["readyNodes"] = ready_count
+
+            except Exception as e:
+                cluster_health["error"] = str(e)
+
+            return cluster_health
+
+        results = []
+        with concurrent.futures.ThreadPoolExecutor(max_workers=min(len(contexts), 10)) as executor:
+            future_to_ctx = {executor.submit(check_cluster, ctx): ctx for ctx in contexts}
+            for future in concurrent.futures.as_completed(future_to_ctx):
+                results.append(future.result())
+
+        results.sort(key=lambda x: x.get("context", ""))
+        reachable = sum(1 for r in results if r.get("reachable"))
+        total_nodes = sum(r.get("nodeCount", 0) or 0 for r in results)
+        ready_nodes = sum(r.get("readyNodes", 0) or 0 for r in results)
+
+        return {
+            "success": reachable > 0,
+            "summary": {
+                "totalClusters": len(contexts),
+                "reachableClusters": reachable,
+                "unreachableClusters": len(contexts) - reachable,
+                "totalNodes": total_nodes,
+                "readyNodes": ready_nodes
+            },
+            "clusters": results
+        }
+
+    @server.tool(
+        annotations=ToolAnnotations(
+            title="Multi-Cluster Pod Count",
+            readOnlyHint=True,
+        ),
+    )
+    def multi_cluster_pod_count(
+        contexts: Optional[List[str]] = None,
+        namespace: Optional[str] = None,
+        group_by: str = "status"
+    ) -> Dict[str, Any]:
+        """Get pod counts across multiple clusters grouped by status or namespace.
+
+        Quickly see pod distribution across your clusters without fetching full pod details.
+
+        Args:
+            contexts: List of context names (optional, uses all contexts if not specified)
+            namespace: Filter by namespace (optional, all namespaces if not specified)
+            group_by: How to group results: "status" (Running/Pending/etc) or "namespace"
+
+        Returns:
+            Pod counts per cluster with grouping breakdown.
+        """
+        import concurrent.futures
+
+        if group_by not in ["status", "namespace"]:
+            return {"success": False, "error": "group_by must be 'status' or 'namespace'"}
+
+        if not contexts:
+            try:
+                all_contexts = list_contexts()
+                contexts = [c.get("name") for c in all_contexts if c.get("name")]
+            except Exception as e:
+                return {"success": False, "error": f"Failed to list contexts: {e}"}
+
+        if not contexts:
+            return {"success": False, "error": "No contexts available"}
+
+        def count_pods(ctx: str) -> Dict[str, Any]:
+            """Count pods in a single cluster."""
+            try:
+                v1 = get_k8s_client(ctx)
+
+                if namespace:
+                    pods = v1.list_namespaced_pod(namespace)
+                else:
+                    pods = v1.list_pod_for_all_namespaces()
+
+                counts = {}
+                total = 0
+
+                for pod in pods.items:
+                    total += 1
+                    if group_by == "status":
+                        key = pod.status.phase or "Unknown"
+                    else:  # namespace
+                        key = pod.metadata.namespace
+
+                    counts[key] = counts.get(key, 0) + 1
+
+                return {
+                    "context": ctx,
+                    "success": True,
+                    "total": total,
+                    "breakdown": counts
+                }
+            except Exception as e:
+                return {"context": ctx, "success": False, "error": str(e)}
+
+        results = []
+        with concurrent.futures.ThreadPoolExecutor(max_workers=min(len(contexts), 10)) as executor:
+            future_to_ctx = {executor.submit(count_pods, ctx): ctx for ctx in contexts}
+            for future in concurrent.futures.as_completed(future_to_ctx):
+                results.append(future.result())
+
+        results.sort(key=lambda x: x.get("context", ""))
+        aggregate = {}
+        total_pods = 0
+        for r in results:
+            if r.get("success"):
+                total_pods += r.get("total", 0)
+                for key, count in r.get("breakdown", {}).items():
+                    aggregate[key] = aggregate.get(key, 0) + count
+
+        return {
+            "success": any(r.get("success") for r in results),
+            "query": {
+                "namespace": namespace or "all",
+                "groupBy": group_by
+            },
+            "summary": {
+                "totalClusters": len(contexts),
+                "totalPods": total_pods,
+                "aggregateBreakdown": aggregate
+            },
+            "clusters": results
+        }
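All three multi-cluster tools fan out with a bounded ThreadPoolExecutor (at most 10 workers), so a single unreachable cluster shows up as one failed entry in the clusters array rather than failing the whole call. Since they are ordinary MCP tools, any MCP client can drive them; a minimal stdio session using the official mcp Python SDK might look like this (the launch command, context names, and the assumption that the server registers these tools under the names above are illustrative):

import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main() -> None:
    # Launch command is illustrative; start the server however you normally do.
    params = StdioServerParameters(command="kubectl-mcp-server")
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Fan-out health check across two contexts (names are examples).
            result = await session.call_tool(
                "multi_cluster_health",
                {"contexts": ["prod-us", "prod-eu"]},
            )
            print(result.content)

asyncio.run(main())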