kubectl-mcp-server 1.14.0__py3-none-any.whl → 1.16.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kubectl_mcp_server-1.16.0.dist-info/METADATA +1047 -0
- kubectl_mcp_server-1.16.0.dist-info/RECORD +61 -0
- kubectl_mcp_tool/__init__.py +1 -1
- kubectl_mcp_tool/crd_detector.py +247 -0
- kubectl_mcp_tool/k8s_config.py +304 -63
- kubectl_mcp_tool/mcp_server.py +27 -0
- kubectl_mcp_tool/tools/__init__.py +20 -0
- kubectl_mcp_tool/tools/backup.py +881 -0
- kubectl_mcp_tool/tools/capi.py +727 -0
- kubectl_mcp_tool/tools/certs.py +709 -0
- kubectl_mcp_tool/tools/cilium.py +582 -0
- kubectl_mcp_tool/tools/cluster.py +395 -121
- kubectl_mcp_tool/tools/core.py +157 -60
- kubectl_mcp_tool/tools/cost.py +97 -41
- kubectl_mcp_tool/tools/deployments.py +173 -56
- kubectl_mcp_tool/tools/diagnostics.py +40 -13
- kubectl_mcp_tool/tools/gitops.py +552 -0
- kubectl_mcp_tool/tools/helm.py +133 -46
- kubectl_mcp_tool/tools/keda.py +464 -0
- kubectl_mcp_tool/tools/kiali.py +652 -0
- kubectl_mcp_tool/tools/kubevirt.py +803 -0
- kubectl_mcp_tool/tools/networking.py +106 -32
- kubectl_mcp_tool/tools/operations.py +176 -50
- kubectl_mcp_tool/tools/pods.py +162 -50
- kubectl_mcp_tool/tools/policy.py +554 -0
- kubectl_mcp_tool/tools/rollouts.py +790 -0
- kubectl_mcp_tool/tools/security.py +89 -36
- kubectl_mcp_tool/tools/storage.py +35 -16
- tests/test_browser.py +2 -2
- tests/test_ecosystem.py +331 -0
- tests/test_tools.py +73 -10
- kubectl_mcp_server-1.14.0.dist-info/METADATA +0 -780
- kubectl_mcp_server-1.14.0.dist-info/RECORD +0 -49
- {kubectl_mcp_server-1.14.0.dist-info → kubectl_mcp_server-1.16.0.dist-info}/WHEEL +0 -0
- {kubectl_mcp_server-1.14.0.dist-info → kubectl_mcp_server-1.16.0.dist-info}/entry_points.txt +0 -0
- {kubectl_mcp_server-1.14.0.dist-info → kubectl_mcp_server-1.16.0.dist-info}/licenses/LICENSE +0 -0
- {kubectl_mcp_server-1.14.0.dist-info → kubectl_mcp_server-1.16.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,582 @@
|
|
|
1
|
+
"""Cilium/Hubble network toolset for kubectl-mcp-server.
|
|
2
|
+
|
|
3
|
+
Provides tools for managing Cilium network policies and observability.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import subprocess
|
|
7
|
+
import json
|
|
8
|
+
from typing import Dict, Any, List, Optional
|
|
9
|
+
|
|
10
|
+
try:
|
|
11
|
+
from fastmcp import FastMCP
|
|
12
|
+
from fastmcp.tools import ToolAnnotations
|
|
13
|
+
except ImportError:
|
|
14
|
+
from mcp.server.fastmcp import FastMCP
|
|
15
|
+
from mcp.types import ToolAnnotations
|
|
16
|
+
|
|
17
|
+
from ..k8s_config import _get_kubectl_context_args
|
|
18
|
+
from ..crd_detector import crd_exists
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Fully-qualified names of the Cilium CRDs this module probes for.
# Each tool calls crd_exists() on the relevant name before querying, so the
# tools degrade gracefully on clusters where Cilium is not installed.
CILIUM_NETWORK_POLICY_CRD = "ciliumnetworkpolicies.cilium.io"  # namespaced policies
CILIUM_CLUSTERWIDE_POLICY_CRD = "ciliumclusterwidenetworkpolicies.cilium.io"  # cluster-scoped policies
CILIUM_ENDPOINT_CRD = "ciliumendpoints.cilium.io"  # per-workload endpoint state
CILIUM_IDENTITY_CRD = "ciliumidentities.cilium.io"  # security identities
CILIUM_NODE_CRD = "ciliumnodes.cilium.io"  # per-node Cilium state
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _run_kubectl(args: List[str], context: str = "") -> Dict[str, Any]:
    """Execute a kubectl command and report the outcome as a dict.

    Args:
        args: kubectl arguments, e.g. ["get", "pods", "-o", "json"].
        context: Optional kubeconfig context name (empty uses the current one).

    Returns:
        {"success": True, "output": <stdout>} on exit code 0, otherwise
        {"success": False, "error": <message>}.
    """
    command = ["kubectl", *_get_kubectl_context_args(context), *args]
    try:
        # 60s cap keeps a hung cluster connection from stalling the server.
        proc = subprocess.run(command, capture_output=True, text=True, timeout=60)
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "Command timed out"}
    except Exception as exc:
        return {"success": False, "error": str(exc)}
    if proc.returncode != 0:
        return {"success": False, "error": proc.stderr}
    return {"success": True, "output": proc.stdout}
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _get_resources(kind: str, namespace: str = "", context: str = "", label_selector: str = "") -> List[Dict]:
    """Fetch Kubernetes resources of *kind* as parsed JSON objects.

    An empty namespace queries all namespaces (-A). Returns [] on any
    kubectl failure or unparseable output.
    """
    cmd_args = ["get", kind, "-o", "json"]
    cmd_args += ["-n", namespace] if namespace else ["-A"]
    if label_selector:
        cmd_args += ["-l", label_selector]

    outcome = _run_kubectl(cmd_args, context)
    if not outcome["success"]:
        return []
    try:
        return json.loads(outcome["output"]).get("items", [])
    except json.JSONDecodeError:
        return []
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _cilium_cli_available() -> bool:
|
|
63
|
+
"""Check if cilium CLI is available."""
|
|
64
|
+
try:
|
|
65
|
+
result = subprocess.run(["cilium", "version"], capture_output=True, timeout=5)
|
|
66
|
+
return result.returncode == 0
|
|
67
|
+
except Exception:
|
|
68
|
+
return False
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def _hubble_cli_available() -> bool:
|
|
72
|
+
"""Check if hubble CLI is available."""
|
|
73
|
+
try:
|
|
74
|
+
result = subprocess.run(["hubble", "version"], capture_output=True, timeout=5)
|
|
75
|
+
return result.returncode == 0
|
|
76
|
+
except Exception:
|
|
77
|
+
return False
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def cilium_policies_list(
|
|
81
|
+
namespace: str = "",
|
|
82
|
+
context: str = "",
|
|
83
|
+
include_clusterwide: bool = True
|
|
84
|
+
) -> Dict[str, Any]:
|
|
85
|
+
"""List Cilium network policies.
|
|
86
|
+
|
|
87
|
+
Args:
|
|
88
|
+
namespace: Filter by namespace (empty for all namespaces)
|
|
89
|
+
context: Kubernetes context to use (optional)
|
|
90
|
+
include_clusterwide: Include CiliumClusterwideNetworkPolicies
|
|
91
|
+
|
|
92
|
+
Returns:
|
|
93
|
+
List of Cilium network policies
|
|
94
|
+
"""
|
|
95
|
+
policies = []
|
|
96
|
+
|
|
97
|
+
if crd_exists(CILIUM_NETWORK_POLICY_CRD, context):
|
|
98
|
+
for item in _get_resources("ciliumnetworkpolicies.cilium.io", namespace, context):
|
|
99
|
+
spec = item.get("spec", {})
|
|
100
|
+
status = item.get("status", {})
|
|
101
|
+
|
|
102
|
+
# Parse endpoint selector
|
|
103
|
+
endpoint_selector = spec.get("endpointSelector", {})
|
|
104
|
+
match_labels = endpoint_selector.get("matchLabels", {})
|
|
105
|
+
|
|
106
|
+
# Count rules
|
|
107
|
+
ingress_rules = len(spec.get("ingress", []))
|
|
108
|
+
egress_rules = len(spec.get("egress", []))
|
|
109
|
+
ingress_deny = len(spec.get("ingressDeny", []))
|
|
110
|
+
egress_deny = len(spec.get("egressDeny", []))
|
|
111
|
+
|
|
112
|
+
policies.append({
|
|
113
|
+
"name": item["metadata"]["name"],
|
|
114
|
+
"namespace": item["metadata"]["namespace"],
|
|
115
|
+
"kind": "CiliumNetworkPolicy",
|
|
116
|
+
"endpoint_selector": match_labels,
|
|
117
|
+
"ingress_rules": ingress_rules,
|
|
118
|
+
"egress_rules": egress_rules,
|
|
119
|
+
"ingress_deny_rules": ingress_deny,
|
|
120
|
+
"egress_deny_rules": egress_deny,
|
|
121
|
+
"total_rules": ingress_rules + egress_rules + ingress_deny + egress_deny,
|
|
122
|
+
"derivate_from_rules": status.get("derivativePolicies", []),
|
|
123
|
+
})
|
|
124
|
+
|
|
125
|
+
if include_clusterwide and crd_exists(CILIUM_CLUSTERWIDE_POLICY_CRD, context):
|
|
126
|
+
for item in _get_resources("ciliumclusterwidenetworkpolicies.cilium.io", "", context):
|
|
127
|
+
spec = item.get("spec", {})
|
|
128
|
+
status = item.get("status", {})
|
|
129
|
+
|
|
130
|
+
endpoint_selector = spec.get("endpointSelector", {})
|
|
131
|
+
match_labels = endpoint_selector.get("matchLabels", {})
|
|
132
|
+
|
|
133
|
+
ingress_rules = len(spec.get("ingress", []))
|
|
134
|
+
egress_rules = len(spec.get("egress", []))
|
|
135
|
+
ingress_deny = len(spec.get("ingressDeny", []))
|
|
136
|
+
egress_deny = len(spec.get("egressDeny", []))
|
|
137
|
+
|
|
138
|
+
policies.append({
|
|
139
|
+
"name": item["metadata"]["name"],
|
|
140
|
+
"namespace": "",
|
|
141
|
+
"kind": "CiliumClusterwideNetworkPolicy",
|
|
142
|
+
"endpoint_selector": match_labels,
|
|
143
|
+
"ingress_rules": ingress_rules,
|
|
144
|
+
"egress_rules": egress_rules,
|
|
145
|
+
"ingress_deny_rules": ingress_deny,
|
|
146
|
+
"egress_deny_rules": egress_deny,
|
|
147
|
+
"total_rules": ingress_rules + egress_rules + ingress_deny + egress_deny,
|
|
148
|
+
"node_selector": spec.get("nodeSelector", {}),
|
|
149
|
+
})
|
|
150
|
+
|
|
151
|
+
return {
|
|
152
|
+
"context": context or "current",
|
|
153
|
+
"total": len(policies),
|
|
154
|
+
"policies": policies,
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def cilium_policy_get(
    name: str,
    namespace: str = "",
    kind: str = "CiliumNetworkPolicy",
    context: str = ""
) -> Dict[str, Any]:
    """Get detailed information about a Cilium network policy.

    Args:
        name: Name of the policy
        namespace: Namespace (for CiliumNetworkPolicy; empty uses the
            current kubeconfig namespace)
        kind: CiliumNetworkPolicy or CiliumClusterwideNetworkPolicy
        context: Kubernetes context to use (optional)

    Returns:
        Detailed policy information
    """
    if kind.lower() == "ciliumclusterwidenetworkpolicy":
        crd = "ciliumclusterwidenetworkpolicies.cilium.io"
        args = ["get", crd, name, "-o", "json"]
    else:
        crd = "ciliumnetworkpolicies.cilium.io"
        args = ["get", crd, name, "-o", "json"]
        # Fix: only pass -n when a namespace was actually given. The previous
        # code always emitted ["-n", namespace], so the default namespace=""
        # sent an explicit empty namespace to kubectl instead of falling back
        # to the current context's namespace.
        if namespace:
            args.extend(["-n", namespace])

    if not crd_exists(crd, context):
        return {"success": False, "error": f"{crd} not found"}

    result = _run_kubectl(args, context)

    if result["success"]:
        try:
            return {
                "success": True,
                "context": context or "current",
                "policy": json.loads(result["output"]),
            }
        except json.JSONDecodeError:
            return {"success": False, "error": "Failed to parse response"}

    return {"success": False, "error": result.get("error", "Unknown error")}
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def cilium_endpoints_list(
    namespace: str = "",
    context: str = "",
    label_selector: str = ""
) -> Dict[str, Any]:
    """List Cilium endpoints.

    Args:
        namespace: Filter by namespace (empty for all namespaces)
        context: Kubernetes context to use (optional)
        label_selector: Label selector to filter endpoints

    Returns:
        Dict with "total", "ready" and the per-endpoint summaries.
    """
    if not crd_exists(CILIUM_ENDPOINT_CRD, context):
        return {
            "success": False,
            "error": "Cilium is not installed (ciliumendpoints.cilium.io CRD not found)"
        }

    collected = []
    for ep in _get_resources("ciliumendpoints.cilium.io", namespace, context, label_selector):
        ep_status = ep.get("status", {})
        ident = ep_status.get("identity", {})
        pol = ep_status.get("policy", {})
        addr_list = ep_status.get("networking", {}).get("addressing", [])

        # First non-empty address of each family, if any.
        ip4 = next((entry.get("ipv4") for entry in addr_list if entry.get("ipv4")), None)
        ip6 = next((entry.get("ipv6") for entry in addr_list if entry.get("ipv6")), None)

        ingress_on = pol.get("ingress", {}).get("enforcing", False)
        egress_on = pol.get("egress", {}).get("enforcing", False)

        collected.append({
            "name": ep["metadata"]["name"],
            "namespace": ep["metadata"]["namespace"],
            "identity_id": ident.get("id"),
            "identity_labels": ident.get("labels", []),
            "ipv4": ip4,
            "ipv6": ip6,
            "state": ep_status.get("state"),
            "health": ep_status.get("health", {}),
            "policy_enabled": ingress_on or egress_on,
            "ingress_enforcing": ingress_on,
            "egress_enforcing": egress_on,
        })

    return {
        "context": context or "current",
        "total": len(collected),
        "ready": sum(1 for ep in collected if ep["state"] == "ready"),
        "endpoints": collected,
    }
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def cilium_identities_list(
    context: str = "",
    label_selector: str = ""
) -> Dict[str, Any]:
    """List Cilium identities.

    Args:
        context: Kubernetes context to use (optional)
        label_selector: Label selector to filter identities

    Returns:
        Dict with "total" and the per-identity summaries.
    """
    if not crd_exists(CILIUM_IDENTITY_CRD, context):
        return {
            "success": False,
            "error": "Cilium identities CRD not found"
        }

    found = []
    # CiliumIdentity objects are cluster-scoped, so no namespace filter.
    for ident in _get_resources("ciliumidentities.cilium.io", "", context, label_selector):
        labels = ident.get("security-labels", {})
        ident_name = ident["metadata"]["name"]
        found.append({
            "name": ident_name,
            # The object name doubles as the numeric identity ID.
            "id": ident_name,
            "labels": labels,
            "namespace": labels.get("k8s:io.kubernetes.pod.namespace", ""),
        })

    return {
        "context": context or "current",
        "total": len(found),
        "identities": found,
    }
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
def cilium_nodes_list(context: str = "") -> Dict[str, Any]:
    """List Cilium nodes with their status.

    Args:
        context: Kubernetes context to use (optional)

    Returns:
        Dict with "total" and the per-node summaries.
    """
    if not crd_exists(CILIUM_NODE_CRD, context):
        return {
            "success": False,
            "error": "Cilium nodes CRD not found"
        }

    summaries = []
    for node in _get_resources("ciliumnodes.cilium.io", "", context):
        node_spec = node.get("spec", {})
        node_status = node.get("status", {})

        internal_ip = next(
            (addr.get("ip") for addr in node_spec.get("addresses", [])
             if addr.get("type") == "InternalIP"),
            None,
        )

        summaries.append({
            "name": node["metadata"]["name"],
            "ipv4_address": internal_ip,
            # NOTE(review): key names preserved for output compatibility, but
            # "ipv4_health" actually carries the IPAM pod CIDRs and "boot_id"
            # carries status.nodeIdentity — candidates for renaming in a
            # breaking release.
            "ipv4_health": node_spec.get("ipam", {}).get("podCIDRs", []),
            "encryption_key": node_spec.get("encryption", {}).get("key"),
            "boot_id": node_status.get("nodeIdentity"),
        })

    return {
        "context": context or "current",
        "total": len(summaries),
        "nodes": summaries,
    }
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
def cilium_status(context: str = "") -> Dict[str, Any]:
    """Get Cilium cluster status.

    Args:
        context: Kubernetes context to use (optional)

    Returns:
        Cilium status information, sourced from the cilium CLI when
        available, otherwise derived from the agent pods via kubectl.
    """
    # Preferred path: the cilium CLI emits a richer status report.
    if _cilium_cli_available():
        cli_cmd = ["cilium", "status", "--output", "json"]
        if context:
            cli_cmd += ["--context", context]
        try:
            cli_result = subprocess.run(cli_cmd, capture_output=True, text=True, timeout=30)
            if cli_result.returncode == 0:
                return {
                    "success": True,
                    "context": context or "current",
                    "source": "cilium-cli",
                    "status": json.loads(cli_result.stdout),
                }
        except Exception:
            pass  # any CLI failure falls through to the kubectl path

    # Fallback: summarize the cilium agent pods in kube-system.
    result = _run_kubectl(
        ["get", "pods", "-n", "kube-system", "-l", "k8s-app=cilium", "-o", "json"],
        context,
    )
    if not result["success"]:
        return {"success": False, "error": result.get("error", "Failed to get Cilium status")}

    try:
        agent_pods = json.loads(result["output"]).get("items", [])
    except json.JSONDecodeError:
        return {"success": False, "error": "Failed to parse response"}

    agents = []
    for pod in agent_pods:
        pod_state = pod.get("status", {})
        ready_flag = next(
            (c for c in pod_state.get("conditions", []) if c.get("type") == "Ready"),
            {},
        ).get("status") == "True"

        agents.append({
            "name": pod["metadata"]["name"],
            "node": pod["spec"].get("nodeName", ""),
            "ready": ready_flag,
            "phase": pod_state.get("phase", "Unknown"),
            "restarts": sum(cs.get("restartCount", 0)
                            for cs in pod_state.get("containerStatuses", [])),
        })

    return {
        "success": True,
        "context": context or "current",
        "source": "kubectl",
        "total_agents": len(agents),
        "ready_agents": sum(1 for a in agents if a["ready"]),
        "agents": agents,
        "hubble_cli_available": _hubble_cli_available(),
        "cilium_cli_available": _cilium_cli_available(),
    }
|
|
403
|
+
|
|
404
|
+
|
|
405
|
+
def hubble_flows_query(
    namespace: str = "",
    pod: str = "",
    label_selector: str = "",
    verdict: str = "",
    protocol: str = "",
    last: int = 100,
    context: str = ""
) -> Dict[str, Any]:
    """Query Hubble flows (requires hubble CLI or hubble-relay).

    Args:
        namespace: Filter by namespace
        pod: Filter by pod name
        label_selector: Filter by labels
        verdict: Filter by verdict (FORWARDED, DROPPED, AUDIT)
        protocol: Filter by protocol (TCP, UDP, ICMP)
        last: Number of flows to retrieve (default 100)
        context: Kubernetes context to use (optional)

    Returns:
        Hubble flow data with verdict/protocol summaries.
    """
    if not _hubble_cli_available():
        return {
            "success": False,
            "error": "Hubble CLI not available. Install hubble CLI or use hubble-relay port-forward."
        }

    cmd = ["hubble", "observe", "--output", "json", f"--last={last}"]
    # Append only the filters the caller actually supplied.
    optional_flags = [
        ("--namespace", namespace),
        ("--pod", pod),
        ("--label", label_selector),
        ("--verdict", verdict.upper() if verdict else ""),
        ("--protocol", protocol.upper() if protocol else ""),
        ("--context", context),
    ]
    for flag, value in optional_flags:
        if value:
            cmd += [flag, value]

    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
        if proc.returncode != 0:
            return {"success": False, "error": proc.stderr}

        # hubble emits one JSON object per line; skip unparseable lines.
        parsed_flows = []
        for raw_line in proc.stdout.strip().split("\n"):
            if not raw_line:
                continue
            try:
                parsed_flows.append(json.loads(raw_line))
            except json.JSONDecodeError:
                continue

        # Tally verdicts and L4 protocols for a quick overview.
        verdict_counts: Dict[str, int] = {}
        protocol_counts: Dict[str, int] = {}
        for entry in parsed_flows:
            verdict_name = entry.get("verdict", "UNKNOWN")
            verdict_counts[verdict_name] = verdict_counts.get(verdict_name, 0) + 1

            layer4 = entry.get("l4", {})
            if "TCP" in layer4:
                proto_name = "TCP"
            elif "UDP" in layer4:
                proto_name = "UDP"
            elif "ICMPv4" in layer4 or "ICMPv6" in layer4:
                proto_name = "ICMP"
            else:
                proto_name = None
            if proto_name:
                protocol_counts[proto_name] = protocol_counts.get(proto_name, 0) + 1

        return {
            "success": True,
            "context": context or "current",
            "total_flows": len(parsed_flows),
            "verdicts_summary": verdict_counts,
            "protocols_summary": protocol_counts,
            "flows": parsed_flows[:50],  # Return first 50 for display
        }
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "Hubble query timed out"}
    except Exception as e:
        return {"success": False, "error": str(e)}
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
def cilium_detect(context: str = "") -> Dict[str, Any]:
    """Detect if Cilium is installed and its components.

    Args:
        context: Kubernetes context to use (optional)

    Returns:
        Detection results for Cilium
    """
    # Probe each CRD once and reuse the results below.
    crd_presence = {
        "ciliumnetworkpolicies": crd_exists(CILIUM_NETWORK_POLICY_CRD, context),
        "ciliumclusterwidenetworkpolicies": crd_exists(CILIUM_CLUSTERWIDE_POLICY_CRD, context),
        "ciliumendpoints": crd_exists(CILIUM_ENDPOINT_CRD, context),
        "ciliumidentities": crd_exists(CILIUM_IDENTITY_CRD, context),
        "ciliumnodes": crd_exists(CILIUM_NODE_CRD, context),
    }

    return {
        "context": context or "current",
        # The core policy CRD is treated as the marker for "Cilium installed".
        "installed": crd_presence["ciliumnetworkpolicies"],
        "crds": crd_presence,
        "cli": {
            "cilium_available": _cilium_cli_available(),
            "hubble_available": _hubble_cli_available(),
        },
    }
|
|
515
|
+
|
|
516
|
+
|
|
517
|
+
def register_cilium_tools(mcp: FastMCP, non_destructive: bool = False):
    """Register Cilium tools with the MCP server."""
    # NOTE(review): `non_destructive` is unused here — every tool below is
    # read-only (readOnlyHint=True), so there is nothing to gate. Presumably
    # kept for signature parity with other register_*_tools — TODO confirm.

    # Each wrapper delegates to its module-level function and serializes the
    # result to pretty-printed JSON for the MCP client.

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def cilium_policies_list_tool(
        namespace: str = "",
        context: str = "",
        include_clusterwide: bool = True
    ) -> str:
        """List Cilium network policies."""
        return json.dumps(cilium_policies_list(namespace, context, include_clusterwide), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def cilium_policy_get_tool(
        name: str,
        namespace: str = "",
        kind: str = "CiliumNetworkPolicy",
        context: str = ""
    ) -> str:
        """Get detailed information about a Cilium network policy."""
        return json.dumps(cilium_policy_get(name, namespace, kind, context), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def cilium_endpoints_list_tool(
        namespace: str = "",
        context: str = "",
        label_selector: str = ""
    ) -> str:
        """List Cilium endpoints with their status."""
        return json.dumps(cilium_endpoints_list(namespace, context, label_selector), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def cilium_identities_list_tool(
        context: str = "",
        label_selector: str = ""
    ) -> str:
        """List Cilium identities."""
        return json.dumps(cilium_identities_list(context, label_selector), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def cilium_nodes_list_tool(context: str = "") -> str:
        """List Cilium nodes with their status."""
        return json.dumps(cilium_nodes_list(context), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def cilium_status_tool(context: str = "") -> str:
        """Get Cilium cluster status."""
        return json.dumps(cilium_status(context), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def hubble_flows_query_tool(
        namespace: str = "",
        pod: str = "",
        label_selector: str = "",
        verdict: str = "",
        protocol: str = "",
        last: int = 100,
        context: str = ""
    ) -> str:
        """Query Hubble flows for network observability."""
        return json.dumps(hubble_flows_query(namespace, pod, label_selector, verdict, protocol, last, context), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def cilium_detect_tool(context: str = "") -> str:
        """Detect if Cilium is installed and its components."""
        return json.dumps(cilium_detect(context), indent=2)
|