kubectl-mcp-server 1.19.2__py3-none-any.whl → 1.21.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {kubectl_mcp_server-1.19.2.dist-info → kubectl_mcp_server-1.21.0.dist-info}/METADATA +26 -18
- {kubectl_mcp_server-1.19.2.dist-info → kubectl_mcp_server-1.21.0.dist-info}/RECORD +13 -9
- kubectl_mcp_tool/__init__.py +1 -1
- kubectl_mcp_tool/mcp_server.py +5 -1
- kubectl_mcp_tool/tools/__init__.py +4 -0
- kubectl_mcp_tool/tools/kind.py +1723 -0
- kubectl_mcp_tool/tools/vind.py +744 -0
- tests/test_kind.py +1206 -0
- tests/test_vind.py +512 -0
- {kubectl_mcp_server-1.19.2.dist-info → kubectl_mcp_server-1.21.0.dist-info}/WHEEL +0 -0
- {kubectl_mcp_server-1.19.2.dist-info → kubectl_mcp_server-1.21.0.dist-info}/entry_points.txt +0 -0
- {kubectl_mcp_server-1.19.2.dist-info → kubectl_mcp_server-1.21.0.dist-info}/licenses/LICENSE +0 -0
- {kubectl_mcp_server-1.19.2.dist-info → kubectl_mcp_server-1.21.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1723 @@
"""kind (Kubernetes IN Docker) toolset for kubectl-mcp-server.

kind enables running local Kubernetes clusters using Docker container "nodes".
It's a tool from Kubernetes SIG for local development and CI testing.
"""

import subprocess
import json
import re
import os
import tempfile
import yaml
from typing import Dict, Any, List, Optional

try:
    from fastmcp import FastMCP
    from fastmcp.tools import ToolAnnotations
except ImportError:
    from mcp.server.fastmcp import FastMCP
    from mcp.types import ToolAnnotations


def _kind_available() -> bool:
    """Check if kind CLI is available."""
    try:
        result = subprocess.run(
            ["kind", "version"],
            capture_output=True,
            timeout=10
        )
        return result.returncode == 0
    except Exception:
        return False


def _get_kind_version() -> Optional[str]:
    """Get kind CLI version."""
    try:
        result = subprocess.run(
            ["kind", "version"],
            capture_output=True,
            text=True,
            timeout=10
        )
        if result.returncode == 0:
            output = result.stdout.strip()
            match = re.search(r'v?\d+\.\d+\.\d+', output)
            if match:
                return match.group(0)
            return output
        return None
    except Exception:
        return None


def _run_kind(
    args: List[str],
    timeout: int = 300,
    capture_output: bool = True
) -> Dict[str, Any]:
    """Run kind command and return result.

    Args:
        args: Command arguments (without 'kind' prefix)
        timeout: Command timeout in seconds
        capture_output: Whether to capture stdout/stderr

    Returns:
        Result dict with success status and output/error
    """
    if not _kind_available():
        return {
            "success": False,
            "error": "kind CLI not available. Install from: https://kind.sigs.k8s.io/docs/user/quick-start/#installation"
        }

    cmd = ["kind"] + args

    try:
        result = subprocess.run(
            cmd,
            capture_output=capture_output,
            text=True,
            timeout=timeout
        )
        if result.returncode == 0:
            output = result.stdout.strip() if capture_output else ""
            return {"success": True, "output": output}
        return {
            "success": False,
            "error": result.stderr.strip() if capture_output else f"Command failed with exit code {result.returncode}"
        }
    except subprocess.TimeoutExpired:
        return {"success": False, "error": f"Command timed out after {timeout} seconds"}
    except Exception as e:
        return {"success": False, "error": str(e)}


def kind_detect() -> Dict[str, Any]:
    """Detect if kind CLI is installed and get version info.

    Returns:
        Detection results including CLI availability and version
    """
    available = _kind_available()
    version = _get_kind_version() if available else None

    return {
        "installed": available,
        "cli_available": available,
        "version": version,
        "install_instructions": "https://kind.sigs.k8s.io/docs/user/quick-start/#installation" if not available else None
    }


def kind_version() -> Dict[str, Any]:
    """Get kind CLI version information.

    Returns:
        Version information
    """
    result = _run_kind(["version"], timeout=10)
    if result["success"]:
        return {
            "success": True,
            "version": result.get("output", ""),
        }
    return result


def kind_list_clusters() -> Dict[str, Any]:
    """List all kind clusters.

    Returns:
        List of kind cluster names
    """
    result = _run_kind(["get", "clusters"], timeout=30)

    if not result["success"]:
        return result

    output = result.get("output", "")
    clusters = [name.strip() for name in output.split("\n") if name.strip()]

    return {
        "success": True,
        "total": len(clusters),
        "clusters": clusters
    }


def kind_get_nodes(name: str = "kind") -> Dict[str, Any]:
    """List nodes in a kind cluster.

    Args:
        name: Name of the kind cluster (default: kind)

    Returns:
        List of node container names
    """
    result = _run_kind(["get", "nodes", "--name", name], timeout=30)

    if not result["success"]:
        return result

    output = result.get("output", "")
    nodes = [node.strip() for node in output.split("\n") if node.strip()]

    return {
        "success": True,
        "cluster": name,
        "total": len(nodes),
        "nodes": nodes
    }


def kind_get_kubeconfig(name: str = "kind", internal: bool = False) -> Dict[str, Any]:
    """Get kubeconfig for a kind cluster.

    Args:
        name: Name of the kind cluster
        internal: Return internal (container) kubeconfig instead of external

    Returns:
        Kubeconfig content
    """
    args = ["get", "kubeconfig", "--name", name]
    if internal:
        args.append("--internal")

    result = _run_kind(args, timeout=30)

    if result["success"]:
        return {
            "success": True,
            "kubeconfig": result.get("output", ""),
            "message": f"Kubeconfig for kind cluster '{name}'"
        }

    return result


def kind_export_logs(
    name: str = "kind",
    output_dir: str = ""
) -> Dict[str, Any]:
    """Export cluster logs for debugging.

    Args:
        name: Name of the kind cluster
        output_dir: Directory to export logs to (default: temp directory)

    Returns:
        Export result with log location
    """
    if not output_dir:
        output_dir = tempfile.mkdtemp(prefix=f"kind-logs-{name}-")

    args = ["export", "logs", output_dir, "--name", name]
    result = _run_kind(args, timeout=120)

    if result["success"]:
        return {
            "success": True,
            "message": f"Logs exported for cluster '{name}'",
            "log_directory": output_dir,
            "output": result.get("output", "")
        }

    return result


def kind_create_cluster(
    name: str = "kind",
    image: str = "",
    config: str = "",
    wait: str = "5m",
    retain: bool = False,
    kubeconfig: str = ""
) -> Dict[str, Any]:
    """Create a new kind cluster.

    Args:
        name: Name for the new cluster (default: kind)
        image: Node image (determines K8s version, e.g., kindest/node:v1.29.0)
        config: Path to kind config YAML file for multi-node or custom setup
        wait: Wait timeout for control plane (default: 5m)
        retain: Retain nodes on creation failure for debugging
        kubeconfig: Path to kubeconfig file to update

    Returns:
        Creation result
    """
    args = ["create", "cluster", "--name", name]

    if image:
        args.extend(["--image", image])

    if config:
        args.extend(["--config", config])

    if wait:
        args.extend(["--wait", wait])

    if retain:
        args.append("--retain")

    if kubeconfig:
        args.extend(["--kubeconfig", kubeconfig])

    result = _run_kind(args, timeout=600)

    if result["success"]:
        return {
            "success": True,
            "message": f"kind cluster '{name}' created successfully",
            "output": result.get("output", ""),
            "cluster": name
        }

    return result
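
# --- Editor's illustrative sketch, not part of the released file ---
# Creating a throwaway cluster pinned to a specific Kubernetes version;
# the cluster name "dev" is arbitrary, and kindest/node:v1.29.0 is the
# example image already cited in the docstring above:
def _example_create_dev_cluster() -> None:
    created = kind_create_cluster(name="dev", image="kindest/node:v1.29.0", wait="2m")
    print(created.get("message") or created.get("error"))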


def kind_delete_cluster(name: str = "kind", kubeconfig: str = "") -> Dict[str, Any]:
    """Delete a kind cluster.

    Args:
        name: Name of the cluster to delete
        kubeconfig: Path to kubeconfig file to update

    Returns:
        Deletion result
    """
    args = ["delete", "cluster", "--name", name]

    if kubeconfig:
        args.extend(["--kubeconfig", kubeconfig])

    result = _run_kind(args, timeout=120)

    if result["success"]:
        return {
            "success": True,
            "message": f"kind cluster '{name}' deleted successfully",
            "output": result.get("output", "")
        }

    return result


def kind_delete_all_clusters(kubeconfig: str = "") -> Dict[str, Any]:
    """Delete all kind clusters.

    Args:
        kubeconfig: Path to kubeconfig file to update

    Returns:
        Deletion result
    """
    args = ["delete", "clusters", "--all"]

    if kubeconfig:
        args.extend(["--kubeconfig", kubeconfig])

    result = _run_kind(args, timeout=300)

    if result["success"]:
        return {
            "success": True,
            "message": "All kind clusters deleted successfully",
            "output": result.get("output", "")
        }

    return result


def kind_load_image(
    images: List[str],
    name: str = "kind",
    nodes: List[str] = None
) -> Dict[str, Any]:
    """Load Docker images into kind cluster nodes.

    This is a key feature for local development - load locally built
    images directly into the cluster without pushing to a registry.

    Args:
        images: List of Docker image names to load
        name: Name of the kind cluster
        nodes: Specific nodes to load images to (default: all nodes)

    Returns:
        Load result
    """
    if not images:
        return {"success": False, "error": "No images specified to load"}

    args = ["load", "docker-image", "--name", name] + images

    if nodes:
        for node in nodes:
            args.extend(["--nodes", node])

    result = _run_kind(args, timeout=300)

    if result["success"]:
        return {
            "success": True,
            "message": f"Loaded {len(images)} image(s) into cluster '{name}'",
            "images": images,
            "output": result.get("output", "")
        }

    return result
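
# --- Editor's illustrative sketch, not part of the released file ---
# The inner dev loop this enables: build locally, side-load the image,
# then redeploy. The tag "myapp:dev" is hypothetical:
def _example_load_local_build() -> None:
    loaded = kind_load_image(["myapp:dev"], name="dev")
    if loaded["success"]:
        print(loaded["message"])  # image is now on the node(s); no registry push needed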


def kind_load_image_archive(
    archive: str,
    name: str = "kind",
    nodes: List[str] = None
) -> Dict[str, Any]:
    """Load Docker images from tar archive into kind cluster.

    Args:
        archive: Path to image archive (tar file)
        name: Name of the kind cluster
        nodes: Specific nodes to load images to (default: all nodes)

    Returns:
        Load result
    """
    if not os.path.exists(archive):
        return {"success": False, "error": f"Archive file not found: {archive}"}

    args = ["load", "image-archive", archive, "--name", name]

    if nodes:
        for node in nodes:
            args.extend(["--nodes", node])

    result = _run_kind(args, timeout=300)

    if result["success"]:
        return {
            "success": True,
            "message": f"Loaded images from archive into cluster '{name}'",
            "archive": archive,
            "output": result.get("output", "")
        }

    return result


def kind_build_node_image(
    image: str = "",
    base_image: str = "",
    kube_root: str = ""
) -> Dict[str, Any]:
    """Build a kind node image from Kubernetes source.

    This is an advanced feature for testing custom Kubernetes builds.

    Args:
        image: Name for the resulting image (default: kindest/node:latest)
        base_image: Base image to use
        kube_root: Path to Kubernetes source root

    Returns:
        Build result
    """
    args = ["build", "node-image"]

    if image:
        args.extend(["--image", image])

    if base_image:
        args.extend(["--base-image", base_image])

    if kube_root:
        args.extend(["--kube-root", kube_root])

    result = _run_kind(args, timeout=1800)

    if result["success"]:
        return {
            "success": True,
            "message": "Node image built successfully",
            "image": image or "kindest/node:latest",
            "output": result.get("output", "")
        }

    return result


def kind_cluster_info(name: str = "kind") -> Dict[str, Any]:
    """Get cluster information including nodes and kubeconfig.

    Args:
        name: Name of the kind cluster

    Returns:
        Cluster information
    """
    clusters_result = kind_list_clusters()
    if not clusters_result["success"]:
        return clusters_result

    if name not in clusters_result.get("clusters", []):
        return {
            "success": False,
            "error": f"Cluster '{name}' not found. Available clusters: {clusters_result.get('clusters', [])}"
        }

    nodes_result = kind_get_nodes(name)
    kubeconfig_result = kind_get_kubeconfig(name)

    return {
        "success": True,
        "cluster": name,
        "nodes": nodes_result.get("nodes", []) if nodes_result["success"] else [],
        "node_count": nodes_result.get("total", 0) if nodes_result["success"] else 0,
        "kubeconfig_available": kubeconfig_result["success"],
    }


def kind_node_labels(name: str = "kind") -> Dict[str, Any]:
    """Get node labels for kind cluster nodes.

    Args:
        name: Name of the kind cluster

    Returns:
        Node labels information
    """
    nodes_result = kind_get_nodes(name)
    if not nodes_result["success"]:
        return nodes_result

    node_labels = {}
    for node in nodes_result.get("nodes", []):
        try:
            result = subprocess.run(
                ["docker", "inspect", "--format", '{{json .Config.Labels}}', node],
                capture_output=True,
                text=True,
                timeout=30
            )
            if result.returncode == 0:
                labels = json.loads(result.stdout.strip())
                node_labels[node] = labels
            else:
                node_labels[node] = {"error": "Failed to get labels"}
        except Exception as e:
            node_labels[node] = {"error": str(e)}

    return {
        "success": True,
        "cluster": name,
        "node_labels": node_labels
    }


def _run_docker(
    args: List[str],
    timeout: int = 60,
    capture_output: bool = True
) -> Dict[str, Any]:
    """Run docker command and return result.

    Args:
        args: Command arguments (without 'docker' prefix)
        timeout: Command timeout in seconds
        capture_output: Whether to capture stdout/stderr

    Returns:
        Result dict with success status and output/error
    """
    cmd = ["docker"] + args

    try:
        result = subprocess.run(
            cmd,
            capture_output=capture_output,
            text=True,
            timeout=timeout
        )
        if result.returncode == 0:
            output = result.stdout.strip() if capture_output else ""
            return {"success": True, "output": output}
        return {
            "success": False,
            "error": result.stderr.strip() if capture_output else f"Command failed with exit code {result.returncode}"
        }
    except subprocess.TimeoutExpired:
        return {"success": False, "error": f"Command timed out after {timeout} seconds"}
    except FileNotFoundError:
        return {"success": False, "error": "Docker CLI not available"}
    except Exception as e:
        return {"success": False, "error": str(e)}


KNOWN_KIND_IMAGES = [
    "kindest/node:v1.32.0",
    "kindest/node:v1.31.0",
    "kindest/node:v1.30.0",
    "kindest/node:v1.29.0",
    "kindest/node:v1.28.0",
    "kindest/node:v1.27.0",
    "kindest/node:v1.26.0",
    "kindest/node:v1.25.0",
]


def kind_config_validate(config_path: str) -> Dict[str, Any]:
    """Validate kind configuration file.

    Args:
        config_path: Path to kind config YAML file

    Returns:
        Validation results
    """
    if not os.path.exists(config_path):
        return {"success": False, "error": f"Config file not found: {config_path}"}

    try:
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
    except yaml.YAMLError as e:
        return {"success": False, "error": f"Invalid YAML: {e}"}
    except Exception as e:
        return {"success": False, "error": f"Failed to read config: {e}"}

    errors = []
    warnings = []

    if config.get("kind") != "Cluster":
        errors.append("kind must be 'Cluster'")

    api_version = config.get("apiVersion", "")
    if not api_version.startswith("kind.x-k8s.io/"):
        errors.append("apiVersion should be 'kind.x-k8s.io/v1alpha4'")

    nodes = config.get("nodes", [])
    if not nodes:
        warnings.append("No nodes defined, will create single control-plane")

    control_planes = [n for n in nodes if n.get("role") == "control-plane"]
    workers = [n for n in nodes if n.get("role") == "worker"]

    if len(control_planes) == 0 and nodes:
        warnings.append("No control-plane node defined")
    elif len(control_planes) > 1:
        warnings.append(f"HA setup with {len(control_planes)} control-planes")

    return {
        "success": len(errors) == 0,
        "valid": len(errors) == 0,
        "errors": errors,
        "warnings": warnings,
        "config_summary": {
            "control_planes": len(control_planes),
            "workers": len(workers),
            "total_nodes": len(nodes) if nodes else 1,
            "api_version": api_version
        }
    }
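
# --- Editor's illustrative sketch, not part of the released file ---
# Checking a config before handing it to kind_create_cluster; the file
# path "kind-config.yaml" is hypothetical:
def _example_validate_config() -> None:
    report = kind_config_validate("kind-config.yaml")
    for problem in report.get("errors", []):
        print("error:", problem)
    for note in report.get("warnings", []):
        print("warning:", note)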


def kind_config_generate(
    name: str = "kind",
    workers: int = 0,
    control_planes: int = 1,
    registry: bool = False,
    ingress: bool = False,
    port_mappings: List[Dict] = None
) -> Dict[str, Any]:
    """Generate kind config YAML for common scenarios.

    Args:
        name: Cluster name (for reference)
        workers: Number of worker nodes
        control_planes: Number of control-plane nodes (1 for single, 3 for HA)
        registry: Add local registry configuration
        ingress: Add port mappings for ingress (80, 443)
        port_mappings: Custom port mappings list

    Returns:
        Generated config YAML
    """
    config = {
        "kind": "Cluster",
        "apiVersion": "kind.x-k8s.io/v1alpha4",
        "nodes": []
    }

    for i in range(control_planes):
        node = {"role": "control-plane"}
        if ingress and i == 0:
            node["kubeadmConfigPatches"] = [
                "kind: InitConfiguration\nnodeRegistration:\n  kubeletExtraArgs:\n    node-labels: \"ingress-ready=true\""
            ]
            node["extraPortMappings"] = [
                {"containerPort": 80, "hostPort": 80, "protocol": "TCP"},
                {"containerPort": 443, "hostPort": 443, "protocol": "TCP"},
            ]
        if port_mappings and i == 0:
            node["extraPortMappings"] = node.get("extraPortMappings", []) + port_mappings
        config["nodes"].append(node)

    for _ in range(workers):
        config["nodes"].append({"role": "worker"})

    if registry:
        config["containerdConfigPatches"] = [
            "[plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"localhost:5001\"]\n  endpoint = [\"http://kind-registry:5001\"]"
        ]

    config_yaml = yaml.dump(config, default_flow_style=False, sort_keys=False)

    return {
        "success": True,
        "config": config_yaml,
        "summary": {
            "name": name,
            "control_planes": control_planes,
            "workers": workers,
            "total_nodes": control_planes + workers,
            "features": {
                "registry": registry,
                "ingress": ingress,
                "custom_ports": bool(port_mappings)
            }
        }
    }
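
# --- Editor's illustrative sketch, not part of the released file ---
# Generating a one-control-plane, two-worker config and feeding it
# straight into cluster creation; the tempfile plumbing is the editor's:
def _example_generate_and_create() -> None:
    generated = kind_config_generate(workers=2, ingress=True)
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
        f.write(generated["config"])
        config_path = f.name
    result = kind_create_cluster(name="multi", config=config_path)
    print(result.get("message") or result.get("error"))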


def kind_config_show(name: str = "kind") -> Dict[str, Any]:
    """Show effective config for a running cluster.

    Args:
        name: Name of the kind cluster

    Returns:
        Cluster configuration details
    """
    nodes_result = kind_get_nodes(name)
    if not nodes_result["success"]:
        return nodes_result

    nodes = nodes_result.get("nodes", [])
    if not nodes:
        return {"success": False, "error": f"No nodes found for cluster '{name}'"}

    node_configs = []
    for node in nodes:
        inspect_result = _run_docker(
            ["inspect", "--format", '{{json .}}', node],
            timeout=30
        )
        if inspect_result["success"]:
            try:
                node_info = json.loads(inspect_result["output"])
                labels = node_info.get("Config", {}).get("Labels", {})
                ports = node_info.get("HostConfig", {}).get("PortBindings", {})
                node_configs.append({
                    "name": node,
                    "role": labels.get("io.x-k8s.kind.role", "unknown"),
                    "cluster": labels.get("io.x-k8s.kind.cluster", name),
                    "port_mappings": ports
                })
            except json.JSONDecodeError:
                node_configs.append({"name": node, "error": "Failed to parse"})

    return {
        "success": True,
        "cluster": name,
        "nodes": node_configs,
        "total_nodes": len(node_configs)
    }


def kind_available_images() -> Dict[str, Any]:
    """List available kindest/node images (K8s versions).

    Returns:
        List of available node images
    """
    return {
        "success": True,
        "images": KNOWN_KIND_IMAGES,
        "latest": KNOWN_KIND_IMAGES[0] if KNOWN_KIND_IMAGES else None,
        "note": "Use image parameter with kind_create_cluster_tool to specify K8s version"
    }


def kind_registry_create(
    name: str = "kind-registry",
    port: int = 5001
) -> Dict[str, Any]:
    """Create local Docker registry for kind clusters.

    Args:
        name: Name for the registry container
        port: Host port to expose registry on

    Returns:
        Creation result
    """
    check_result = _run_docker(["ps", "-q", "-f", f"name={name}"], timeout=30)
    if check_result["success"] and check_result.get("output"):
        return {
            "success": True,
            "message": f"Registry '{name}' already exists",
            "name": name,
            "port": port
        }

    result = _run_docker([
        "run", "-d",
        "--restart=always",
        "-p", f"127.0.0.1:{port}:5000",
        "--name", name,
        "--network", "bridge",
        "registry:2"
    ], timeout=120)

    if not result["success"]:
        return result

    network_result = _run_docker(["network", "ls", "-q", "-f", "name=kind"], timeout=30)
    if network_result["success"] and network_result.get("output"):
        _run_docker(["network", "connect", "kind", name], timeout=30)

    return {
        "success": True,
        "message": f"Registry '{name}' created successfully",
        "name": name,
        "port": port,
        "endpoint": f"localhost:{port}"
    }
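
# --- Editor's illustrative sketch, not part of the released file ---
# Standing up the registry and pushing through it from the host; the
# "myapp:dev" tag is hypothetical, and the tag/push steps reuse
# _run_docker for symmetry with the rest of the module:
def _example_registry_workflow() -> None:
    print(kind_registry_create()["message"])  # serves on localhost:5001
    _run_docker(["tag", "myapp:dev", "localhost:5001/myapp:dev"], timeout=30)
    _run_docker(["push", "localhost:5001/myapp:dev"], timeout=120)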


def kind_registry_connect(
    cluster_name: str = "kind",
    registry_name: str = "kind-registry"
) -> Dict[str, Any]:
    """Connect kind cluster to local registry.

    Args:
        cluster_name: Name of the kind cluster
        registry_name: Name of the registry container

    Returns:
        Connection result
    """
    network_result = _run_docker(
        ["network", "connect", "kind", registry_name],
        timeout=30
    )

    if not network_result["success"] and "already exists" not in network_result.get("error", ""):
        return network_result

    nodes_result = kind_get_nodes(cluster_name)
    if not nodes_result["success"]:
        return nodes_result

    return {
        "success": True,
        "message": f"Registry '{registry_name}' connected to cluster '{cluster_name}'",
        "cluster": cluster_name,
        "registry": registry_name,
        "usage": f"Tag images as localhost:5001/image:tag and push to registry"
    }


def kind_registry_status(name: str = "kind-registry") -> Dict[str, Any]:
    """Check local registry status.

    Args:
        name: Name of the registry container

    Returns:
        Registry status information
    """
    result = _run_docker(
        ["inspect", "--format", '{{json .}}', name],
        timeout=30
    )

    if not result["success"]:
        return {
            "success": False,
            "error": f"Registry '{name}' not found",
            "installed": False
        }

    try:
        info = json.loads(result["output"])
        state = info.get("State", {})
        network_settings = info.get("NetworkSettings", {})
        ports = network_settings.get("Ports", {})

        host_port = None
        for port_binding in ports.get("5000/tcp", []) or []:
            host_port = port_binding.get("HostPort")
            break

        networks = list(network_settings.get("Networks", {}).keys())

        return {
            "success": True,
            "name": name,
            "running": state.get("Running", False),
            "status": state.get("Status", "unknown"),
            "port": host_port,
            "networks": networks,
            "connected_to_kind": "kind" in networks
        }
    except json.JSONDecodeError:
        return {"success": False, "error": "Failed to parse registry info"}


def kind_node_exec(
    node: str,
    command: str,
    cluster: str = "kind"
) -> Dict[str, Any]:
    """Execute command on kind node container.

    Args:
        node: Node name (e.g., kind-control-plane)
        command: Command to execute
        cluster: Cluster name (for validation)

    Returns:
        Command execution result
    """
    if not node:
        return {"success": False, "error": "Node name is required"}
    if not command:
        return {"success": False, "error": "Command is required"}

    nodes_result = kind_get_nodes(cluster)
    if nodes_result["success"] and node not in nodes_result.get("nodes", []):
        return {
            "success": False,
            "error": f"Node '{node}' not found in cluster '{cluster}'",
            "available_nodes": nodes_result.get("nodes", [])
        }

    result = _run_docker(
        ["exec", node] + command.split(),
        timeout=120
    )

    if result["success"]:
        return {
            "success": True,
            "node": node,
            "command": command,
            "output": result.get("output", "")
        }

    return result
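
# --- Editor's illustrative sketch, not part of the released file ---
# Poking at a node's container runtime from the host. Note that
# command.split() above means quoted shell arguments are not preserved:
def _example_node_exec() -> None:
    out = kind_node_exec("kind-control-plane", "crictl ps")
    print(out.get("output") or out.get("error"))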


def kind_node_logs(
    node: str,
    tail: int = 100
) -> Dict[str, Any]:
    """Get logs from kind node container.

    Args:
        node: Node name
        tail: Number of lines to return

    Returns:
        Node container logs
    """
    if not node:
        return {"success": False, "error": "Node name is required"}

    result = _run_docker(
        ["logs", "--tail", str(tail), node],
        timeout=60
    )

    if result["success"]:
        return {
            "success": True,
            "node": node,
            "logs": result.get("output", "")
        }

    return result


def kind_node_inspect(node: str) -> Dict[str, Any]:
    """Inspect kind node container details.

    Args:
        node: Node name

    Returns:
        Node container details
    """
    if not node:
        return {"success": False, "error": "Node name is required"}

    result = _run_docker(
        ["inspect", "--format", '{{json .}}', node],
        timeout=30
    )

    if not result["success"]:
        return result

    try:
        info = json.loads(result["output"])
        state = info.get("State", {})
        config = info.get("Config", {})
        network_settings = info.get("NetworkSettings", {})
        host_config = info.get("HostConfig", {})

        return {
            "success": True,
            "node": node,
            "state": {
                "running": state.get("Running", False),
                "status": state.get("Status", "unknown"),
                "started_at": state.get("StartedAt"),
                "pid": state.get("Pid")
            },
            "image": config.get("Image"),
            "labels": config.get("Labels", {}),
            "ip_address": network_settings.get("IPAddress", ""),
            "networks": list(network_settings.get("Networks", {}).keys()),
            "port_bindings": host_config.get("PortBindings", {}),
            "mounts": [
                {"source": m.get("Source"), "destination": m.get("Destination")}
                for m in info.get("Mounts", [])
            ]
        }
    except json.JSONDecodeError:
        return {"success": False, "error": "Failed to parse node info"}


def kind_node_restart(node: str) -> Dict[str, Any]:
    """Restart kind node container.

    Args:
        node: Node name to restart

    Returns:
        Restart result
    """
    if not node:
        return {"success": False, "error": "Node name is required"}

    result = _run_docker(["restart", node], timeout=120)

    if result["success"]:
        return {
            "success": True,
            "message": f"Node '{node}' restarted successfully",
            "node": node
        }

    return result


def kind_network_inspect(cluster: str = "kind") -> Dict[str, Any]:
    """Inspect kind Docker network.

    Args:
        cluster: Cluster name (kind network is shared)

    Returns:
        Network details
    """
    result = _run_docker(
        ["network", "inspect", "kind"],
        timeout=30
    )

    if not result["success"]:
        return {"success": False, "error": "kind network not found. Is any cluster running?"}

    try:
        info = json.loads(result["output"])
        if isinstance(info, list) and len(info) > 0:
            info = info[0]

        ipam = info.get("IPAM", {}).get("Config", [{}])[0]
        containers = {}
        for container_id, container_info in info.get("Containers", {}).items():
            containers[container_info.get("Name", container_id[:12])] = {
                "ip": container_info.get("IPv4Address", "").split("/")[0],
                "mac": container_info.get("MacAddress")
            }

        return {
            "success": True,
            "name": info.get("Name", "kind"),
            "driver": info.get("Driver"),
            "scope": info.get("Scope"),
            "subnet": ipam.get("Subnet"),
            "gateway": ipam.get("Gateway"),
            "containers": containers,
            "container_count": len(containers)
        }
    except (json.JSONDecodeError, IndexError):
        return {"success": False, "error": "Failed to parse network info"}


def kind_port_mappings(cluster: str = "kind") -> Dict[str, Any]:
    """List all port mappings for cluster.

    Args:
        cluster: Cluster name

    Returns:
        Port mappings for all nodes
    """
    nodes_result = kind_get_nodes(cluster)
    if not nodes_result["success"]:
        return nodes_result

    mappings = {}
    for node in nodes_result.get("nodes", []):
        inspect_result = _run_docker(
            ["inspect", "--format", '{{json .HostConfig.PortBindings}}', node],
            timeout=30
        )
        if inspect_result["success"]:
            try:
                ports = json.loads(inspect_result["output"]) or {}
                node_ports = []
                for container_port, bindings in ports.items():
                    for binding in bindings or []:
                        node_ports.append({
                            "container_port": container_port,
                            "host_ip": binding.get("HostIp", "0.0.0.0"),
                            "host_port": binding.get("HostPort")
                        })
                if node_ports:
                    mappings[node] = node_ports
            except json.JSONDecodeError:
                pass

    return {
        "success": True,
        "cluster": cluster,
        "port_mappings": mappings,
        "has_mappings": len(mappings) > 0
    }


def kind_ingress_setup(
    cluster: str = "kind",
    ingress_type: str = "nginx"
) -> Dict[str, Any]:
    """Setup ingress controller on kind cluster.

    Args:
        cluster: Cluster name
        ingress_type: Type of ingress (nginx, contour)

    Returns:
        Setup result
    """
    if ingress_type not in ["nginx", "contour"]:
        return {"success": False, "error": f"Unsupported ingress type: {ingress_type}"}

    clusters_result = kind_list_clusters()
    if not clusters_result["success"]:
        return clusters_result
    if cluster not in clusters_result.get("clusters", []):
        return {"success": False, "error": f"Cluster '{cluster}' not found"}

    if ingress_type == "nginx":
        manifest_url = "https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml"
    else:
        manifest_url = "https://projectcontour.io/quickstart/contour.yaml"

    try:
        result = subprocess.run(
            ["kubectl", "apply", "-f", manifest_url, "--context", f"kind-{cluster}"],
            capture_output=True,
            text=True,
            timeout=120
        )
        if result.returncode == 0:
            return {
                "success": True,
                "message": f"{ingress_type.title()} ingress controller installed on '{cluster}'",
                "cluster": cluster,
                "ingress_type": ingress_type,
                "manifest": manifest_url,
                "next_steps": [
                    "Wait for ingress controller pods to be ready",
                    "Create Ingress resources to expose services",
                    f"Access via localhost (ports 80/443 if configured)"
                ]
            }
        return {"success": False, "error": result.stderr.strip()}
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "kubectl apply timed out"}
    except FileNotFoundError:
        return {"success": False, "error": "kubectl not found"}
    except Exception as e:
        return {"success": False, "error": str(e)}


def kind_cluster_status(name: str = "kind") -> Dict[str, Any]:
    """Get detailed cluster health status.

    Args:
        name: Cluster name

    Returns:
        Comprehensive cluster status
    """
    clusters_result = kind_list_clusters()
    if not clusters_result["success"]:
        return clusters_result
    if name not in clusters_result.get("clusters", []):
        return {
            "success": False,
            "error": f"Cluster '{name}' not found",
            "available_clusters": clusters_result.get("clusters", [])
        }

    nodes_result = kind_get_nodes(name)
    node_statuses = []

    for node in nodes_result.get("nodes", []):
        inspect_result = _run_docker(
            ["inspect", "--format", '{{json .State}}', node],
            timeout=30
        )
        if inspect_result["success"]:
            try:
                state = json.loads(inspect_result["output"])
                node_statuses.append({
                    "name": node,
                    "running": state.get("Running", False),
                    "status": state.get("Status", "unknown")
                })
            except json.JSONDecodeError:
                node_statuses.append({"name": node, "running": False, "status": "unknown"})

    all_running = all(n.get("running", False) for n in node_statuses)

    try:
        kubectl_result = subprocess.run(
            ["kubectl", "get", "nodes", "-o", "json", "--context", f"kind-{name}"],
            capture_output=True,
            text=True,
            timeout=30
        )
        k8s_nodes = []
        if kubectl_result.returncode == 0:
            nodes_data = json.loads(kubectl_result.stdout)
            for item in nodes_data.get("items", []):
                conditions = {c["type"]: c["status"] for c in item.get("status", {}).get("conditions", [])}
                k8s_nodes.append({
                    "name": item.get("metadata", {}).get("name"),
                    "ready": conditions.get("Ready") == "True",
                    "conditions": conditions
                })
    except Exception:
        k8s_nodes = []

    return {
        "success": True,
        "cluster": name,
        "healthy": all_running,
        "container_nodes": node_statuses,
        "kubernetes_nodes": k8s_nodes,
        "summary": {
            "total_nodes": len(node_statuses),
            "running_containers": sum(1 for n in node_statuses if n.get("running")),
            "ready_k8s_nodes": sum(1 for n in k8s_nodes if n.get("ready"))
        }
    }
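
# --- Editor's illustrative sketch, not part of the released file ---
# A simple readiness gate built on the status call, e.g. for CI scripts;
# the attempt count and sleep interval are arbitrary:
def _example_wait_until_healthy(name: str = "kind", attempts: int = 10) -> bool:
    import time
    for _ in range(attempts):
        status = kind_cluster_status(name)
        summary = status.get("summary", {})
        if status.get("healthy") and summary.get("ready_k8s_nodes") == summary.get("total_nodes"):
            return True
        time.sleep(5)
    return False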


def kind_images_list(
    cluster: str = "kind",
    node: str = ""
) -> Dict[str, Any]:
    """List images on cluster nodes.

    Args:
        cluster: Cluster name
        node: Specific node (optional, defaults to first control-plane)

    Returns:
        List of images on the node
    """
    if not node:
        nodes_result = kind_get_nodes(cluster)
        if not nodes_result["success"]:
            return nodes_result
        nodes = nodes_result.get("nodes", [])
        node = next((n for n in nodes if "control-plane" in n), nodes[0] if nodes else None)
        if not node:
            return {"success": False, "error": f"No nodes found in cluster '{cluster}'"}

    result = _run_docker(
        ["exec", node, "crictl", "images", "-o", "json"],
        timeout=60
    )

    if not result["success"]:
        result = _run_docker(
            ["exec", node, "crictl", "images"],
            timeout=60
        )
        if result["success"]:
            return {
                "success": True,
                "node": node,
                "cluster": cluster,
                "images_raw": result.get("output", "")
            }
        return result

    try:
        images_data = json.loads(result["output"])
        images = []
        for img in images_data.get("images", []):
            tags = img.get("repoTags", [])
            images.append({
                "id": img.get("id", "")[:12],
                "tags": tags,
                "size": img.get("size")
            })
        return {
            "success": True,
            "node": node,
            "cluster": cluster,
            "images": images,
            "total": len(images)
        }
    except json.JSONDecodeError:
        return {"success": True, "node": node, "images_raw": result.get("output", "")}


def kind_provider_info() -> Dict[str, Any]:
    """Get container runtime provider info.

    Returns:
        Provider (Docker/Podman) details
    """
    provider = os.environ.get("KIND_EXPERIMENTAL_PROVIDER", "docker")

    version_result = _run_docker(["version", "--format", "{{json .}}"], timeout=30)

    if not version_result["success"]:
        return {
            "success": False,
            "error": "Failed to get provider info",
            "provider": provider
        }

    try:
        version_info = json.loads(version_result["output"])
        client = version_info.get("Client", {})
        server = version_info.get("Server", {})

        return {
            "success": True,
            "provider": provider,
            "client_version": client.get("Version"),
            "server_version": server.get("Version"),
            "api_version": client.get("ApiVersion"),
            "os": client.get("Os"),
            "arch": client.get("Arch"),
            "experimental": os.environ.get("KIND_EXPERIMENTAL_PROVIDER") is not None
        }
    except json.JSONDecodeError:
        return {
            "success": True,
            "provider": provider,
            "raw_output": version_result.get("output", "")
        }
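
# --- Editor's illustrative sketch, not part of the released file ---
# kind selects its runtime via KIND_EXPERIMENTAL_PROVIDER, which this
# function mirrors; opting into Podman looks like:
def _example_use_podman_provider() -> None:
    os.environ["KIND_EXPERIMENTAL_PROVIDER"] = "podman"
    print(kind_provider_info().get("provider"))  # -> "podman"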


def register_kind_tools(mcp: FastMCP, non_destructive: bool = False):
    """Register kind (Kubernetes IN Docker) tools with the MCP server."""

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_detect_tool() -> str:
        """Detect if kind CLI is installed and get version info."""
        return json.dumps(kind_detect(), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_version_tool() -> str:
        """Get kind CLI version information."""
        return json.dumps(kind_version(), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_list_clusters_tool() -> str:
        """List all kind clusters."""
        return json.dumps(kind_list_clusters(), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_get_nodes_tool(name: str = "kind") -> str:
        """List nodes in a kind cluster.

        Args:
            name: Name of the kind cluster (default: kind)
        """
        return json.dumps(kind_get_nodes(name), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_get_kubeconfig_tool(
        name: str = "kind",
        internal: bool = False
    ) -> str:
        """Get kubeconfig for a kind cluster.

        Args:
            name: Name of the kind cluster
            internal: Return internal (container) kubeconfig instead of external
        """
        return json.dumps(kind_get_kubeconfig(name, internal), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_export_logs_tool(
        name: str = "kind",
        output_dir: str = ""
    ) -> str:
        """Export cluster logs for debugging.

        Args:
            name: Name of the kind cluster
            output_dir: Directory to export logs to (default: temp directory)
        """
        return json.dumps(kind_export_logs(name, output_dir), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_cluster_info_tool(name: str = "kind") -> str:
        """Get cluster information including nodes and kubeconfig.

        Args:
            name: Name of the kind cluster
        """
        return json.dumps(kind_cluster_info(name), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_node_labels_tool(name: str = "kind") -> str:
        """Get node labels for kind cluster nodes.

        Args:
            name: Name of the kind cluster
        """
        return json.dumps(kind_node_labels(name), indent=2)

    @mcp.tool()
    def kind_create_cluster_tool(
        name: str = "kind",
        image: str = "",
        config: str = "",
        wait: str = "5m",
        retain: bool = False
    ) -> str:
        """Create a new kind cluster.

        Args:
            name: Name for the new cluster (default: kind)
            image: Node image (determines K8s version, e.g., kindest/node:v1.29.0)
            config: Path to kind config YAML file for multi-node or custom setup
            wait: Wait timeout for control plane (default: 5m)
            retain: Retain nodes on creation failure for debugging
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_create_cluster(name, image, config, wait, retain), indent=2)

    @mcp.tool()
    def kind_delete_cluster_tool(name: str = "kind") -> str:
        """Delete a kind cluster.

        Args:
            name: Name of the cluster to delete
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_delete_cluster(name), indent=2)

    @mcp.tool()
    def kind_delete_all_clusters_tool() -> str:
        """Delete all kind clusters."""
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_delete_all_clusters(), indent=2)

    @mcp.tool()
    def kind_load_image_tool(
        images: str,
        name: str = "kind"
    ) -> str:
        """Load Docker images into kind cluster nodes.

        This is a key feature for local development - load locally built
        images directly into the cluster without pushing to a registry.

        Args:
            images: Comma-separated list of Docker image names to load
            name: Name of the kind cluster
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        image_list = [img.strip() for img in images.split(",") if img.strip()]
        return json.dumps(kind_load_image(image_list, name), indent=2)

    @mcp.tool()
    def kind_load_image_archive_tool(
        archive: str,
        name: str = "kind"
    ) -> str:
        """Load Docker images from tar archive into kind cluster.

        Args:
            archive: Path to image archive (tar file)
            name: Name of the kind cluster
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_load_image_archive(archive, name), indent=2)

    @mcp.tool()
    def kind_build_node_image_tool(
        image: str = "",
        base_image: str = "",
        kube_root: str = ""
    ) -> str:
        """Build a kind node image from Kubernetes source.

        This is an advanced feature for testing custom Kubernetes builds.

        Args:
            image: Name for the resulting image (default: kindest/node:latest)
            base_image: Base image to use
            kube_root: Path to Kubernetes source root
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_build_node_image(image, base_image, kube_root), indent=2)

    @mcp.tool()
    def kind_set_kubeconfig_tool(name: str = "kind") -> str:
        """Export kubeconfig and set as current context.

        This updates your KUBECONFIG to add the kind cluster context.

        Args:
            name: Name of the kind cluster
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        result = _run_kind(["export", "kubeconfig", "--name", name], timeout=30)
        if result["success"]:
            return json.dumps({
                "success": True,
                "message": f"Kubeconfig exported and context set for cluster '{name}'",
                "output": result.get("output", "")
            }, indent=2)
        return json.dumps(result, indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_config_validate_tool(config_path: str) -> str:
        """Validate kind configuration file before cluster creation.

        Args:
            config_path: Path to kind config YAML file
        """
        return json.dumps(kind_config_validate(config_path), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_config_generate_tool(
        name: str = "kind",
        workers: int = 0,
        control_planes: int = 1,
        registry: bool = False,
        ingress: bool = False
    ) -> str:
        """Generate kind config YAML for common scenarios.

        Args:
            name: Cluster name (for reference)
            workers: Number of worker nodes (default: 0)
            control_planes: Number of control-plane nodes (1 for single, 3 for HA)
            registry: Add local registry configuration
            ingress: Add port mappings for ingress (80, 443)
        """
        return json.dumps(kind_config_generate(
            name, workers, control_planes, registry, ingress
        ), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_config_show_tool(name: str = "kind") -> str:
        """Show effective config for a running cluster.

        Args:
            name: Name of the kind cluster
        """
        return json.dumps(kind_config_show(name), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_available_images_tool() -> str:
        """List available kindest/node images (K8s versions)."""
        return json.dumps(kind_available_images(), indent=2)

    @mcp.tool()
    def kind_registry_create_tool(
        name: str = "kind-registry",
        port: int = 5001
    ) -> str:
        """Create local Docker registry for kind clusters.

        Args:
            name: Name for the registry container
            port: Host port to expose registry on
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_registry_create(name, port), indent=2)

    @mcp.tool()
    def kind_registry_connect_tool(
        cluster_name: str = "kind",
        registry_name: str = "kind-registry"
    ) -> str:
        """Connect kind cluster to local registry.

        Args:
            cluster_name: Name of the kind cluster
            registry_name: Name of the registry container
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_registry_connect(cluster_name, registry_name), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_registry_status_tool(name: str = "kind-registry") -> str:
        """Check local registry status.

        Args:
            name: Name of the registry container
        """
        return json.dumps(kind_registry_status(name), indent=2)

    @mcp.tool()
    def kind_node_exec_tool(
        node: str,
        command: str,
        cluster: str = "kind"
    ) -> str:
        """Execute command on kind node container.

        Useful for debugging with crictl, journalctl, systemctl.

        Args:
            node: Node name (e.g., kind-control-plane)
            command: Command to execute
            cluster: Cluster name (for validation)
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_node_exec(node, command, cluster), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_node_logs_tool(node: str, tail: int = 100) -> str:
        """Get logs from kind node container.

        Args:
            node: Node name
            tail: Number of lines to return
        """
        return json.dumps(kind_node_logs(node, tail), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_node_inspect_tool(node: str) -> str:
        """Inspect kind node container details.

        Args:
            node: Node name
        """
        return json.dumps(kind_node_inspect(node), indent=2)

    @mcp.tool()
    def kind_node_restart_tool(node: str) -> str:
        """Restart kind node container.

        Args:
            node: Node name to restart
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_node_restart(node), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_network_inspect_tool(cluster: str = "kind") -> str:
        """Inspect kind Docker network.

        Args:
            cluster: Cluster name (kind network is shared)
        """
        return json.dumps(kind_network_inspect(cluster), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_port_mappings_tool(cluster: str = "kind") -> str:
        """List all port mappings for cluster.

        Args:
            cluster: Cluster name
        """
        return json.dumps(kind_port_mappings(cluster), indent=2)

    @mcp.tool()
    def kind_ingress_setup_tool(
        cluster: str = "kind",
        ingress_type: str = "nginx"
    ) -> str:
        """Setup ingress controller on kind cluster.

        Args:
            cluster: Cluster name
            ingress_type: Type of ingress (nginx or contour)
        """
        if non_destructive:
            return json.dumps({"success": False, "error": "Operation blocked: non-destructive mode"})
        return json.dumps(kind_ingress_setup(cluster, ingress_type), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_cluster_status_tool(name: str = "kind") -> str:
        """Get detailed cluster health status.

        Args:
            name: Cluster name
        """
        return json.dumps(kind_cluster_status(name), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_images_list_tool(cluster: str = "kind", node: str = "") -> str:
        """List images on cluster nodes.

        Args:
            cluster: Cluster name
            node: Specific node (optional, defaults to control-plane)
        """
        return json.dumps(kind_images_list(cluster, node), indent=2)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
    def kind_provider_info_tool() -> str:
        """Get container runtime provider info (Docker/Podman)."""
        return json.dumps(kind_provider_info(), indent=2)
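
# --- Editor's illustrative sketch, not part of the released file ---
# How this entry point plausibly gets wired in: the same diff touches
# kubectl_mcp_tool/mcp_server.py (+5 -1), which presumably makes a call
# along these lines. The server name string is a guess:
def _example_register() -> None:
    mcp = FastMCP("kubectl-mcp-server")
    register_kind_tools(mcp, non_destructive=True)  # blocks create/delete/load tools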