kubectl-mcp-server 1.18.0__py3-none-any.whl → 1.19.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,5 @@
1
- """Policy toolset for kubectl-mcp-server.
1
+ """Policy toolset for kubectl-mcp-server (Kyverno and Gatekeeper)."""
2
2
 
3
- Provides tools for managing Kyverno and Gatekeeper policies.
4
- """
5
-
6
- import subprocess
7
3
  import json
8
4
  from typing import Dict, Any, List, Optional
9
5
 
@@ -14,8 +10,8 @@ except ImportError:
14
10
  from mcp.server.fastmcp import FastMCP
15
11
  from mcp.types import ToolAnnotations
16
12
 
17
- from ..k8s_config import _get_kubectl_context_args
18
13
  from ..crd_detector import crd_exists
14
+ from .utils import run_kubectl, get_resources
19
15
 
20
16
 
21
17
  KYVERNO_CLUSTER_POLICY_CRD = "clusterpolicies.kyverno.io"
@@ -26,40 +22,6 @@ GATEKEEPER_CONSTRAINT_TEMPLATE_CRD = "constrainttemplates.templates.gatekeeper.s
26
22
  GATEKEEPER_CONFIG_CRD = "configs.config.gatekeeper.sh"
27
23
 
28
24
 
29
- def _run_kubectl(args: List[str], context: str = "") -> Dict[str, Any]:
30
- """Run kubectl command and return result."""
31
- cmd = ["kubectl"] + _get_kubectl_context_args(context) + args
32
- try:
33
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
34
- if result.returncode == 0:
35
- return {"success": True, "output": result.stdout}
36
- return {"success": False, "error": result.stderr}
37
- except subprocess.TimeoutExpired:
38
- return {"success": False, "error": "Command timed out"}
39
- except Exception as e:
40
- return {"success": False, "error": str(e)}
41
-
42
-
43
- def _get_resources(kind: str, namespace: str = "", context: str = "", label_selector: str = "") -> List[Dict]:
44
- """Get Kubernetes resources of a specific kind."""
45
- args = ["get", kind, "-o", "json"]
46
- if namespace:
47
- args.extend(["-n", namespace])
48
- else:
49
- args.append("-A")
50
- if label_selector:
51
- args.extend(["-l", label_selector])
52
-
53
- result = _run_kubectl(args, context)
54
- if result["success"]:
55
- try:
56
- data = json.loads(result["output"])
57
- return data.get("items", [])
58
- except json.JSONDecodeError:
59
- return []
60
- return []
61
-
62
-
63
25
  def _get_condition(conditions: List[Dict], condition_type: str) -> Optional[Dict]:
64
26
  """Get a specific condition from conditions list."""
65
27
  return next((c for c in conditions if c.get("type") == condition_type), None)
@@ -86,7 +48,7 @@ def policy_list(
86
48
 
87
49
  if engine in ("kyverno", "all"):
88
50
  if crd_exists(KYVERNO_CLUSTER_POLICY_CRD, context):
89
- for item in _get_resources("clusterpolicies.kyverno.io", "", context, label_selector):
51
+ for item in get_resources("clusterpolicies.kyverno.io", "", context, label_selector):
90
52
  status = item.get("status", {})
91
53
  conditions = status.get("conditions", [])
92
54
  ready_cond = _get_condition(conditions, "Ready")
@@ -105,7 +67,7 @@ def policy_list(
105
67
  })
106
68
 
107
69
  if crd_exists(KYVERNO_POLICY_CRD, context):
108
- for item in _get_resources("policies.kyverno.io", namespace, context, label_selector):
70
+ for item in get_resources("policies.kyverno.io", namespace, context, label_selector):
109
71
  status = item.get("status", {})
110
72
  conditions = status.get("conditions", [])
111
73
  ready_cond = _get_condition(conditions, "Ready")
@@ -125,7 +87,7 @@ def policy_list(
125
87
 
126
88
  if engine in ("gatekeeper", "all"):
127
89
  if crd_exists(GATEKEEPER_CONSTRAINT_TEMPLATE_CRD, context):
128
- for item in _get_resources("constrainttemplates.templates.gatekeeper.sh", "", context, label_selector):
90
+ for item in get_resources("constrainttemplates.templates.gatekeeper.sh", "", context, label_selector):
129
91
  status = item.get("status", {})
130
92
  spec = item.get("spec", {})
131
93
 
@@ -158,14 +120,14 @@ def _get_gatekeeper_constraints(context: str = "") -> List[Dict]:
158
120
  """Get all Gatekeeper constraints dynamically."""
159
121
  constraints = []
160
122
 
161
- templates = _get_resources("constrainttemplates.templates.gatekeeper.sh", "", context)
123
+ templates = get_resources("constrainttemplates.templates.gatekeeper.sh", "", context)
162
124
  for template in templates:
163
125
  crd_kind = template.get("spec", {}).get("crd", {}).get("spec", {}).get("names", {}).get("kind", "")
164
126
  if not crd_kind:
165
127
  continue
166
128
 
167
129
  try:
168
- constraint_items = _get_resources(crd_kind.lower(), "", context)
130
+ constraint_items = get_resources(crd_kind.lower(), "", context)
169
131
  for item in constraint_items:
170
132
  status = item.get("status", {})
171
133
  spec = item.get("spec", {})
@@ -222,7 +184,7 @@ def policy_get(
222
184
  else:
223
185
  args = ["get", k8s_kind, name, "-o", "json"]
224
186
 
225
- result = _run_kubectl(args, context)
187
+ result = run_kubectl(args, context)
226
188
 
227
189
  if result["success"]:
228
190
  try:
@@ -259,7 +221,7 @@ def policy_violations_list(
259
221
 
260
222
  if engine in ("kyverno", "all"):
261
223
  if crd_exists(KYVERNO_POLICY_REPORT_CRD, context):
262
- for report in _get_resources("policyreports.wgpolicyk8s.io", namespace, context):
224
+ for report in get_resources("policyreports.wgpolicyk8s.io", namespace, context):
263
225
  results = report.get("results", [])
264
226
  for result in results:
265
227
  if result.get("result") in ("fail", "error"):
@@ -279,7 +241,7 @@ def policy_violations_list(
279
241
  })
280
242
 
281
243
  if crd_exists(KYVERNO_CLUSTER_POLICY_REPORT_CRD, context):
282
- for report in _get_resources("clusterpolicyreports.wgpolicyk8s.io", "", context):
244
+ for report in get_resources("clusterpolicyreports.wgpolicyk8s.io", "", context):
283
245
  results = report.get("results", [])
284
246
  for result in results:
285
247
  if result.get("result") in ("fail", "error"):
@@ -350,7 +312,7 @@ def policy_explain_denial(
350
312
  message_lower = message.lower()
351
313
 
352
314
  if crd_exists(KYVERNO_CLUSTER_POLICY_CRD, context):
353
- for policy in _get_resources("clusterpolicies.kyverno.io", "", context):
315
+ for policy in get_resources("clusterpolicies.kyverno.io", "", context):
354
316
  policy_name = policy["metadata"]["name"]
355
317
  if policy_name.lower() in message_lower:
356
318
  spec = policy.get("spec", {})
@@ -1,7 +1,4 @@
1
- """Argo Rollouts and Flagger progressive delivery toolset for kubectl-mcp-server.
2
-
3
- Provides tools for managing canary deployments, blue-green deployments, and progressive delivery.
4
- """
1
+ """Argo Rollouts and Flagger progressive delivery toolset for kubectl-mcp-server."""
5
2
 
6
3
  import subprocess
7
4
  import json
@@ -15,57 +12,20 @@ except ImportError:
15
12
  from mcp.server.fastmcp import FastMCP
16
13
  from mcp.types import ToolAnnotations
17
14
 
18
- from ..k8s_config import _get_kubectl_context_args
19
15
  from ..crd_detector import crd_exists
16
+ from .utils import run_kubectl, get_resources
20
17
 
21
18
 
22
- # Argo Rollouts CRDs
23
19
  ARGO_ROLLOUT_CRD = "rollouts.argoproj.io"
24
20
  ARGO_ANALYSIS_TEMPLATE_CRD = "analysistemplates.argoproj.io"
25
21
  ARGO_CLUSTER_ANALYSIS_TEMPLATE_CRD = "clusteranalysistemplates.argoproj.io"
26
22
  ARGO_ANALYSIS_RUN_CRD = "analysisruns.argoproj.io"
27
23
  ARGO_EXPERIMENT_CRD = "experiments.argoproj.io"
28
-
29
- # Flagger CRDs
30
24
  FLAGGER_CANARY_CRD = "canaries.flagger.app"
31
25
  FLAGGER_METRIC_TEMPLATE_CRD = "metrictemplates.flagger.app"
32
26
  FLAGGER_ALERT_PROVIDER_CRD = "alertproviders.flagger.app"
33
27
 
34
28
 
35
- def _run_kubectl(args: List[str], context: str = "") -> Dict[str, Any]:
36
- """Run kubectl command and return result."""
37
- cmd = ["kubectl"] + _get_kubectl_context_args(context) + args
38
- try:
39
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
40
- if result.returncode == 0:
41
- return {"success": True, "output": result.stdout}
42
- return {"success": False, "error": result.stderr}
43
- except subprocess.TimeoutExpired:
44
- return {"success": False, "error": "Command timed out"}
45
- except Exception as e:
46
- return {"success": False, "error": str(e)}
47
-
48
-
49
- def _get_resources(kind: str, namespace: str = "", context: str = "", label_selector: str = "") -> List[Dict]:
50
- """Get Kubernetes resources of a specific kind."""
51
- args = ["get", kind, "-o", "json"]
52
- if namespace:
53
- args.extend(["-n", namespace])
54
- else:
55
- args.append("-A")
56
- if label_selector:
57
- args.extend(["-l", label_selector])
58
-
59
- result = _run_kubectl(args, context)
60
- if result["success"]:
61
- try:
62
- data = json.loads(result["output"])
63
- return data.get("items", [])
64
- except json.JSONDecodeError:
65
- return []
66
- return []
67
-
68
-
69
29
  def _argo_rollouts_cli_available() -> bool:
70
30
  """Check if kubectl-argo-rollouts plugin is available."""
71
31
  try:
@@ -76,8 +36,6 @@ def _argo_rollouts_cli_available() -> bool:
76
36
  return False
77
37
 
78
38
 
79
- # ============== Argo Rollouts Functions ==============
80
-
81
39
  def rollouts_list(
82
40
  namespace: str = "",
83
41
  context: str = "",
@@ -100,11 +58,9 @@ def rollouts_list(
100
58
  }
101
59
 
102
60
  rollouts = []
103
- for item in _get_resources("rollouts.argoproj.io", namespace, context, label_selector):
61
+ for item in get_resources("rollouts.argoproj.io", namespace, context, label_selector):
104
62
  status = item.get("status", {})
105
63
  spec = item.get("spec", {})
106
-
107
- # Determine strategy
108
64
  strategy_spec = spec.get("strategy", {})
109
65
  if "canary" in strategy_spec:
110
66
  strategy = "canary"
@@ -116,7 +72,6 @@ def rollouts_list(
116
72
  strategy = "unknown"
117
73
  strategy_details = {}
118
74
 
119
- # Get conditions
120
75
  conditions = status.get("conditions", [])
121
76
  available_cond = next((c for c in conditions if c.get("type") == "Available"), {})
122
77
  progressing_cond = next((c for c in conditions if c.get("type") == "Progressing"), {})
@@ -142,7 +97,6 @@ def rollouts_list(
142
97
  "aborted": status.get("abort", False),
143
98
  })
144
99
 
145
- # Summary
146
100
  healthy = sum(1 for r in rollouts if r["phase"] == "Healthy")
147
101
  progressing = sum(1 for r in rollouts if r["phase"] == "Progressing")
148
102
  paused = sum(1 for r in rollouts if r["paused"])
@@ -176,7 +130,7 @@ def rollout_get(
176
130
  return {"success": False, "error": "Argo Rollouts is not installed"}
177
131
 
178
132
  args = ["get", "rollouts.argoproj.io", name, "-n", namespace, "-o", "json"]
179
- result = _run_kubectl(args, context)
133
+ result = run_kubectl(args, context)
180
134
 
181
135
  if result["success"]:
182
136
  try:
@@ -216,7 +170,6 @@ def rollout_status(
216
170
  spec = rollout.get("spec", {})
217
171
  strategy_spec = spec.get("strategy", {})
218
172
 
219
- # Determine strategy
220
173
  if "canary" in strategy_spec:
221
174
  strategy = "canary"
222
175
  steps = strategy_spec.get("canary", {}).get("steps", [])
@@ -228,8 +181,6 @@ def rollout_status(
228
181
  steps = []
229
182
 
230
183
  current_step = status.get("currentStepIndex", 0)
231
-
232
- # Parse steps
233
184
  step_info = []
234
185
  for i, step in enumerate(steps):
235
186
  step_type = list(step.keys())[0] if step else "unknown"
@@ -314,7 +265,7 @@ def rollout_promote(
314
265
  "--type=merge",
315
266
  "-p", json.dumps(patch)
316
267
  ]
317
- result = _run_kubectl(args, context)
268
+ result = run_kubectl(args, context)
318
269
 
319
270
  if result["success"]:
320
271
  return {
@@ -370,7 +321,7 @@ def rollout_abort(
370
321
  "--type=merge",
371
322
  "-p", json.dumps(patch)
372
323
  ]
373
- result = _run_kubectl(args, context)
324
+ result = run_kubectl(args, context)
374
325
 
375
326
  if result["success"]:
376
327
  return {
@@ -426,7 +377,7 @@ def rollout_retry(
426
377
  "--type=merge",
427
378
  "-p", json.dumps(patch)
428
379
  ]
429
- result = _run_kubectl(args, context)
380
+ result = run_kubectl(args, context)
430
381
 
431
382
  if result["success"]:
432
383
  return {
@@ -487,7 +438,7 @@ def rollout_restart(
487
438
  "--type=merge",
488
439
  "-p", json.dumps(patch)
489
440
  ]
490
- result = _run_kubectl(args, context)
441
+ result = run_kubectl(args, context)
491
442
 
492
443
  if result["success"]:
493
444
  return {
@@ -521,7 +472,7 @@ def analysis_runs_list(
521
472
  }
522
473
 
523
474
  runs = []
524
- for item in _get_resources("analysisruns.argoproj.io", namespace, context, label_selector):
475
+ for item in get_resources("analysisruns.argoproj.io", namespace, context, label_selector):
525
476
  status = item.get("status", {})
526
477
  spec = item.get("spec", {})
527
478
 
@@ -551,8 +502,6 @@ def analysis_runs_list(
551
502
  }
552
503
 
553
504
 
554
- # ============== Flagger Functions ==============
555
-
556
505
  def flagger_canaries_list(
557
506
  namespace: str = "",
558
507
  context: str = "",
@@ -575,7 +524,7 @@ def flagger_canaries_list(
575
524
  }
576
525
 
577
526
  canaries = []
578
- for item in _get_resources("canaries.flagger.app", namespace, context, label_selector):
527
+ for item in get_resources("canaries.flagger.app", namespace, context, label_selector):
579
528
  status = item.get("status", {})
580
529
  spec = item.get("spec", {})
581
530
  analysis = spec.get("analysis", {})
@@ -597,7 +546,6 @@ def flagger_canaries_list(
597
546
  "last_transition_time": status.get("lastTransitionTime", ""),
598
547
  })
599
548
 
600
- # Summary
601
549
  progressing = sum(1 for c in canaries if c["phase"] == "Progressing")
602
550
  succeeded = sum(1 for c in canaries if c["phase"] == "Succeeded")
603
551
  failed = sum(1 for c in canaries if c["phase"] == "Failed")
@@ -631,7 +579,7 @@ def flagger_canary_get(
631
579
  return {"success": False, "error": "Flagger is not installed"}
632
580
 
633
581
  args = ["get", "canaries.flagger.app", name, "-n", namespace, "-o", "json"]
634
- result = _run_kubectl(args, context)
582
+ result = run_kubectl(args, context)
635
583
 
636
584
  if result["success"]:
637
585
  try:
@@ -683,7 +631,6 @@ def rollouts_detect(context: str = "") -> Dict[str, Any]:
683
631
  def register_rollouts_tools(mcp: FastMCP, non_destructive: bool = False):
684
632
  """Register progressive delivery tools with the MCP server."""
685
633
 
686
- # Argo Rollouts tools
687
634
  @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
688
635
  def rollouts_list_tool(
689
636
  namespace: str = "",
@@ -765,7 +712,6 @@ def register_rollouts_tools(mcp: FastMCP, non_destructive: bool = False):
765
712
  """List Argo Rollouts AnalysisRuns."""
766
713
  return json.dumps(analysis_runs_list(namespace, context, label_selector), indent=2)
767
714
 
768
- # Flagger tools
769
715
  @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True))
770
716
  def flagger_canaries_list_tool(
771
717
  namespace: str = "",
@@ -0,0 +1,41 @@
1
+ """Shared utilities for kubectl-mcp-server tools."""
2
+
3
+ import subprocess
4
+ import json
5
+ from typing import Any, Dict, List
6
+
7
+ from ..k8s_config import _get_kubectl_context_args
8
+
9
+
10
def run_kubectl(args: List[str], context: str = "", timeout: int = 60) -> Dict[str, Any]:
    """Run a kubectl command and report the outcome as a dict.

    Args:
        args: kubectl arguments (e.g. ["get", "pods", "-o", "json"]).
        context: Optional kubeconfig context, translated to CLI flags by
            _get_kubectl_context_args.
        timeout: Seconds to wait before aborting the subprocess.

    Returns:
        {"success": True, "output": stdout} on a zero exit code, otherwise
        {"success": False, "error": <stderr or exception text>}. Never raises.
    """
    cmd = ["kubectl", *_get_kubectl_context_args(context), *args]
    try:
        # List-form invocation (no shell) avoids shell-injection risk.
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "Command timed out"}
    except Exception as exc:
        # Covers e.g. FileNotFoundError when kubectl is not installed.
        return {"success": False, "error": str(exc)}
    if proc.returncode != 0:
        return {"success": False, "error": proc.stderr}
    return {"success": True, "output": proc.stdout}
22
+
23
+
24
def get_resources(kind: str, namespace: str = "", context: str = "", label_selector: str = "") -> List[Dict]:
    """List Kubernetes resources of *kind* via `kubectl get -o json`.

    Args:
        kind: Resource kind or fully-qualified plural (e.g. "pods",
            "clusterpolicies.kyverno.io").
        namespace: Restrict to this namespace; when empty, query all
            namespaces (-A).
        context: Optional kubeconfig context passed through to run_kubectl.
        label_selector: Optional label selector (-l).

    Returns:
        The "items" list from the JSON output; [] on any kubectl or
        JSON-parsing failure (errors are deliberately swallowed so callers
        can treat "CRD absent" and "no resources" uniformly).
    """
    scope = ["-n", namespace] if namespace else ["-A"]
    selector = ["-l", label_selector] if label_selector else []
    result = run_kubectl(["get", kind, "-o", "json"] + scope + selector, context)
    if not result["success"]:
        return []
    try:
        return json.loads(result["output"]).get("items", [])
    except json.JSONDecodeError:
        return []
tests/test_tools.py CHANGED
@@ -12,11 +12,11 @@ from unittest.mock import patch, MagicMock
12
12
  from datetime import datetime
13
13
 
14
14
 
15
- # Complete list of all 224 tools that must be registered (125 core + 6 UI + 93 ecosystem)
15
+ # Complete list of all 235 tools that must be registered (136 core + 6 UI + 93 ecosystem)
16
16
  EXPECTED_TOOLS = [
17
17
  # Pods (pods.py)
18
18
  "get_pods", "get_logs", "get_pod_events", "check_pod_health", "exec_in_pod",
19
- "cleanup_pods", "get_pod_conditions", "get_previous_logs", "diagnose_pod_crash",
19
+ "cleanup_pods", "run_pod", "get_pod_conditions", "get_previous_logs", "diagnose_pod_crash",
20
20
  "detect_pending_pods", "get_evicted_pods",
21
21
  # Deployments (deployments.py)
22
22
  "get_deployments", "create_deployment", "scale_deployment", "restart_deployment",
@@ -29,6 +29,12 @@ EXPECTED_TOOLS = [
29
29
  "get_context_details", "set_namespace_for_context", "get_cluster_info",
30
30
  "get_cluster_version", "get_nodes", "get_api_resources", "health_check",
31
31
  "kubeconfig_view", "get_api_versions", "check_crd_exists", "list_crds", "get_nodes_summary",
32
+ "node_logs_tool", "node_stats_summary_tool", "node_top_tool",
33
+ # Config management tools (cluster.py)
34
+ "get_server_config_status", "enable_kubeconfig_watching", "disable_kubeconfig_watching",
35
+ "set_server_stateless_mode",
36
+ # Multi-cluster tools (cluster.py)
37
+ "multi_cluster_query", "multi_cluster_health", "multi_cluster_pod_count",
32
38
  # Networking (networking.py)
33
39
  "get_services", "get_endpoints", "get_ingress", "port_forward",
34
40
  "diagnose_network_connectivity", "check_dns_resolution", "trace_service_chain",
@@ -109,11 +115,11 @@ EXPECTED_TOOLS = [
109
115
 
110
116
 
111
117
  class TestAllToolsRegistered:
112
- """Comprehensive tests to verify all 224 tools are registered (125 core + 6 UI + 93 ecosystem)."""
118
+ """Comprehensive tests to verify all 235 tools are registered (136 core + 6 UI + 93 ecosystem)."""
113
119
 
114
120
  @pytest.mark.unit
115
121
  def test_all_164_tools_registered(self):
116
- """Verify all 224 expected tools are registered (excluding optional browser tools)."""
122
+ """Verify all 235 expected tools are registered (excluding optional browser tools)."""
117
123
  import os
118
124
  from kubectl_mcp_tool.mcp_server import MCPServer
119
125
 
@@ -134,8 +140,8 @@ class TestAllToolsRegistered:
134
140
  tools = asyncio.run(get_tools())
135
141
  tool_names = {t.name for t in tools}
136
142
 
137
- # Verify count (224 tools = 125 core + 6 UI + 93 ecosystem, browser tools disabled)
138
- assert len(tools) == 224, f"Expected 224 tools, got {len(tools)}"
143
+ # Verify count (235 tools = 136 core + 6 UI + 93 ecosystem, browser tools disabled)
144
+ assert len(tools) == 235, f"Expected 235 tools, got {len(tools)}"
139
145
 
140
146
  # Check for missing tools
141
147
  missing_tools = set(EXPECTED_TOOLS) - tool_names
@@ -272,6 +278,32 @@ class TestPodTools:
272
278
  with patch("kubectl_mcp_tool.mcp_server.MCPServer._check_dependencies", return_value=True):
273
279
  server = MCPServer(name="test")
274
280
 
281
+ @pytest.mark.unit
282
+ def test_run_pod(self, mock_all_kubernetes_apis):
283
+ """Test running a container image as a pod."""
284
+ with patch("kubernetes.config.load_kube_config"):
285
+ with patch("kubernetes.client.CoreV1Api") as mock_api:
286
+ mock_pod = MagicMock()
287
+ mock_pod.metadata.name = "nginx-abc12345"
288
+ mock_pod.metadata.namespace = "default"
289
+ mock_pod.metadata.uid = "test-uid-123"
290
+ mock_pod.status.phase = "Pending"
291
+ mock_api.return_value.create_namespaced_pod.return_value = mock_pod
292
+
293
+ from kubectl_mcp_tool.mcp_server import MCPServer
294
+ with patch("kubectl_mcp_tool.mcp_server.MCPServer._check_dependencies", return_value=True):
295
+ server = MCPServer(name="test")
296
+
297
+ @pytest.mark.unit
298
+ def test_run_pod_non_destructive_mode(self, mock_all_kubernetes_apis):
299
+ """Test that run_pod is blocked in non-destructive mode."""
300
+ from kubectl_mcp_tool.mcp_server import MCPServer
301
+ with patch("kubectl_mcp_tool.mcp_server.MCPServer._check_dependencies", return_value=True):
302
+ with patch("kubernetes.config.load_kube_config"):
303
+ server = MCPServer(name="test", disable_destructive=True)
304
+ # Non-destructive mode should be set via the non_destructive property
305
+ assert server.non_destructive is True
306
+
275
307
 
276
308
  class TestDeploymentTools:
277
309
  """Tests for deployment-related tools."""
@@ -621,11 +653,12 @@ class TestApplyAndDeleteTools:
621
653
  """Test non-destructive mode blocks destructive operations."""
622
654
  from kubectl_mcp_tool.mcp_server import MCPServer
623
655
  with patch("kubectl_mcp_tool.mcp_server.MCPServer._check_dependencies", return_value=True):
624
- server = MCPServer(name="test", non_destructive=True)
625
- result = server._check_destructive()
626
- assert result is not None
627
- assert result["success"] is False
628
- assert "non-destructive mode" in result["error"]
656
+ with patch("kubernetes.config.load_kube_config"):
657
+ server = MCPServer(name="test", disable_destructive=True)
658
+ result = server._check_destructive()
659
+ assert result is not None
660
+ assert result["success"] is False
661
+ assert "non-destructive mode" in result["error"] or "disable-destructive" in result["error"]
629
662
 
630
663
 
631
664
  class TestToolAnnotations: