awslabs.eks-mcp-server 0.1.10__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- awslabs/eks_mcp_server/__init__.py +1 -1
- awslabs/eks_mcp_server/insights_handler.py +342 -0
- awslabs/eks_mcp_server/k8s_apis.py +4 -0
- awslabs/eks_mcp_server/k8s_handler.py +6 -0
- awslabs/eks_mcp_server/models.py +78 -0
- awslabs/eks_mcp_server/server.py +4 -0
- awslabs/eks_mcp_server/vpc_config_handler.py +425 -0
- {awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/METADATA +41 -3
- {awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/RECORD +13 -11
- {awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/WHEEL +0 -0
- {awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/entry_points.txt +0 -0
- {awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/licenses/LICENSE +0 -0
- {awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/licenses/NOTICE +0 -0
awslabs/eks_mcp_server/insights_handler.py
ADDED
@@ -0,0 +1,342 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Insights handler for the EKS MCP Server."""
+
+from awslabs.eks_mcp_server.aws_helper import AwsHelper
+from awslabs.eks_mcp_server.logging_helper import LogLevel, log_with_request_id
+from awslabs.eks_mcp_server.models import (
+    EksInsightItem,
+    EksInsightsResponse,
+    EksInsightStatus,
+)
+from datetime import datetime
+from mcp.server.fastmcp import Context
+from mcp.types import TextContent
+from pydantic import Field
+from typing import Any, Optional
+
+
+class InsightsHandler:
+    """Handler for Amazon EKS Insights.
+
+    This class provides tools for retrieving and analyzing insights about
+    EKS cluster configuration and upgrade readiness.
+    """
+
+    def __init__(
+        self,
+        mcp,
+        allow_sensitive_data_access: bool = False,
+    ):
+        """Initialize the Insights handler.
+
+        Args:
+            mcp: The MCP server instance
+            allow_sensitive_data_access: Whether to allow access to sensitive data (default: False)
+        """
+        self.mcp = mcp
+        self.allow_sensitive_data_access = allow_sensitive_data_access
+
+        # Register tools
+        self.mcp.tool(name='get_eks_insights')(self.get_eks_insights)
+
+        # Initialize AWS clients
+        self.eks_client = AwsHelper.create_boto3_client('eks')
+
+    # EKS Insights tool
+    async def get_eks_insights(
+        self,
+        ctx: Context,
+        cluster_name: str = Field(..., description='Name of the EKS cluster'),
+        insight_id: Optional[str] = Field(
+            None,
+            description='ID of a specific insight to get detailed information for. If provided, returns detailed information about this specific insight.',
+        ),
+        category: Optional[str] = Field(
+            None,
+            description='Filter insights by category (e.g., "MISCONFIGURATION" or "UPGRADE_READINESS")',
+        ),
+        next_token: Optional[str] = Field(
+            None,
+            description='Token for pagination to get the next set of results',
+        ),
+    ) -> EksInsightsResponse:
+        """Get EKS Insights for cluster configuration and upgrade readiness.
+
+        This tool retrieves Amazon EKS Insights that identify potential issues with
+        your EKS cluster. These insights help identify both cluster configuration issues
+        and upgrade readiness concerns that might affect hybrid nodes functionality.
+
+        Amazon EKS provides two types of insights:
+        - MISCONFIGURATION insights: Identify misconfigurations in your EKS cluster setup
+        - UPGRADE_READINESS insights: Identify issues that could prevent successful cluster upgrades
+
+        When used without an insight_id, it returns a list of all insights.
+        When used with an insight_id, it returns detailed information about
+        that specific insight, including recommendations.
+
+        ## Requirements
+        - The server must be run with the `--allow-sensitive-data-access` flag
+
+        ## Response Information
+        The response includes insight details such as status, description, and
+        recommendations for addressing identified issues.
+
+        ## Usage Tips
+        - Review MISCONFIGURATION insights to identify cluster misconfigurations
+        - Check UPGRADE_READINESS insights before upgrading your cluster
+        - Pay special attention to insights with FAILING status
+        - Focus on insights related to node and network configuration for hybrid nodes
+
+        Args:
+            ctx: MCP context
+            cluster_name: Name of the EKS cluster
+            insight_id: Optional ID of a specific insight to get detailed information for
+            category: Optional category to filter insights by (e.g., "MISCONFIGURATION" or "UPGRADE_READINESS")
+            next_token: Optional token for pagination to get the next set of results
+
+        Returns:
+            EksInsightsResponse with insights information
+        """
+        # Extract values from Field objects before passing them to the implementation method
+        cluster_name_value = cluster_name
+        insight_id_value = insight_id
+        category_value = category
+        next_token_value = next_token
+
+        # Delegate to the implementation method with extracted values
+        return await self._get_eks_insights_impl(
+            ctx, cluster_name_value, insight_id_value, category_value, next_token_value
+        )
+
+    async def _get_eks_insights_impl(
+        self,
+        ctx: Context,
+        cluster_name: str,
+        insight_id: Optional[str] = None,
+        category: Optional[str] = None,
+        next_token: Optional[str] = None,
+    ) -> EksInsightsResponse:
+        """Internal implementation of get_eks_insights."""
+        try:
+            # Always use the default EKS client
+            eks_client = self.eks_client
+
+            # Determine operation mode based on whether insight_id is provided
+            detail_mode = insight_id is not None
+
+            if detail_mode:
+                # Get details for a specific insight
+                return await self._get_insight_detail(
+                    ctx, eks_client, cluster_name, insight_id, next_token
+                )
+            else:
+                # List all insights with optional category filter
+                return await self._list_insights(
+                    ctx, eks_client, cluster_name, category, next_token
+                )
+
+        except Exception as e:
+            error_message = f'Error processing EKS insights request: {str(e)}'
+            log_with_request_id(ctx, LogLevel.ERROR, error_message)
+            return EksInsightsResponse(
+                isError=True,
+                content=[TextContent(type='text', text=error_message)],
+                cluster_name=cluster_name,
+                insights=[],
+                next_token=None,
+                detail_mode=(insight_id is not None),
+            )
+
+    async def _get_insight_detail(
+        self,
+        ctx: Context,
+        eks_client,
+        cluster_name: str,
+        insight_id: str,
+        next_token: Optional[str] = None,
+    ) -> EksInsightsResponse:
+        """Get details for a specific EKS insight."""
+        log_with_request_id(
+            ctx,
+            LogLevel.INFO,
+            f'Getting details for insight {insight_id} in cluster {cluster_name}',
+        )
+
+        try:
+            response = eks_client.describe_insight(id=insight_id, clusterName=cluster_name)
+
+            # Extract and format the insight details
+            if 'insight' in response:
+                insight_data = response['insight']
+
+                # Create insight status object
+                status_obj = EksInsightStatus(
+                    status=insight_data.get('insightStatus', {}).get('status', 'UNKNOWN'),
+                    reason=insight_data.get('insightStatus', {}).get('reason', ''),
+                )
+
+                # Handle datetime objects for timestamps
+                last_refresh_time = insight_data.get('lastRefreshTime', 0)
+                if isinstance(last_refresh_time, datetime):
+                    last_refresh_time = last_refresh_time.timestamp()
+
+                last_transition_time = insight_data.get('lastTransitionTime', 0)
+                if isinstance(last_transition_time, datetime):
+                    last_transition_time = last_transition_time.timestamp()
+
+                # Convert insight to EksInsightItem format
+                insight_item = EksInsightItem(
+                    id=insight_data.get('id', ''),
+                    name=insight_data.get('name', ''),
+                    category=insight_data.get('category', ''),
+                    kubernetes_version=insight_data.get('kubernetesVersion'),
+                    last_refresh_time=last_refresh_time,
+                    last_transition_time=last_transition_time,
+                    description=insight_data.get('description', ''),
+                    insight_status=status_obj,
+                    recommendation=insight_data.get('recommendation'),
+                    additional_info=insight_data.get('additionalInfo', {}),
+                    resources=insight_data.get('resources', []),
+                    category_specific_summary=insight_data.get('categorySpecificSummary', {}),
+                )
+
+                success_message = f'Successfully retrieved details for insight {insight_id}'
+                return EksInsightsResponse(
+                    isError=False,
+                    content=[TextContent(type='text', text=success_message)],
+                    cluster_name=cluster_name,
+                    insights=[insight_item],
+                    next_token=None,  # No pagination for detail view
+                    detail_mode=True,
+                )
+            else:
+                error_message = f'No insight details found for ID {insight_id}'
+                log_with_request_id(ctx, LogLevel.WARNING, error_message)
+                return EksInsightsResponse(
+                    isError=True,
+                    content=[TextContent(type='text', text=error_message)],
+                    cluster_name=cluster_name,
+                    insights=[],
+                    next_token=None,
+                    detail_mode=True,
+                )
+
+        except Exception as e:
+            error_message = f'Error retrieving insight details: {str(e)}'
+            log_with_request_id(ctx, LogLevel.ERROR, error_message)
+            return EksInsightsResponse(
+                isError=True,
+                content=[TextContent(type='text', text=error_message)],
+                cluster_name=cluster_name,
+                insights=[],
+                next_token=None,
+                detail_mode=True,
+            )
+
+    async def _list_insights(
+        self,
+        ctx: Context,
+        eks_client,
+        cluster_name: str,
+        category: Optional[str] = None,
+        next_token: Optional[str] = None,
+    ) -> EksInsightsResponse:
+        """List EKS insights for a cluster with optional category filtering."""
+        log_with_request_id(ctx, LogLevel.INFO, f'Listing insights for cluster {cluster_name}')
+
+        try:
+            # Build the list_insights parameters
+            list_params: dict[str, Any] = {'clusterName': cluster_name}
+
+            # Add category filter if provided
+            if category:
+                log_with_request_id(
+                    ctx, LogLevel.INFO, f'Filtering insights by category: {category}'
+                )
+                # Use the filter parameter with the correct structure
+                list_params['filter'] = {'categories': [category]}
+
+            # Add next_token if provided
+            if next_token:
+                log_with_request_id(
+                    ctx, LogLevel.INFO, 'Using pagination token for next page of results'
+                )
+                list_params['nextToken'] = next_token
+
+            response = eks_client.list_insights(**list_params)
+
+            # Extract and format the insights
+            insight_items = []
+
+            if 'insights' in response:
+                for insight_data in response['insights']:
+                    # Create insight status object
+                    status_obj = EksInsightStatus(
+                        status=insight_data.get('insightStatus', {}).get('status', 'UNKNOWN'),
+                        reason=insight_data.get('insightStatus', {}).get('reason', ''),
+                    )
+
+                    # Handle datetime objects for timestamps
+                    last_refresh_time = insight_data.get('lastRefreshTime', 0)
+                    if isinstance(last_refresh_time, datetime):
+                        last_refresh_time = last_refresh_time.timestamp()
+
+                    last_transition_time = insight_data.get('lastTransitionTime', 0)
+                    if isinstance(last_transition_time, datetime):
+                        last_transition_time = last_transition_time.timestamp()
+
+                    # Convert insight to EksInsightItem format
+                    insight_item = EksInsightItem(
+                        id=insight_data.get('id', ''),
+                        name=insight_data.get('name', ''),
+                        category=insight_data.get('category', ''),
+                        kubernetes_version=insight_data.get('kubernetesVersion'),
+                        last_refresh_time=last_refresh_time,
+                        last_transition_time=last_transition_time,
+                        description=insight_data.get('description', ''),
+                        insight_status=status_obj,
+                        # List mode doesn't include these fields
+                        recommendation=None,
+                        additional_info=None,
+                        resources=None,
+                        category_specific_summary=None,
+                    )
+
+                    insight_items.append(insight_item)
+
+            success_message = (
+                f'Successfully retrieved {len(insight_items)} insights for cluster {cluster_name}'
+            )
+            return EksInsightsResponse(
+                isError=False,
+                content=[TextContent(type='text', text=success_message)],
+                cluster_name=cluster_name,
+                insights=insight_items,
+                next_token=response.get('nextToken'),
+                detail_mode=False,
+            )
+
+        except Exception as e:
+            error_message = f'Error listing insights: {str(e)}'
+            log_with_request_id(ctx, LogLevel.ERROR, error_message)
+            return EksInsightsResponse(
+                isError=True,
+                content=[TextContent(type='text', text=error_message)],
+                cluster_name=cluster_name,
+                insights=[],
+                next_token=None,
+                detail_mode=False,
+            )
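The new handler is a thin wrapper around two EKS APIs, ListInsights and DescribeInsight. As a rough, illustrative sketch of the underlying boto3 calls it delegates to (the cluster name, region, and category values below are placeholders, not taken from the package):

# Illustrative sketch only, not part of the package diff: the raw EKS API
# calls that InsightsHandler wraps. Cluster name and region are placeholders.
import boto3

eks = boto3.client('eks', region_name='us-west-2')

# List mode: insight summaries, optionally filtered by category (as _list_insights does).
page = eks.list_insights(
    clusterName='my-cluster',
    filter={'categories': ['UPGRADE_READINESS']},
)
for summary in page.get('insights', []):
    print(summary.get('id'), summary.get('insightStatus', {}).get('status'))

# Detail mode: full description and recommendation (as _get_insight_detail does).
if page.get('insights'):
    detail = eks.describe_insight(
        clusterName='my-cluster',
        id=page['insights'][0]['id'],
    )
    print(detail.get('insight', {}).get('recommendation'))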
awslabs/eks_mcp_server/k8s_apis.py
CHANGED
@@ -418,6 +418,7 @@ class K8sApis:
         since_seconds: Optional[int] = None,
         tail_lines: Optional[int] = None,
         limit_bytes: Optional[int] = None,
+        previous: Optional[bool] = None,
     ) -> str:
         """Get logs from a pod.

@@ -428,6 +429,7 @@ class K8sApis:
             since_seconds: Only return logs newer than this many seconds (optional)
             tail_lines: Number of lines to return from the end of the logs (optional)
             limit_bytes: Maximum number of bytes to return (optional)
+            previous: Return previous terminated container logs (optional)

         Returns:
             Pod logs as a string
@@ -448,6 +450,8 @@ class K8sApis:
             params['tail_lines'] = tail_lines
         if limit_bytes:
             params['limit_bytes'] = limit_bytes
+        if previous:
+            params['previous'] = previous

         # Call the read_namespaced_pod_log method
         logs_response = core_v1_api.read_namespaced_pod_log(
awslabs/eks_mcp_server/k8s_handler.py
CHANGED
@@ -878,6 +878,10 @@ class K8sHandler:
             10240,
             description='Maximum number of bytes to return. Default: 10KB (10240 bytes). Prevents retrieving extremely large log files.',
         ),
+        previous: bool = Field(
+            False,
+            description='Return previous terminated container logs. Default: false. Useful to get logs for pods that are restarting.',
+        ),
     ) -> PodLogsResponse:
         """Get logs from a pod in a Kubernetes cluster.

@@ -905,6 +909,7 @@ class K8sHandler:
             since_seconds: Only return logs newer than this many seconds (optional)
             tail_lines: Number of lines to return from the end of the logs (defaults to 100)
             limit_bytes: Maximum number of bytes to return (defaults to 10KB)
+            previous: Return previous terminated container logs (defaults to false)

         Returns:
             PodLogsResponse with pod logs
@@ -934,6 +939,7 @@ class K8sHandler:
                 since_seconds=since_seconds,
                 tail_lines=tail_lines,
                 limit_bytes=limit_bytes,
+                previous=previous,
             )

             # Split logs into lines
awslabs/eks_mcp_server/models.py
CHANGED
@@ -287,3 +287,81 @@ class MetricsGuidanceResponse(CallToolResult):
         ..., description='Resource type (cluster, node, pod, namespace, service)'
     )
     metrics: List[Dict[str, Any]] = Field(..., description='List of metrics with their details')
+
+
+class EksVpcConfigResponse(CallToolResult):
+    """Response model for get_eks_vpc_config tool.
+
+    This model contains comprehensive VPC configuration details for any EKS cluster,
+    including CIDR blocks and route tables which are essential for understanding
+    network connectivity. For hybrid node setups, it also automatically identifies
+    and includes remote node and pod CIDR configurations.
+    """
+
+    vpc_id: str = Field(..., description='ID of the VPC')
+    cidr_block: str = Field(..., description='Primary CIDR block of the VPC')
+    additional_cidr_blocks: List[str] = Field(
+        [], description='Additional CIDR blocks associated with the VPC'
+    )
+    routes: List[Dict[str, Any]] = Field(
+        ..., description='List of route entries in the main route table'
+    )
+    remote_node_cidr_blocks: List[str] = Field(
+        [], description='CIDR blocks configured for remote node access (for hybrid setups)'
+    )
+    remote_pod_cidr_blocks: List[str] = Field(
+        [], description='CIDR blocks configured for remote pod access (for hybrid setups)'
+    )
+    subnets: List[Dict[str, Any]] = Field(
+        [], description='List of subnets in the VPC with their configurations'
+    )
+    cluster_name: str = Field(..., description='Name of the EKS cluster')
+
+
+class EksInsightStatus(BaseModel):
+    """Status of an EKS insight with status code and reason."""
+
+    status: str = Field(..., description='Status of the insight (e.g., PASSING, FAILING, UNKNOWN)')
+    reason: str = Field(..., description='Explanation of the current status')
+
+
+class EksInsightItem(BaseModel):
+    """Model for a single EKS insight item."""
+
+    id: str = Field(..., description='Unique identifier of the insight')
+    name: str = Field(..., description='Name of the insight')
+    category: str = Field(
+        ..., description='Category of the insight (e.g., CONFIGURATION, UPGRADE_READINESS)'
+    )
+    kubernetes_version: Optional[str] = Field(
+        None, description='Target Kubernetes version for upgrade insights'
+    )
+    last_refresh_time: float = Field(
+        ..., description='Timestamp when the insight was last refreshed'
+    )
+    last_transition_time: float = Field(
+        ..., description='Timestamp when the insight last changed status'
+    )
+    description: str = Field(..., description='Description of what the insight checks')
+    insight_status: EksInsightStatus = Field(..., description='Current status of the insight')
+    recommendation: Optional[str] = Field(
+        None, description='Recommendation for addressing the insight'
+    )
+    additional_info: Optional[Dict[str, str]] = Field(
+        None, description='Additional information links'
+    )
+    resources: Optional[List[str]] = Field(None, description='Resources involved in the insight')
+    category_specific_summary: Optional[Dict[str, Any]] = Field(
+        None, description='Additional category-specific details'
+    )
+
+
+class EksInsightsResponse(CallToolResult):
+    """Response model for get_eks_insights tool."""
+
+    cluster_name: str = Field(..., description='Name of the EKS cluster')
+    insights: List[EksInsightItem] = Field(..., description='List of insights')
+    next_token: Optional[str] = Field(None, description='Token for pagination')
+    detail_mode: bool = Field(
+        False, description='Whether the response contains detailed insight information'
+    )
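The new models nest as EksInsightStatus → EksInsightItem → EksInsightsResponse. A small illustrative construction with made-up field values, mirroring how the handler builds a list-mode result:

# Illustrative sketch only, not part of the package diff; values are invented.
from awslabs.eks_mcp_server.models import (
    EksInsightItem,
    EksInsightsResponse,
    EksInsightStatus,
)
from mcp.types import TextContent

status = EksInsightStatus(status='FAILING', reason='Deprecated API usage detected')
item = EksInsightItem(
    id='a1b2c3d4',
    name='Deprecated APIs removed in Kubernetes v1.30',
    category='UPGRADE_READINESS',
    kubernetes_version='1.30',
    last_refresh_time=1718000000.0,
    last_transition_time=1717000000.0,
    description='Checks for usage of deprecated Kubernetes APIs.',
    insight_status=status,
)
response = EksInsightsResponse(
    isError=False,
    content=[TextContent(type='text', text='Retrieved 1 insight')],
    cluster_name='my-cluster',
    insights=[item],
    next_token=None,
    detail_mode=False,
)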
awslabs/eks_mcp_server/server.py
CHANGED
@@ -29,7 +29,9 @@ from awslabs.eks_mcp_server.cloudwatch_metrics_guidance_handler import CloudWatc
 from awslabs.eks_mcp_server.eks_kb_handler import EKSKnowledgeBaseHandler
 from awslabs.eks_mcp_server.eks_stack_handler import EksStackHandler
 from awslabs.eks_mcp_server.iam_handler import IAMHandler
+from awslabs.eks_mcp_server.insights_handler import InsightsHandler
 from awslabs.eks_mcp_server.k8s_handler import K8sHandler
+from awslabs.eks_mcp_server.vpc_config_handler import VpcConfigHandler
 from loguru import logger
 from mcp.server.fastmcp import FastMCP

@@ -149,6 +151,8 @@ def main():
     K8sHandler(mcp, allow_write, allow_sensitive_data_access)
     IAMHandler(mcp, allow_write)
     CloudWatchMetricsHandler(mcp)
+    VpcConfigHandler(mcp, allow_sensitive_data_access)
+    InsightsHandler(mcp, allow_sensitive_data_access)

     # Run server
     mcp.run()
awslabs/eks_mcp_server/vpc_config_handler.py
ADDED
@@ -0,0 +1,425 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""VPC Configuration handler for the EKS MCP Server."""
+
+from awslabs.eks_mcp_server.aws_helper import AwsHelper
+from awslabs.eks_mcp_server.logging_helper import LogLevel, log_with_request_id
+from awslabs.eks_mcp_server.models import EksVpcConfigResponse
+from mcp.server.fastmcp import Context
+from mcp.types import TextContent
+from pydantic import Field
+from typing import Optional
+
+
+class VpcConfigHandler:
+    """Handler for Amazon EKS VPC configuration.
+
+    This class provides tools for retrieving and analyzing VPC configurations
+    for EKS clusters, with special support for hybrid node setups.
+    """
+
+    def __init__(
+        self,
+        mcp,
+        allow_sensitive_data_access: bool = False,
+    ):
+        """Initialize the VPC Config handler.
+
+        Args:
+            mcp: The MCP server instance
+            allow_sensitive_data_access: Whether to allow access to sensitive data (default: False)
+        """
+        self.mcp = mcp
+        self.allow_sensitive_data_access = allow_sensitive_data_access
+
+        # Register tools
+        self.mcp.tool(name='get_eks_vpc_config')(self.get_eks_vpc_config)
+
+        # Initialize AWS clients
+        self.ec2_client = AwsHelper.create_boto3_client('ec2')
+        self.eks_client = AwsHelper.create_boto3_client('eks')
+
+    # VPC tool
+    async def get_eks_vpc_config(
+        self,
+        ctx: Context,
+        cluster_name: str = Field(
+            ...,
+            description='Name of the EKS cluster to get VPC configuration for',
+        ),
+        vpc_id: Optional[str] = Field(
+            None,
+            description='ID of the specific VPC to query (optional, will use cluster VPC if not specified)',
+        ),
+    ) -> EksVpcConfigResponse:
+        """Get VPC configuration for an EKS cluster.
+
+        This tool retrieves comprehensive VPC configuration details for any EKS cluster,
+        including CIDR blocks and route tables which are essential for understanding
+        network connectivity. For hybrid node setups, it also automatically identifies
+        and includes remote node and pod CIDR configurations.
+
+        ## Requirements
+        - The server must be run with the `--allow-sensitive-data-access` flag
+
+        ## Response Information
+        The response includes VPC CIDR blocks, route tables, and when available,
+        remote CIDR configurations for hybrid node connectivity.
+
+        ## Usage Tips
+        - Understand VPC networking configuration for any EKS cluster
+        - Examine route tables to verify proper network connectivity
+        - For hybrid setups: Check that remote node CIDR blocks are correctly configured
+        - For hybrid setups: Verify that VPC route tables include routes for hybrid node CIDRs
+
+        Args:
+            ctx: MCP context
+            cluster_name: Name of the EKS cluster
+            vpc_id: Optional ID of the specific VPC to query
+
+        Returns:
+            EksVpcConfigResponse with VPC configuration details
+        """
+        # Extract values from Field objects before passing them to the implementation method
+        vpc_id_value = None if vpc_id is None else str(vpc_id)
+
+        # Delegate to the implementation method with extracted values
+        return await self._get_eks_vpc_config_impl(ctx, cluster_name, vpc_id_value)
+
+    async def _get_vpc_id_for_cluster(self, ctx: Context, cluster_name: str) -> tuple[str, dict]:
+        """Get the VPC ID for a cluster.
+
+        Args:
+            ctx: MCP context
+            cluster_name: Name of the EKS cluster
+
+        Returns:
+            Tuple of (vpc_id, cluster_response)
+
+        Raises:
+            Exception: If the VPC ID cannot be determined
+        """
+        # Get cluster information to determine VPC ID
+        cluster_response = self.eks_client.describe_cluster(name=cluster_name)
+        vpc_id = cluster_response['cluster'].get('resourcesVpcConfig', {}).get('vpcId')
+
+        if not vpc_id:
+            error_message = f'Could not determine VPC ID for cluster {cluster_name}'
+            log_with_request_id(ctx, LogLevel.ERROR, error_message)
+            raise Exception(error_message)
+
+        return vpc_id, cluster_response
+
+    async def _get_vpc_details(self, ctx: Context, vpc_id: str) -> tuple[str, list[str]]:
+        """Get VPC details using the VPC ID.
+
+        Args:
+            ctx: MCP context
+            vpc_id: ID of the VPC to query
+
+        Returns:
+            Tuple of (cidr_block, additional_cidr_blocks)
+
+        Raises:
+            Exception: If the VPC is not found
+        """
+        # Get VPC details
+        vpc_response = self.ec2_client.describe_vpcs(VpcIds=[vpc_id])
+
+        if not vpc_response['Vpcs']:
+            error_message = f'VPC {vpc_id} not found'
+            log_with_request_id(ctx, LogLevel.ERROR, error_message)
+            raise Exception(error_message)
+
+        # Extract VPC information
+        vpc = vpc_response['Vpcs'][0]
+        cidr_block = vpc.get('CidrBlock', '')
+        additional_cidr_blocks = [
+            cidr_association.get('CidrBlock', '')
+            for cidr_association in vpc.get('CidrBlockAssociationSet', [])[1:]
+            if 'CidrBlock' in cidr_association
+        ]
+
+        return cidr_block, additional_cidr_blocks
+
+    async def _get_subnet_information(self, ctx: Context, vpc_id: str) -> list[dict]:
+        """Get subnet information for a VPC.
+
+        Args:
+            ctx: MCP context
+            vpc_id: ID of the VPC to query
+
+        Returns:
+            List of subnet information dictionaries
+        """
+        # Get subnets for the VPC
+        subnets_response = self.ec2_client.describe_subnets(
+            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
+        )
+
+        subnets = []
+        for subnet in subnets_response.get('Subnets', []):
+            # Extract all subnet information to variables first
+            subnet_id = subnet.get('SubnetId', '')
+            subnet_cidr_block = subnet.get('CidrBlock', '')
+            az_id = subnet.get('AvailabilityZoneId', '')
+            az_name = subnet.get('AvailabilityZone', '')
+            available_ips = subnet.get('AvailableIpAddressCount', 0)
+            is_public = subnet.get('MapPublicIpOnLaunch', False)
+            assign_ipv6 = subnet.get('AssignIpv6AddressOnCreation', False)
+
+            # Check for disallowed AZs
+            disallowed_azs = ['use1-az3', 'usw1-az2', 'cac1-az3']
+            in_disallowed_az = az_id in disallowed_azs
+            has_sufficient_ips = available_ips >= 16  # AWS recommends 16
+
+            # Store subnet information
+            subnet_info = {
+                'subnet_id': subnet_id,
+                'cidr_block': subnet_cidr_block,
+                'az_id': az_id,
+                'az_name': az_name,
+                'available_ips': available_ips,
+                'is_public': is_public,
+                'assign_ipv6': assign_ipv6,
+                'in_disallowed_az': in_disallowed_az,
+                'has_sufficient_ips': has_sufficient_ips,
+            }
+            subnets.append(subnet_info)
+
+        return subnets
+
+    async def _get_route_table_information(self, ctx: Context, vpc_id: str) -> list[dict]:
+        """Get route table information for a VPC.
+
+        Args:
+            ctx: MCP context
+            vpc_id: ID of the VPC to query
+
+        Returns:
+            List of route information dictionaries
+        """
+        # Get route tables for the VPC
+        route_tables_response = self.ec2_client.describe_route_tables(
+            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
+        )
+
+        # Extract route information from the main route table
+        routes = []
+        for rt in route_tables_response.get('RouteTables', []):
+            # Check if this is the main route table
+            is_main = False
+            for association in rt.get('Associations', []):
+                if association.get('Main', False):
+                    is_main = True
+                    break
+
+            if is_main:
+                for route in rt.get('Routes', []):
+                    # Skip the local route
+                    if route.get('GatewayId') == 'local':
+                        continue
+
+                    # Determine the target type and ID
+                    target_type = None
+                    target_id = None
+
+                    for target_field in [
+                        'GatewayId',
+                        'NatGatewayId',
+                        'TransitGatewayId',
+                        'NetworkInterfaceId',
+                        'VpcPeeringConnectionId',
+                    ]:
+                        if target_field in route and route[target_field]:
+                            target_type = target_field.replace('Id', '').lower()
+                            target_id = route[target_field]
+                            break
+
+                    route_info = {
+                        'destination_cidr_block': route.get('DestinationCidrBlock', ''),
+                        'target_type': target_type or 'unknown',
+                        'target_id': target_id or 'unknown',
+                        'state': route.get('State', ''),
+                    }
+                    routes.append(route_info)
+
+        return routes
+
+    async def _get_remote_cidr_blocks(
+        self, ctx: Context, cluster_name: str, cluster_response: Optional[dict] = None
+    ) -> tuple[list[str], list[str]]:
+        """Get remote node and pod CIDR blocks.
+
+        Args:
+            ctx: MCP context
+            cluster_name: Name of the EKS cluster
+            cluster_response: Cluster response from a previous API call
+
+        Returns:
+            Tuple of (remote_node_cidr_blocks, remote_pod_cidr_blocks)
+        """
+        remote_node_cidr_blocks = []
+        remote_pod_cidr_blocks = []
+
+        # Extract remote network config from the cluster response
+        if cluster_response and 'cluster' in cluster_response:
+            if 'remoteNetworkConfig' in cluster_response['cluster']:
+                remote_config = cluster_response['cluster']['remoteNetworkConfig']
+
+                # Extract remote node CIDRs
+                if 'remoteNodeNetworks' in remote_config:
+                    for network in remote_config['remoteNodeNetworks']:
+                        if 'cidrs' in network:
+                            for cidr in network['cidrs']:
+                                if cidr not in remote_node_cidr_blocks:
+                                    remote_node_cidr_blocks.append(cidr)
+                                    log_with_request_id(
+                                        ctx,
+                                        LogLevel.INFO,
+                                        f'Found remote node CIDR in remoteNetworkConfig: {cidr}',
+                                    )
+
+                # Extract remote pod CIDRs
+                if 'remotePodNetworks' in remote_config:
+                    for network in remote_config['remotePodNetworks']:
+                        if 'cidrs' in network:
+                            for cidr in network['cidrs']:
+                                if cidr not in remote_pod_cidr_blocks:
+                                    remote_pod_cidr_blocks.append(cidr)
+                                    log_with_request_id(
+                                        ctx,
+                                        LogLevel.INFO,
+                                        f'Found remote pod CIDR in remoteNetworkConfig: {cidr}',
+                                    )
+
+        # Log summary of detected CIDRs
+        if remote_node_cidr_blocks:
+            log_with_request_id(
+                ctx,
+                LogLevel.INFO,
+                f'Detected remote node CIDRs: {", ".join(remote_node_cidr_blocks)}',
+            )
+        else:
+            log_with_request_id(ctx, LogLevel.WARNING, 'No remote node CIDRs detected')
+
+        if remote_pod_cidr_blocks:
+            log_with_request_id(
+                ctx,
+                LogLevel.INFO,
+                f'Detected remote pod CIDRs: {", ".join(remote_pod_cidr_blocks)}',
+            )
+        else:
+            log_with_request_id(ctx, LogLevel.WARNING, 'No remote pod CIDRs detected')
+
+        return remote_node_cidr_blocks, remote_pod_cidr_blocks
+
+    async def _get_eks_vpc_config_impl(
+        self, ctx: Context, cluster_name: str, vpc_id: Optional[str] = None
+    ) -> EksVpcConfigResponse:
+        """Internal implementation of get_eks_vpc_config."""
+        try:
+            # Always get the cluster response for remote CIDR information
+            cluster_response = None
+            try:
+                if not vpc_id:
+                    # Get both VPC ID and cluster response
+                    vpc_id, cluster_response = await self._get_vpc_id_for_cluster(
+                        ctx, cluster_name
+                    )
+                else:
+                    # Just get the cluster response when VPC ID is provided
+                    _, cluster_response = await self._get_vpc_id_for_cluster(ctx, cluster_name)
+            except Exception as eks_error:
+                error_message = f'Error getting cluster information: {str(eks_error)}'
+                log_with_request_id(ctx, LogLevel.ERROR, error_message)
+                return EksVpcConfigResponse(
+                    isError=True,
+                    content=[TextContent(type='text', text=error_message)],
+                    vpc_id='',
+                    cidr_block='',
+                    additional_cidr_blocks=[],  # Add missing parameter
+                    routes=[],
+                    remote_node_cidr_blocks=[],
+                    remote_pod_cidr_blocks=[],
+                    subnets=[],
+                    cluster_name=cluster_name,
+                )
+
+            try:
+                # Get VPC details
+                cidr_block, additional_cidr_blocks = await self._get_vpc_details(ctx, vpc_id)
+
+                # Get subnet information
+                subnets = await self._get_subnet_information(ctx, vpc_id)
+
+                # Get route table information
+                routes = await self._get_route_table_information(ctx, vpc_id)
+
+                # Get remote CIDR blocks
+                (
+                    remote_node_cidr_blocks,
+                    remote_pod_cidr_blocks,
+                ) = await self._get_remote_cidr_blocks(ctx, cluster_name, cluster_response)
+
+                # Create the response
+                success_message = (
+                    f'Retrieved VPC configuration for {vpc_id} (cluster {cluster_name})'
+                )
+                log_with_request_id(ctx, LogLevel.INFO, success_message)
+
+                return EksVpcConfigResponse(
+                    isError=False,
+                    content=[TextContent(type='text', text=success_message)],
+                    vpc_id=vpc_id,
+                    cidr_block=cidr_block,
+                    additional_cidr_blocks=additional_cidr_blocks,
+                    routes=routes,
+                    remote_node_cidr_blocks=remote_node_cidr_blocks,
+                    remote_pod_cidr_blocks=remote_pod_cidr_blocks,
+                    subnets=subnets,
+                    cluster_name=cluster_name,
+                )
+            except Exception as e:
+                error_message = f'Error retrieving VPC configuration: {str(e)}'
+                log_with_request_id(ctx, LogLevel.ERROR, error_message)
+                return EksVpcConfigResponse(
+                    isError=True,
+                    content=[TextContent(type='text', text=error_message)],
+                    vpc_id='',
+                    cidr_block='',
+                    additional_cidr_blocks=[],  # Add missing parameter
+                    routes=[],
+                    remote_node_cidr_blocks=[],
+                    remote_pod_cidr_blocks=[],
+                    subnets=[],
+                    cluster_name=cluster_name,
+                )
+
+        except Exception as e:
+            error_message = f'Error retrieving VPC configuration: {str(e)}'
+            log_with_request_id(ctx, LogLevel.ERROR, error_message)
+            return EksVpcConfigResponse(
+                isError=True,
+                content=[TextContent(type='text', text=error_message)],
+                vpc_id='',
+                cidr_block='',
+                additional_cidr_blocks=[],  # Add missing parameter
+                routes=[],
+                remote_node_cidr_blocks=[],
+                remote_pod_cidr_blocks=[],
+                subnets=[],
+                cluster_name=cluster_name,
+            )
{awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: awslabs.eks-mcp-server
-Version: 0.1.10
+Version: 0.1.13
 Summary: An AWS Labs Model Context Protocol (MCP) server for EKS
 Project-URL: homepage, https://awslabs.github.io/mcp/
 Project-URL: docs, https://awslabs.github.io/mcp/servers/eks-mcp-server/
@@ -69,6 +69,11 @@ For read operations, the following permissions are required:
       "Effect": "Allow",
       "Action": [
         "eks:DescribeCluster",
+        "eks:DescribeInsight",
+        "eks:ListInsights",
+        "ec2:DescribeVpcs",
+        "ec2:DescribeSubnets",
+        "ec2:DescribeRouteTables",
         "cloudformation:DescribeStacks",
         "cloudwatch:GetMetricData",
         "logs:StartQuery",
@@ -417,7 +422,7 @@ Features:

 Parameters:

-* cluster_name, pod_name, namespace, container_name (optional), since_seconds (optional), tail_lines (optional), limit_bytes (optional)
+* cluster_name, pod_name, namespace, container_name (optional), since_seconds (optional), tail_lines (optional), limit_bytes (optional), previous (optional)

 #### `get_k8s_events`

@@ -433,6 +438,22 @@ Parameters:

 * cluster_name, kind, name, namespace (optional)

+#### `get_eks_vpc_config`
+
+Retrieves comprehensive VPC configuration details for EKS clusters, with support for hybrid node setups.
+
+Features:
+
+* Returns detailed VPC configuration including CIDR blocks, route tables, and subnet information
+* Automatically identifies and includes remote node and pod CIDR configurations for hybrid node setups
+* Validates subnet capacity for EKS networking requirements
+* Flags subnets in disallowed availability zones that can't be used with EKS
+* Requires `--allow-sensitive-data-access` server flag to be enabled
+
+Parameters:
+
+* cluster_name, vpc_id (optional)
+
 ### CloudWatch Integration

 #### `get_cloudwatch_logs`
@@ -536,6 +557,23 @@ Parameters:

 * query

+#### `get_eks_insights`
+
+Retrieves Amazon EKS Insights that identify potential issues with your EKS cluster configuration and upgrade readiness.
+
+Features:
+
+* Returns insights in two categories: MISCONFIGURATION and UPGRADE_READINESS (for upgrade blockers)
+* Supports both list mode (all insights) and detail mode (specific insight with recommendations)
+* Includes status, descriptions, and timestamps for each insight
+* Provides detailed recommendations for addressing identified issues when using detail mode
+* Supports optional filtering by insight category
+* Requires `--allow-sensitive-data-access` server flag to be enabled
+
+Parameters:
+
+* cluster_name, insight_id (optional), category (optional), next_token (optional)
+

 ## Security & permissions

@@ -567,7 +605,7 @@ When using the EKS MCP Server, consider the following:

 The EKS MCP Server can be used for production environments with proper security controls in place. The server runs in read-only mode by default, which is recommended and considered generally safer for production environments. Only explicitly enable write access when necessary. Below are the EKS MCP server tools available in read-only versus write-access mode:

-* **Read-only mode (default)**: `manage_eks_stacks` (with operation="describe"), `manage_k8s_resource` (with operation="read"), `list_k8s_resources`, `get_pod_logs`, `get_k8s_events`, `get_cloudwatch_logs`, `get_cloudwatch_metrics`, `get_policies_for_role`, `search_eks_troubleshoot_guide`, `list_api_versions`.
+* **Read-only mode (default)**: `manage_eks_stacks` (with operation="describe"), `manage_k8s_resource` (with operation="read"), `list_k8s_resources`, `get_pod_logs`, `get_k8s_events`, `get_cloudwatch_logs`, `get_cloudwatch_metrics`, `get_policies_for_role`, `search_eks_troubleshoot_guide`, `list_api_versions`, `get_eks_vpc_config`, `get_eks_insights`.
 * **Write-access mode**: (require `--allow-write`): `manage_eks_stacks` (with "generate", "deploy", "delete"), `manage_k8s_resource` (with "create", "replace", "patch", "delete"), `apply_yaml`, `generate_app_manifest`, `add_inline_policy`.

 #### `autoApprove` (optional)
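The README sections added above list each new tool's parameters. Purely as an illustration (not part of the package), an MCP client might pass argument payloads shaped like these, with placeholder cluster, VPC, and insight identifiers:

# Illustrative argument shapes for the two new tools; all identifiers are placeholders.
get_eks_vpc_config_args = {
    'cluster_name': 'my-cluster',
    'vpc_id': 'vpc-0123456789abcdef0',  # optional; defaults to the cluster's own VPC
}

get_eks_insights_list_args = {
    'cluster_name': 'my-cluster',
    'category': 'UPGRADE_READINESS',  # optional filter
}

get_eks_insights_detail_args = {
    'cluster_name': 'my-cluster',
    'insight_id': 'a1b2c3d4-5678-90ab-cdef-111122223333',  # switches the tool to detail mode
}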
{awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/RECORD
RENAMED
@@ -1,5 +1,5 @@
 awslabs/__init__.py,sha256=WuqxdDgUZylWNmVoPKiK7qGsTB_G4UmuXIrJ-VBwDew,731
-awslabs/eks_mcp_server/__init__.py,sha256=
+awslabs/eks_mcp_server/__init__.py,sha256=l3da-u1n3oy8cuxMtz1_wJvoRox4rwaxDLjR4C9jKwc,669
 awslabs/eks_mcp_server/aws_helper.py,sha256=Is0BCVPjhO-AqKFF0MnGpzNRjAe8E896VGrKWCz4gfo,4031
 awslabs/eks_mcp_server/cloudwatch_handler.py,sha256=k3GsORIFswOknxYM0reCBsCHXn--gSwl7WVtxkUTyzg,28853
 awslabs/eks_mcp_server/cloudwatch_metrics_guidance_handler.py,sha256=b0fFMvsGX98HKK4kVFI1YbhZqZC623lZ6TNXs3EiRSI,5283
@@ -7,20 +7,22 @@ awslabs/eks_mcp_server/consts.py,sha256=XBat-KcMckueQQpDeLkD_Nv_9G9kX0_d48sMEHtZ
 awslabs/eks_mcp_server/eks_kb_handler.py,sha256=6L3wS500AadKburhwf0zXEapXscd4VZCTY8t61u-j1Y,3548
 awslabs/eks_mcp_server/eks_stack_handler.py,sha256=p4Cbjcb7ga52hPo6-M1FFWhgIpuI0X4Hx024k4wxcwQ,27400
 awslabs/eks_mcp_server/iam_handler.py,sha256=lXA4acsBJAy5IekgdJM_yVzaaztanrPSpqX84ttSlO0,14817
-awslabs/eks_mcp_server/
+awslabs/eks_mcp_server/insights_handler.py,sha256=Tc-2UkNReALQ_5L36BqiufGwy-AKkvt_g0W5ORhc52M,14374
+awslabs/eks_mcp_server/k8s_apis.py,sha256=qyRmT8i390wp-L3gVYqmtGBt5HDELTvUFto7fsVKBio,21328
 awslabs/eks_mcp_server/k8s_client_cache.py,sha256=vm-8VKC3zaCqpW9pOUmRDFulxzYCX8p8OvyOgtvh96o,5697
-awslabs/eks_mcp_server/k8s_handler.py,sha256=
+awslabs/eks_mcp_server/k8s_handler.py,sha256=ztN5Tvf9cNNbSHxlgBua_o9-VsZH7YpVTw0ef4We3gE,49763
 awslabs/eks_mcp_server/logging_helper.py,sha256=hr8xZhAZOKyR7dkwc7bhqkDVuSDI3BRK3-UzllQkWgE,1854
-awslabs/eks_mcp_server/models.py,sha256=
-awslabs/eks_mcp_server/server.py,sha256=
+awslabs/eks_mcp_server/models.py,sha256=fMiuW2XYsrBA6eI8s1O0y_xN73Q-Q9yOf0xJmBsr-c8,15631
+awslabs/eks_mcp_server/server.py,sha256=VjbDh-Kh-3sMwb9NXXp6kYoL9RRa6ikq-JzHeQZ-7PA,6952
+awslabs/eks_mcp_server/vpc_config_handler.py,sha256=iWOK0kSFrgdFszxUHKVt4e0GFfSgphgJqsYVSREWbIM,17144
 awslabs/eks_mcp_server/data/eks_cloudwatch_metrics_guidance.json,sha256=a4tzVdwpF_0kZGwOJCDEQskScp5rjRU89M7ONp3HpdA,9304
 awslabs/eks_mcp_server/scripts/update_eks_cloudwatch_metrics_guidance.py,sha256=Y1OvU85wc3WIviUbabbGCCO-DvJwtP2UU9ABuEMSNyY,10099
 awslabs/eks_mcp_server/templates/eks-templates/eks-with-vpc.yaml,sha256=_Lxk2MEXNA7N0-kvXckxwBamDEagjGvC6-Z5uxhVO5s,10774
 awslabs/eks_mcp_server/templates/k8s-templates/deployment.yaml,sha256=J2efYFISlT3sTvf8_BJV3p0_m51cltqiRhXdBXb9YJs,2343
 awslabs/eks_mcp_server/templates/k8s-templates/service.yaml,sha256=DA0Db_5yjUZmnnYy5Bljcv3hj7D6YvFFWFRB6GiIstY,414
-awslabs_eks_mcp_server-0.1.
-awslabs_eks_mcp_server-0.1.
-awslabs_eks_mcp_server-0.1.
-awslabs_eks_mcp_server-0.1.
-awslabs_eks_mcp_server-0.1.
-awslabs_eks_mcp_server-0.1.
+awslabs_eks_mcp_server-0.1.13.dist-info/METADATA,sha256=k9hpWwKrhEL_j-axjOfHWeX3DgfYnNPbIeecpY7LynM,31384
+awslabs_eks_mcp_server-0.1.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+awslabs_eks_mcp_server-0.1.13.dist-info/entry_points.txt,sha256=VydotfOJYck8o4TPsaF6Pjmc8Bp_doacYXSE_71qH4c,78
+awslabs_eks_mcp_server-0.1.13.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+awslabs_eks_mcp_server-0.1.13.dist-info/licenses/NOTICE,sha256=gnCtD34qTDnb2Lykm9kNFYkqZIvqJHGuq1ZJBkl6EgE,90
+awslabs_eks_mcp_server-0.1.13.dist-info/RECORD,,
{awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/WHEEL
RENAMED
File without changes

{awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/entry_points.txt
RENAMED
File without changes

{awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/licenses/LICENSE
RENAMED
File without changes

{awslabs_eks_mcp_server-0.1.10.dist-info → awslabs_eks_mcp_server-0.1.13.dist-info}/licenses/NOTICE
RENAMED
File without changes