awslabs.elasticache-mcp-server 0.1.1 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- awslabs/__init__.py +16 -0
- awslabs/elasticache_mcp_server/__init__.py +17 -0
- awslabs/elasticache_mcp_server/common/__init__.py +15 -0
- awslabs/elasticache_mcp_server/common/connection.py +117 -0
- awslabs/elasticache_mcp_server/common/decorators.py +41 -0
- awslabs/elasticache_mcp_server/common/server.py +30 -0
- awslabs/elasticache_mcp_server/context.py +39 -0
- awslabs/elasticache_mcp_server/main.py +52 -0
- awslabs/elasticache_mcp_server/tools/__init__.py +15 -0
- awslabs/elasticache_mcp_server/tools/cc/__init__.py +31 -0
- awslabs/elasticache_mcp_server/tools/cc/connect.py +444 -0
- awslabs/elasticache_mcp_server/tools/cc/create.py +212 -0
- awslabs/elasticache_mcp_server/tools/cc/delete.py +65 -0
- awslabs/elasticache_mcp_server/tools/cc/describe.py +80 -0
- awslabs/elasticache_mcp_server/tools/cc/modify.py +159 -0
- awslabs/elasticache_mcp_server/tools/cc/parsers.py +78 -0
- awslabs/elasticache_mcp_server/tools/cc/processors.py +74 -0
- awslabs/elasticache_mcp_server/tools/ce/__init__.py +19 -0
- awslabs/elasticache_mcp_server/tools/ce/get_cost_and_usage.py +76 -0
- awslabs/elasticache_mcp_server/tools/cw/__init__.py +19 -0
- awslabs/elasticache_mcp_server/tools/cw/get_metric_statistics.py +85 -0
- awslabs/elasticache_mcp_server/tools/cwlogs/__init__.py +29 -0
- awslabs/elasticache_mcp_server/tools/cwlogs/create_log_group.py +68 -0
- awslabs/elasticache_mcp_server/tools/cwlogs/describe_log_groups.py +123 -0
- awslabs/elasticache_mcp_server/tools/cwlogs/describe_log_streams.py +120 -0
- awslabs/elasticache_mcp_server/tools/cwlogs/filter_log_events.py +122 -0
- awslabs/elasticache_mcp_server/tools/cwlogs/get_log_events.py +99 -0
- awslabs/elasticache_mcp_server/tools/firehose/__init__.py +19 -0
- awslabs/elasticache_mcp_server/tools/firehose/list_delivery_streams.py +63 -0
- awslabs/elasticache_mcp_server/tools/misc/__init__.py +31 -0
- awslabs/elasticache_mcp_server/tools/misc/batch_apply_update_action.py +62 -0
- awslabs/elasticache_mcp_server/tools/misc/batch_stop_update_action.py +62 -0
- awslabs/elasticache_mcp_server/tools/misc/describe_cache_engine_versions.py +79 -0
- awslabs/elasticache_mcp_server/tools/misc/describe_engine_default_parameters.py +64 -0
- awslabs/elasticache_mcp_server/tools/misc/describe_events.py +86 -0
- awslabs/elasticache_mcp_server/tools/misc/describe_service_updates.py +71 -0
- awslabs/elasticache_mcp_server/tools/rg/__init__.py +54 -0
- awslabs/elasticache_mcp_server/tools/rg/complete_migration.py +94 -0
- awslabs/elasticache_mcp_server/tools/rg/connect.py +537 -0
- awslabs/elasticache_mcp_server/tools/rg/create.py +318 -0
- awslabs/elasticache_mcp_server/tools/rg/delete.py +68 -0
- awslabs/elasticache_mcp_server/tools/rg/describe.py +68 -0
- awslabs/elasticache_mcp_server/tools/rg/modify.py +236 -0
- awslabs/elasticache_mcp_server/tools/rg/parsers.py +268 -0
- awslabs/elasticache_mcp_server/tools/rg/processors.py +227 -0
- awslabs/elasticache_mcp_server/tools/rg/start_migration.py +151 -0
- awslabs/elasticache_mcp_server/tools/rg/test_migration.py +139 -0
- awslabs/elasticache_mcp_server/tools/serverless/__init__.py +37 -0
- awslabs/elasticache_mcp_server/tools/serverless/connect.py +451 -0
- awslabs/elasticache_mcp_server/tools/serverless/create.py +174 -0
- awslabs/elasticache_mcp_server/tools/serverless/delete.py +49 -0
- awslabs/elasticache_mcp_server/tools/serverless/describe.py +69 -0
- awslabs/elasticache_mcp_server/tools/serverless/models.py +160 -0
- awslabs/elasticache_mcp_server/tools/serverless/modify.py +95 -0
- awslabs_elasticache_mcp_server-0.1.1.dist-info/METADATA +257 -0
- awslabs_elasticache_mcp_server-0.1.1.dist-info/RECORD +60 -0
- awslabs_elasticache_mcp_server-0.1.1.dist-info/WHEEL +4 -0
- awslabs_elasticache_mcp_server-0.1.1.dist-info/entry_points.txt +2 -0
- awslabs_elasticache_mcp_server-0.1.1.dist-info/licenses/LICENSE +175 -0
- awslabs_elasticache_mcp_server-0.1.1.dist-info/licenses/NOTICE +2 -0
awslabs/elasticache_mcp_server/tools/cc/connect.py
@@ -0,0 +1,444 @@

# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Connect module for creating and configuring jump host EC2 instances to access ElastiCache clusters."""

from ...common.connection import EC2ConnectionManager, ElastiCacheConnectionManager
from ...common.decorators import handle_exceptions
from ...common.server import mcp
from ...context import Context
from botocore.exceptions import ClientError
from typing import Any, Dict, Tuple, Union


async def _configure_security_groups(
    cache_cluster_id: str, instance_id: str, ec2_client: Any = None, elasticache_client: Any = None
) -> Tuple[bool, str, int]:
    """Configure security group rules to allow access from EC2 instance to ElastiCache cluster.

    Args:
        cache_cluster_id (str): ID of the ElastiCache cluster
        instance_id (str): ID of the EC2 instance
        ec2_client (Any, optional): EC2 client. If not provided, will get from connection manager
        elasticache_client (Any, optional): ElastiCache client. If not provided, will get from connection manager

    Returns:
        Tuple[bool, str, int]: Tuple containing (success status, vpc id, cache port)

    Raises:
        ValueError: If VPC compatibility check fails or required resources not found
    """
    if not ec2_client:
        ec2_client = EC2ConnectionManager.get_connection()
    if not elasticache_client:
        elasticache_client = ElastiCacheConnectionManager.get_connection()

    # Get cache cluster details
    cache_cluster = elasticache_client.describe_cache_clusters(
        CacheClusterId=cache_cluster_id, ShowCacheNodeInfo=True
    )['CacheClusters'][0]

    # Get cache cluster VPC ID
    cache_subnet_group = elasticache_client.describe_cache_subnet_groups(
        CacheSubnetGroupName=cache_cluster['CacheSubnetGroupName']
    )['CacheSubnetGroups'][0]
    cache_vpc_id = cache_subnet_group['VpcId']

    # Get cache cluster security groups
    cache_security_groups = cache_cluster.get('SecurityGroups', [])
    if not cache_security_groups:
        raise ValueError(f'No security groups found for cache cluster {cache_cluster_id}')

    # Get cache cluster port
    cache_port = cache_cluster['CacheNodes'][0]['Endpoint']['Port']

    # Get EC2 instance details
    instance_info = ec2_client.describe_instances(InstanceIds=[instance_id])
    if not instance_info['Reservations']:
        raise ValueError(f'EC2 instance {instance_id} not found')

    instance = instance_info['Reservations'][0]['Instances'][0]
    instance_vpc_id = instance['VpcId']

    # Check VPC compatibility
    if instance_vpc_id != cache_vpc_id:
        raise ValueError(
            f'EC2 instance VPC ({instance_vpc_id}) does not match cache cluster VPC ({cache_vpc_id})'
        )

    # Get EC2 instance security groups
    instance_security_groups = [sg['GroupId'] for sg in instance['SecurityGroups']]
    if not instance_security_groups:
        raise ValueError(f'No security groups found for EC2 instance {instance_id}')

    # For each cache security group, ensure it allows inbound access from EC2 security groups
    for cache_sg in cache_security_groups:
        cache_sg_id = cache_sg['SecurityGroupId']
        cache_sg_info = ec2_client.describe_security_groups(GroupIds=[cache_sg_id])[
            'SecurityGroups'
        ][0]

        # Check existing rules
        existing_rules = cache_sg_info.get('IpPermissions', [])
        needs_rule = True

        for rule in existing_rules:
            if (
                rule.get('IpProtocol') == 'tcp'
                and rule.get('FromPort') == cache_port
                and rule.get('ToPort') == cache_port
            ):
                # Check if any EC2 security group is already allowed
                for group_pair in rule.get('UserIdGroupPairs', []):
                    if group_pair.get('GroupId') in instance_security_groups:
                        needs_rule = False
                        break
            if not needs_rule:
                break

        # Add rule if needed
        if needs_rule:
            ec2_client.authorize_security_group_ingress(
                GroupId=cache_sg_id,
                IpPermissions=[
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': cache_port,
                        'ToPort': cache_port,
                        'UserIdGroupPairs': [
                            {
                                'GroupId': instance_security_groups[0],
                                'Description': f'Allow access from jump host {instance_id}',
                            }
                        ],
                    }
                ],
            )

    return True, cache_vpc_id, cache_port


@mcp.tool(name='connect-jump-host-cache-cluster')
@handle_exceptions
async def connect_jump_host_cc(cache_cluster_id: str, instance_id: str) -> Dict[str, Any]:
    """Configures an existing EC2 instance as a jump host to access an ElastiCache cluster.

    Args:
        cache_cluster_id (str): ID of the ElastiCache cluster to connect to
        instance_id (str): ID of the EC2 instance to use as jump host

    Returns:
        Dict[str, Any]: Dictionary containing connection details and configuration status

    Raises:
        ValueError: If VPC compatibility check fails or required resources not found
    """
    # Check if readonly mode is enabled
    if Context.readonly_mode():
        raise ValueError(
            'You have configured this tool in readonly mode. To make this change you will have to update your configuration.'
        )

    try:
        # Configure security groups using common function
        configured, vpc_id, cache_port = await _configure_security_groups(
            cache_cluster_id, instance_id
        )

        return {
            'Status': 'Success',
            'InstanceId': instance_id,
            'CacheClusterId': cache_cluster_id,
            'CachePort': cache_port,
            'VpcId': vpc_id,
            'SecurityGroupsConfigured': configured,
            'Message': 'Jump host connection configured successfully',
        }

    except Exception as e:
        raise ValueError(str(e))


@mcp.tool(name='get-ssh-tunnel-command-cache-cluster')
@handle_exceptions
async def get_ssh_tunnel_command_cc(
    cache_cluster_id: str, instance_id: str
) -> Dict[str, Union[str, int]]:
    """Generates an SSH tunnel command to connect to an ElastiCache cluster through an EC2 jump host.

    Args:
        cache_cluster_id (str): ID of the ElastiCache cluster to connect to
        instance_id (str): ID of the EC2 instance to use as jump host

    Returns:
        Dict[str, Union[str, int]]: Dictionary containing the SSH tunnel command and related details

    Raises:
        ValueError: If required resources not found or information cannot be retrieved
    """
    # Get AWS clients
    ec2_client = EC2ConnectionManager.get_connection()
    elasticache_client = ElastiCacheConnectionManager.get_connection()

    try:
        # Get EC2 instance details
        instance_info = ec2_client.describe_instances(InstanceIds=[instance_id])
        if not instance_info['Reservations']:
            raise ValueError(f'EC2 instance {instance_id} not found')

        instance = instance_info['Reservations'][0]['Instances'][0]

        # Get instance key name and public DNS
        key_name = instance.get('KeyName')
        if not key_name:
            raise ValueError(f'No key pair associated with EC2 instance {instance_id}')

        public_dns = instance.get('PublicDnsName')
        if not public_dns:
            raise ValueError(f'No public DNS name found for EC2 instance {instance_id}')

        # Get instance platform details to determine user
        platform = instance.get('Platform', '')
        user = 'ec2-user'  # Default for Amazon Linux
        if platform.lower() == 'windows':
            raise ValueError('Windows instances are not supported for SSH tunneling')
        elif 'ubuntu' in instance.get('ImageId', '').lower():
            user = 'ubuntu'

        # Get cache cluster details
        cache_cluster = elasticache_client.describe_cache_clusters(
            CacheClusterId=cache_cluster_id, ShowCacheNodeInfo=True
        )['CacheClusters'][0]

        # Get cache endpoint and port
        if not cache_cluster.get('CacheNodes'):
            raise ValueError(f'No cache nodes found for cluster {cache_cluster_id}')

        cache_endpoint = cache_cluster['CacheNodes'][0]['Endpoint']['Address']
        cache_port = cache_cluster['CacheNodes'][0]['Endpoint']['Port']

        # Generate SSH tunnel command
        ssh_command = (
            f'ssh -i "{key_name}.pem" -fN -l {user} '
            f'-L {cache_port}:{cache_endpoint}:{cache_port} {public_dns} -v'
        )

        return {
            'command': ssh_command,
            'keyName': key_name,
            'user': user,
            'localPort': cache_port,
            'cacheEndpoint': cache_endpoint,
            'cachePort': cache_port,
            'jumpHostDns': public_dns,
        }

    except Exception as e:
        raise ValueError(str(e))


@mcp.tool(name='create-jump-host-cache-cluster')
@handle_exceptions
async def create_jump_host_cc(
    cache_cluster_id: str,
    subnet_id: str,
    security_group_id: str,
    key_name: str,
    instance_type: str = 't3.small',
) -> Dict[str, Any]:
    """Creates an EC2 jump host instance to access an ElastiCache cluster via SSH tunnel.

    Args:
        cache_cluster_id (str): ID of the ElastiCache cluster to connect to
        subnet_id (str): ID of the subnet to launch the EC2 instance in (must be public)
        security_group_id (str): ID of the security group to assign to the EC2 instance
        key_name (str): Name of the EC2 key pair to use for SSH access
        instance_type (str, optional): EC2 instance type. Defaults to "t3.small"

    Returns:
        Dict[str, Any]: Dictionary containing the created EC2 instance details

    Raises:
        ValueError: If subnet is not public or VPC compatibility check fails
    """
    # Check if readonly mode is enabled
    if Context.readonly_mode():
        raise ValueError(
            'You have configured this tool in readonly mode. To make this change you will have to update your configuration.'
        )

    # Get AWS clients from connection managers
    ec2_client = EC2ConnectionManager.get_connection()
    elasticache_client = ElastiCacheConnectionManager.get_connection()

    try:
        # Validate key_name
        if not key_name:
            raise ValueError(
                'key_name is required. Use CreateKeyPair or ImportKeyPair EC2 APIs to create/import an SSH key pair.'
            )

        # Verify key pair exists
        key_pairs = ec2_client.describe_key_pairs(KeyNames=[key_name])
        if not key_pairs.get('KeyPairs'):
            return {
                'error': f"Key pair '{key_name}' not found. Use CreateKeyPair or ImportKeyPair EC2 APIs to create/import an SSH key pair."
            }

        # Get cache cluster details to find its VPC
        cache_cluster = elasticache_client.describe_cache_clusters(
            CacheClusterId=cache_cluster_id, ShowCacheNodeInfo=True
        )['CacheClusters'][0]

        cache_subnet_group = elasticache_client.describe_cache_subnet_groups(
            CacheSubnetGroupName=cache_cluster['CacheSubnetGroupName']
        )['CacheSubnetGroups'][0]
        cache_vpc_id = cache_subnet_group['VpcId']

        # Get subnet details and verify it's public
        subnet_response = ec2_client.describe_subnets(SubnetIds=[subnet_id])
        subnet = subnet_response['Subnets'][0]
        subnet_vpc_id = subnet['VpcId']

        # Check VPC compatibility
        if subnet_vpc_id != cache_vpc_id:
            raise ValueError(
                f'Subnet VPC ({subnet_vpc_id}) does not match cache cluster VPC ({cache_vpc_id})'
            )

        # Check if subnet is public by looking for route to internet gateway
        route_tables = ec2_client.describe_route_tables(
            Filters=[{'Name': 'association.subnet-id', 'Values': [subnet_id]}]
        )['RouteTables']

        is_public = False
        for rt in route_tables:
            for route in rt.get('Routes', []):
                if route.get('GatewayId', '').startswith('igw-'):
                    is_public = True
                    break
            if is_public:
                break

        if not is_public:
            raise ValueError(
                f'Subnet {subnet_id} is not public (no route to internet gateway found). '
                'The subnet must be public to allow SSH access to the jump host.'
            )

        # Use Amazon Linux 2023 AMI
        images = ec2_client.describe_images(
            Filters=[
                {'Name': 'name', 'Values': ['al2023-ami-2023.*-x86_64']},
                {'Name': 'owner-alias', 'Values': ['amazon']},
            ]
        )
        ami_id = sorted(images['Images'], key=lambda x: x['CreationDate'], reverse=True)[0][
            'ImageId'
        ]

        # Verify and update security group rules for SSH access
        security_group = ec2_client.describe_security_groups(GroupIds=[security_group_id])[
            'SecurityGroups'
        ][0]

        # Check if port 22 is already open
        has_ssh_rule = False
        for rule in security_group.get('IpPermissions', []):
            if (
                rule.get('IpProtocol') == 'tcp'
                and rule.get('FromPort') == 22
                and rule.get('ToPort') == 22
                and any(
                    ip_range.get('CidrIp') == '0.0.0.0/0' for ip_range in rule.get('IpRanges', [])
                )
            ):
                has_ssh_rule = True
                break

        # Add SSH rule if it doesn't exist
        if not has_ssh_rule:
            ec2_client.authorize_security_group_ingress(
                GroupId=security_group_id,
                IpPermissions=[
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 22,
                        'ToPort': 22,
                        'IpRanges': [
                            {'CidrIp': '0.0.0.0/0', 'Description': 'SSH access from anywhere'}
                        ],
                    }
                ],
            )

        # Launch EC2 instance
        instance = ec2_client.run_instances(
            ImageId=ami_id,
            InstanceType=instance_type,
            KeyName=key_name,
            MaxCount=1,
            MinCount=1,
            NetworkInterfaces=[
                {
                    'SubnetId': subnet_id,
                    'DeviceIndex': 0,
                    'AssociatePublicIpAddress': True,
                    'Groups': [security_group_id],
                }
            ],
            TagSpecifications=[
                {
                    'ResourceType': 'instance',
                    'Tags': [{'Key': 'Name', 'Value': f'ElastiCache-JumpHost-{cache_cluster_id}'}],
                }
            ],
        )

        # Wait for instance to be running and get its public IP
        waiter = ec2_client.get_waiter('instance_running')
        instance_id = instance['Instances'][0]['InstanceId']
        waiter.wait(InstanceIds=[instance_id])

        instance_info = ec2_client.describe_instances(InstanceIds=[instance_id])
        public_ip = instance_info['Reservations'][0]['Instances'][0]['PublicIpAddress']

        # Configure security groups using common function
        configured, vpc_id, cache_port = await _configure_security_groups(
            cache_cluster_id,
            instance_id,
            ec2_client=ec2_client,
            elasticache_client=elasticache_client,
        )

        return {
            'InstanceId': instance_id,
            'PublicIpAddress': public_ip,
            'InstanceType': instance_type,
            'SubnetId': subnet_id,
            'SecurityGroupId': security_group_id,
            'CacheClusterId': cache_cluster_id,
            'SecurityGroupsConfigured': configured,
            'CachePort': cache_port,
            'VpcId': vpc_id,
        }

    except ClientError as e:
        if e.response['Error']['Code'] == 'InvalidKeyPair.NotFound':
            return {
                'error': f"Key pair '{key_name}' not found. Use CreateKeyPair or ImportKeyPair EC2 APIs to create/import an SSH key pair."
            }
        return {'error': str(e)}
    except Exception as e:
        return {'error': str(e)}
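A consumer-side sketch of how the dictionary returned by get-ssh-tunnel-command-cache-cluster might be used, assuming a result has already been obtained through an MCP client. The key file name, host names, and the redis package are illustrative placeholders, not part of this package; only the 'command' and 'localPort' keys come from the tool above.

import shlex
import subprocess

import redis  # third-party client, assumed to be installed separately

# Illustrative result, shaped like the return value of get_ssh_tunnel_command_cc above.
result = {
    'command': (
        'ssh -i "my-key.pem" -fN -l ec2-user '
        '-L 6379:demo-cluster.abc123.cache.amazonaws.com:6379 '
        'ec2-203-0-113-10.compute-1.amazonaws.com -v'
    ),
    'localPort': 6379,
}

# -fN backgrounds ssh after authentication without running a remote command,
# so this call returns once the tunnel is established.
subprocess.run(shlex.split(result['command']), check=True)

# Traffic to 127.0.0.1:<localPort> is now forwarded to the cache endpoint.
client = redis.Redis(host='127.0.0.1', port=result['localPort'])
print(client.ping())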
awslabs/elasticache_mcp_server/tools/cc/create.py
@@ -0,0 +1,212 @@

# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create cache cluster tool for ElastiCache MCP server."""

from ...common.connection import ElastiCacheConnectionManager
from ...common.decorators import handle_exceptions
from ...common.server import mcp
from ...context import Context
from ..rg.processors import process_log_delivery_configurations
from pydantic import BaseModel, ConfigDict, Field
from typing import Any, Dict, List, Optional, Union


class CreateCacheClusterRequest(BaseModel):
    """Request model for creating an ElastiCache cache cluster."""

    model_config = ConfigDict(validate_by_name=True, arbitrary_types_allowed=True)

    cache_cluster_id: str = Field(..., description='The cache cluster identifier')
    cache_node_type: Optional[str] = Field(
        None, description='The compute and memory capacity of nodes'
    )
    engine: Optional[str] = Field(None, description='The name of the cache engine')
    engine_version: Optional[str] = Field(
        None, description='The version number of the cache engine'
    )
    num_cache_nodes: Optional[int] = Field(None, description='The number of cache nodes', gt=0)
    preferred_availability_zone: Optional[str] = Field(
        None, description='The EC2 Availability Zone for the cluster'
    )
    preferred_availability_zones: Optional[List[str]] = Field(
        None, description='List of preferred Availability Zones'
    )
    cache_parameter_group_name: Optional[str] = Field(
        None, description='The name of the parameter group to associate'
    )
    cache_subnet_group_name: Optional[str] = Field(
        None, description='The name of the cache subnet group to use'
    )
    cache_security_group_names: Optional[List[str]] = Field(
        None, description='List of cache security group names'
    )
    security_group_ids: Optional[List[str]] = Field(
        None, description='List of Amazon VPC security group IDs'
    )
    tags: Optional[Union[str, List[Dict[str, str]], Dict[str, str]]] = Field(
        None, description='Tags to apply'
    )
    snapshot_arns: Optional[List[str]] = Field(
        None, description='List of ARNs of snapshots to restore from'
    )
    snapshot_name: Optional[str] = Field(
        None, description='The name of a snapshot to restore from'
    )
    preferred_maintenance_window: Optional[str] = Field(
        None, description='The weekly time range for maintenance'
    )
    port: Optional[int] = Field(
        None, description='The port number on which the cache accepts connections'
    )
    notification_topic_arn: Optional[str] = Field(
        None, description='The ARN of an SNS topic for notifications'
    )
    auto_minor_version_upgrade: Optional[bool] = Field(
        None, description='Enable/disable automatic minor version upgrades'
    )
    snapshot_retention_limit: Optional[int] = Field(
        None, description='The number of days to retain backups'
    )
    snapshot_window: Optional[str] = Field(None, description='The daily time range for backups')
    auth_token: Optional[str] = Field(
        None, description='Password used to access a password protected server'
    )
    outpost_mode: Optional[str] = Field(
        None, description="Outpost mode ('single-outpost' or 'cross-outpost')"
    )
    preferred_outpost_arn: Optional[str] = Field(
        None, description='The ARN of the preferred outpost'
    )
    preferred_outpost_arns: Optional[List[str]] = Field(
        None, description='List of preferred outpost ARNs'
    )
    log_delivery_configurations: Optional[Union[str, List[Dict]]] = Field(
        None, description='Log delivery configurations'
    )


@mcp.tool(name='create-cache-cluster')
@handle_exceptions
async def create_cache_cluster(request: CreateCacheClusterRequest) -> Dict:
    """Create an Amazon ElastiCache cache cluster."""
    # Check if readonly mode is enabled
    if Context.readonly_mode():
        raise ValueError(
            'You have configured this tool in readonly mode. To make this change you will have to update your configuration.'
        )

    # Get ElastiCache client
    elasticache_client = ElastiCacheConnectionManager.get_connection()

    # Convert request model to dictionary, only including non-None values
    create_request: Dict[str, Any] = {
        'CacheClusterId': request.cache_cluster_id
    }  # Required parameter

    # Optional parameters - only include if they have a value
    if request.cache_node_type is not None:
        create_request['CacheNodeType'] = request.cache_node_type
    if request.engine is not None:
        create_request['Engine'] = request.engine
    if request.engine_version is not None:
        create_request['EngineVersion'] = request.engine_version
    if request.num_cache_nodes is not None:
        create_request['NumCacheNodes'] = request.num_cache_nodes
    if request.preferred_availability_zone is not None:
        create_request['PreferredAvailabilityZone'] = request.preferred_availability_zone
    if request.preferred_availability_zones is not None:
        create_request['PreferredAvailabilityZones'] = request.preferred_availability_zones
    if request.cache_parameter_group_name is not None:
        create_request['CacheParameterGroupName'] = request.cache_parameter_group_name
    if request.cache_subnet_group_name is not None:
        create_request['CacheSubnetGroupName'] = request.cache_subnet_group_name
    if request.cache_security_group_names is not None:
        create_request['CacheSecurityGroupNames'] = request.cache_security_group_names
    if request.security_group_ids is not None:
        create_request['SecurityGroupIds'] = request.security_group_ids
    if request.tags:
        if isinstance(request.tags, str):
            # Parse shorthand syntax: Key=string,Value=string
            tag_list = []
            try:
                pairs = [p.strip() for p in request.tags.split(',') if p.strip()]
                for pair in pairs:
                    if '=' not in pair:
                        return {
                            'error': 'Invalid tag format. Each tag must be in Key=Value format'
                        }
                    key, value = pair.split('=', 1)
                    key = key.strip()
                    value = value.strip() if value.strip() else None
                    if not key:
                        return {'error': 'Tag key cannot be empty'}
                    tag_list.append({'Key': key, 'Value': value})
                create_request['Tags'] = tag_list
            except Exception as e:
                return {
                    'error': f'Invalid tag shorthand syntax. Expected format: Key=string,Value=string. Error: {str(e)}'
                }
        elif isinstance(request.tags, dict):
            # Handle dictionary format
            tag_list = []
            for k, v in request.tags.items():
                if not k:
                    return {'error': 'Tag key cannot be empty'}
                tag_list.append({'Key': k, 'Value': v})
            create_request['Tags'] = tag_list
        elif isinstance(request.tags, list):
            # Handle list format
            for tag in request.tags:
                if not isinstance(tag, dict) or 'Key' not in tag:
                    return {'error': 'Each tag must be a dictionary with a Key'}
                if not tag['Key']:
                    return {'error': 'Tag key cannot be empty'}
            create_request['Tags'] = request.tags
    if request.snapshot_arns is not None:
        create_request['SnapshotArns'] = request.snapshot_arns
    if request.snapshot_name is not None:
        create_request['SnapshotName'] = request.snapshot_name
    if request.preferred_maintenance_window is not None:
        create_request['PreferredMaintenanceWindow'] = request.preferred_maintenance_window
    if request.port is not None:
        create_request['Port'] = request.port
    if request.notification_topic_arn is not None:
        create_request['NotificationTopicArn'] = request.notification_topic_arn
    if request.auto_minor_version_upgrade is not None:
        create_request['AutoMinorVersionUpgrade'] = request.auto_minor_version_upgrade
    if request.snapshot_retention_limit is not None:
        create_request['SnapshotRetentionLimit'] = request.snapshot_retention_limit
    if request.snapshot_window is not None:
        create_request['SnapshotWindow'] = request.snapshot_window
    if request.auth_token is not None:
        create_request['AuthToken'] = request.auth_token
    if request.outpost_mode is not None:
        create_request['OutpostMode'] = request.outpost_mode
    if request.preferred_outpost_arn is not None:
        create_request['PreferredOutpostArn'] = request.preferred_outpost_arn
    if request.preferred_outpost_arns is not None:
        create_request['PreferredOutpostArns'] = request.preferred_outpost_arns
    if request.log_delivery_configurations:
        try:
            processed_configs = process_log_delivery_configurations(
                request.log_delivery_configurations
            )
            create_request['LogDeliveryConfigurations'] = processed_configs
        except ValueError as e:
            return {'error': str(e)}

    # Create the cache cluster
    response = elasticache_client.create_cache_cluster(**create_request)
    return response