awslabs.cost-explorer-mcp-server 0.0.2__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
awslabs/__init__.py CHANGED
@@ -1,3 +1,17 @@
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
  """
  AWS Labs Cost Explorer MCP Server package.
  """
awslabs/cost_explorer_mcp_server/__init__.py CHANGED
@@ -1,6 +1,20 @@
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
  """Cost Explorer MCP Server module.

  This module provides MCP tools for analyzing AWS costs and usage data through the AWS Cost Explorer API.
  """

- __version__ = "0.0.0"
+ __version__ = '0.0.0'
awslabs/cost_explorer_mcp_server/helpers.py CHANGED
@@ -1,17 +1,61 @@
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
  """Helper functions for the Cost Explorer MCP server."""

  import boto3
- import logging
+ import os
  import re
+ import sys
  from datetime import datetime
- from typing import Any, Dict, Tuple
+ from loguru import logger
+ from typing import Any, Dict, Optional, Tuple
+
+
+ # Configure Loguru logging
+ logger.remove()
+ logger.add(sys.stderr, level=os.getenv('FASTMCP_LOG_LEVEL', 'WARNING'))
+
+ # Global client cache
+ _cost_explorer_client = None


- # Set up logging
- logger = logging.getLogger(__name__)
+ def get_cost_explorer_client():
+     """Get Cost Explorer client with proper session management and caching.
+
+     Returns:
+         boto3.client: Configured Cost Explorer client (cached after first call)
+     """
+     global _cost_explorer_client
+
+     if _cost_explorer_client is None:
+         try:
+             # Read environment variables dynamically
+             aws_region = os.environ.get('AWS_REGION', 'us-east-1')
+             aws_profile = os.environ.get('AWS_PROFILE')

- # Initialize AWS Cost Explorer client
- ce = boto3.client("ce")
+             if aws_profile:
+                 _cost_explorer_client = boto3.Session(
+                     profile_name=aws_profile, region_name=aws_region
+                 ).client('ce')
+             else:
+                 _cost_explorer_client = boto3.Session(region_name=aws_region).client('ce')
+         except Exception as e:
+             logger.error(f'Error creating Cost Explorer client: {str(e)}')
+             raise
+
+     return _cost_explorer_client


  def validate_date_format(date_str: str) -> Tuple[bool, str]:
@@ -24,78 +68,152 @@ def validate_date_format(date_str: str) -> Tuple[bool, str]:
          Tuple of (is_valid, error_message)
      """
      # Check format with regex
-     if not re.match(r"^\d{4}-\d{2}-\d{2}$", date_str):
+     if not re.match(r'^\d{4}-\d{2}-\d{2}$', date_str):
          return False, f"Date '{date_str}' is not in YYYY-MM-DD format"

      # Check if it's a valid date
      try:
-         datetime.strptime(date_str, "%Y-%m-%d")
-         return True, ""
+         datetime.strptime(date_str, '%Y-%m-%d')
+         return True, ''
      except ValueError as e:
          return False, f"Invalid date '{date_str}': {str(e)}"


- def get_dimension_values(
-     key: str, billing_period_start: str, billing_period_end: str
- ) -> Dict[str, Any]:
-     """Get available values for a specific dimension."""
-     # Validate date formats
-     is_valid_start, error_start = validate_date_format(billing_period_start)
+ def format_date_for_api(date_str: str, granularity: str) -> str:
+     """Format date string appropriately for AWS Cost Explorer API based on granularity.
+
+     Args:
+         date_str: Date string in YYYY-MM-DD format
+         granularity: The granularity (DAILY, MONTHLY, HOURLY)
+
+     Returns:
+         Formatted date string appropriate for the API call
+     """
+     if granularity.upper() == 'HOURLY':
+         # For hourly granularity, AWS expects datetime format
+         # Convert YYYY-MM-DD to YYYY-MM-DDTHH:MM:SSZ
+         dt = datetime.strptime(date_str, '%Y-%m-%d')
+         return dt.strftime('%Y-%m-%dT00:00:00Z')
+     else:
+         # For DAILY and MONTHLY, use the original date format
+         return date_str
+
+
+ def validate_date_range(
+     start_date: str, end_date: str, granularity: Optional[str] = None
+ ) -> Tuple[bool, str]:
+     """Validate date range with format and logical checks.
+
+     Args:
+         start_date: The start date string in YYYY-MM-DD format
+         end_date: The end date string in YYYY-MM-DD format
+         granularity: Optional granularity to check specific constraints
+
+     Returns:
+         Tuple of (is_valid, error_message)
+     """
+     # Validate start date format
+     is_valid_start, error_start = validate_date_format(start_date)
      if not is_valid_start:
-         return {"error": error_start}
+         return False, error_start

-     is_valid_end, error_end = validate_date_format(billing_period_end)
+     # Validate end date format
+     is_valid_end, error_end = validate_date_format(end_date)
      if not is_valid_end:
-         return {"error": error_end}
+         return False, error_end
+
+     # Validate date range logic
+     start_dt = datetime.strptime(start_date, '%Y-%m-%d')
+     end_dt = datetime.strptime(end_date, '%Y-%m-%d')
+     if start_dt > end_dt:
+         return False, f"Start date '{start_date}' cannot be after end date '{end_date}'"
+
+     # Validate granularity-specific constraints
+     if granularity and granularity.upper() == 'HOURLY':
+         # HOURLY granularity supports maximum 14 days
+         date_diff = (end_dt - start_dt).days
+         if date_diff > 14:
+             return (
+                 False,
+                 f'HOURLY granularity supports a maximum of 14 days. Current range is {date_diff} days ({start_date} to {end_date}). Please use a shorter date range.',
+             )

-     # Validate date range
-     if billing_period_start > billing_period_end:
-         return {
-             "error": f"Start date '{billing_period_start}' cannot be after end date '{billing_period_end}'"
-         }
+     return True, ''
+
+
+ def get_dimension_values(
+     key: str, billing_period_start: str, billing_period_end: str
+ ) -> Dict[str, Any]:
+     """Get available values for a specific dimension."""
+     # Validate date range (no granularity constraint for dimension values)
+     is_valid, error_message = validate_date_range(billing_period_start, billing_period_end)
+     if not is_valid:
+         return {'error': error_message}

      try:
+         ce = get_cost_explorer_client()
          response = ce.get_dimension_values(
-             TimePeriod={"Start": billing_period_start, "End": billing_period_end},
+             TimePeriod={'Start': billing_period_start, 'End': billing_period_end},
              Dimension=key.upper(),
          )
-         dimension_values = response["DimensionValues"]
-         values = [value["Value"] for value in dimension_values]
-         return {"dimension": key.upper(), "values": values}
+         dimension_values = response['DimensionValues']
+         values = [value['Value'] for value in dimension_values]
+         return {'dimension': key.upper(), 'values': values}
      except Exception as e:
-         logger.error(f"Error getting dimension values: {e}")
-         return {"error": str(e)}
+         logger.error(
+             f'Error getting dimension values for {key.upper()} ({billing_period_start} to {billing_period_end}): {e}'
+         )
+         return {'error': str(e)}


  def get_tag_values(
      tag_key: str, billing_period_start: str, billing_period_end: str
  ) -> Dict[str, Any]:
      """Get available values for a specific tag key."""
-     # Validate date formats
-     is_valid_start, error_start = validate_date_format(billing_period_start)
-     if not is_valid_start:
-         return {"error": error_start}
-
-     is_valid_end, error_end = validate_date_format(billing_period_end)
-     if not is_valid_end:
-         return {"error": error_end}
-
-     # Validate date range
-     if billing_period_start > billing_period_end:
-         return {
-             "error": f"Start date '{billing_period_start}' cannot be after end date '{billing_period_end}'"
-         }
+     # Validate date range (no granularity constraint for tag values)
+     is_valid, error_message = validate_date_range(billing_period_start, billing_period_end)
+     if not is_valid:
+         return {'error': error_message}

      try:
+         ce = get_cost_explorer_client()
          response = ce.get_tags(
-             TimePeriod={"Start": billing_period_start, "End": billing_period_end},
+             TimePeriod={'Start': billing_period_start, 'End': billing_period_end},
              TagKey=tag_key,
          )
-         tag_values = response["Tags"]
-         return {"tag_key": tag_key, "values": tag_values}
+         tag_values = response['Tags']
+         return {'tag_key': tag_key, 'values': tag_values}
      except Exception as e:
-         logger.error(f"Error getting tag values: {e}")
-         return {"error": str(e)}
+         logger.error(
+             f'Error getting tag values for {tag_key} ({billing_period_start} to {billing_period_end}): {e}'
+         )
+         return {'error': str(e)}
+
+
+ def validate_match_options(match_options: list, filter_type: str) -> Dict[str, Any]:
+     """Validate MatchOptions based on filter type.
+
+     Args:
+         match_options: List of match options to validate
+         filter_type: Type of filter ('Dimensions', 'Tags', 'CostCategories')
+
+     Returns:
+         Empty dictionary if valid, or an error dictionary
+     """
+     if filter_type == 'Dimensions':
+         valid_options = ['EQUALS', 'CASE_SENSITIVE']
+     elif filter_type in ['Tags', 'CostCategories']:
+         valid_options = ['EQUALS', 'ABSENT', 'CASE_SENSITIVE']
+     else:
+         return {'error': f'Unknown filter type: {filter_type}'}
+
+     for option in match_options:
+         if option not in valid_options:
+             return {
+                 'error': f"Invalid MatchOption '{option}' for {filter_type}. Valid values are: {valid_options}"
+             }
+
+     return {}


  def validate_expression(
@@ -111,132 +229,139 @@ def validate_expression(
      Returns:
          Empty dictionary if valid, or an error dictionary
      """
-     # Validate date formats
-     is_valid_start, error_start = validate_date_format(billing_period_start)
-     if not is_valid_start:
-         return {"error": error_start}
-
-     is_valid_end, error_end = validate_date_format(billing_period_end)
-     if not is_valid_end:
-         return {"error": error_end}
-
-     # Validate date range
-     if billing_period_start > billing_period_end:
-         return {
-             "error": f"Start date '{billing_period_start}' cannot be after end date '{billing_period_end}'"
-         }
+     # Validate date range (no granularity constraint for filter validation)
+     is_valid, error_message = validate_date_range(billing_period_start, billing_period_end)
+     if not is_valid:
+         return {'error': error_message}

      try:
-         if "Dimensions" in expression:
-             dimension = expression["Dimensions"]
+         if 'Dimensions' in expression:
+             dimension = expression['Dimensions']
              if (
-                 "Key" not in dimension
-                 or "Values" not in dimension
-                 or "MatchOptions" not in dimension
+                 'Key' not in dimension
+                 or 'Values' not in dimension
+                 or 'MatchOptions' not in dimension
              ):
                  return {
-                     "error": 'Dimensions filter must include "Key", "Values", and "MatchOptions".'
+                     'error': 'Dimensions filter must include "Key", "Values", and "MatchOptions".'
                  }

-             dimension_key = dimension["Key"]
-             dimension_values = dimension["Values"]
+             # Validate MatchOptions for Dimensions
+             match_options_result = validate_match_options(dimension['MatchOptions'], 'Dimensions')
+             if 'error' in match_options_result:
+                 return match_options_result
+
+             dimension_key = dimension['Key']
+             dimension_values = dimension['Values']
              valid_values_response = get_dimension_values(
                  dimension_key, billing_period_start, billing_period_end
              )
-             if "error" in valid_values_response:
-                 return {"error": valid_values_response["error"]}
-             valid_values = valid_values_response["values"]
+             if 'error' in valid_values_response:
+                 return {'error': valid_values_response['error']}
+             valid_values = valid_values_response['values']
              for value in dimension_values:
                  if value not in valid_values:
                      return {
-                         "error": f"Invalid value '{value}' for dimension '{dimension_key}'. Valid values are: {valid_values}"
+                         'error': f"Invalid value '{value}' for dimension '{dimension_key}'. Valid values are: {valid_values}"
                      }

-         if "Tags" in expression:
-             tag = expression["Tags"]
-             if "Key" not in tag or "Values" not in tag or "MatchOptions" not in tag:
-                 return {"error": 'Tags filter must include "Key", "Values", and "MatchOptions".'}
+         if 'Tags' in expression:
+             tag = expression['Tags']
+             if 'Key' not in tag or 'Values' not in tag or 'MatchOptions' not in tag:
+                 return {'error': 'Tags filter must include "Key", "Values", and "MatchOptions".'}
+
+             # Validate MatchOptions for Tags
+             match_options_result = validate_match_options(tag['MatchOptions'], 'Tags')
+             if 'error' in match_options_result:
+                 return match_options_result

-             tag_key = tag["Key"]
-             tag_values = tag["Values"]
+             tag_key = tag['Key']
+             tag_values = tag['Values']
              valid_tag_values_response = get_tag_values(
                  tag_key, billing_period_start, billing_period_end
              )
-             if "error" in valid_tag_values_response:
-                 return {"error": valid_tag_values_response["error"]}
-             valid_tag_values = valid_tag_values_response["values"]
+             if 'error' in valid_tag_values_response:
+                 return {'error': valid_tag_values_response['error']}
+             valid_tag_values = valid_tag_values_response['values']
              for value in tag_values:
                  if value not in valid_tag_values:
                      return {
-                         "error": f"Invalid value '{value}' for tag '{tag_key}'. Valid values are: {valid_tag_values}"
+                         'error': f"Invalid value '{value}' for tag '{tag_key}'. Valid values are: {valid_tag_values}"
                      }

-         if "CostCategories" in expression:
-             cost_category = expression["CostCategories"]
+         if 'CostCategories' in expression:
+             cost_category = expression['CostCategories']
              if (
-                 "Key" not in cost_category
-                 or "Values" not in cost_category
-                 or "MatchOptions" not in cost_category
+                 'Key' not in cost_category
+                 or 'Values' not in cost_category
+                 or 'MatchOptions' not in cost_category
              ):
                  return {
-                     "error": 'CostCategories filter must include "Key", "Values", and "MatchOptions".'
+                     'error': 'CostCategories filter must include "Key", "Values", and "MatchOptions".'
                  }

-         logical_operators = ["And", "Or", "Not"]
+             # Validate MatchOptions for CostCategories
+             match_options_result = validate_match_options(
+                 cost_category['MatchOptions'], 'CostCategories'
+             )
+             if 'error' in match_options_result:
+                 return match_options_result
+
+         logical_operators = ['And', 'Or', 'Not']
          logical_count = sum(1 for op in logical_operators if op in expression)

          if logical_count > 1:
              return {
-                 "error": "Only one logical operator (And, Or, Not) is allowed per expression in filter parameter."
+                 'error': 'Only one logical operator (And, Or, Not) is allowed per expression in filter parameter.'
              }

          if logical_count == 0 and len(expression) > 1:
              return {
-                 "error": "Filter parameter with multiple expressions require a logical operator (And, Or, Not)."
+                 'error': 'Filter parameter with multiple expressions require a logical operator (And, Or, Not).'
              }

-         if "And" in expression:
-             if not isinstance(expression["And"], list):
-                 return {"error": "And expression must be a list of expressions."}
-             for sub_expression in expression["And"]:
+         if 'And' in expression:
+             if not isinstance(expression['And'], list):
+                 return {'error': 'And expression must be a list of expressions.'}
+             for sub_expression in expression['And']:
                  result = validate_expression(
                      sub_expression, billing_period_start, billing_period_end
                  )
-                 if "error" in result:
+                 if 'error' in result:
                      return result

-         if "Or" in expression:
-             if not isinstance(expression["Or"], list):
-                 return {"error": "Or expression must be a list of expressions."}
-             for sub_expression in expression["Or"]:
+         if 'Or' in expression:
+             if not isinstance(expression['Or'], list):
+                 return {'error': 'Or expression must be a list of expressions.'}
+             for sub_expression in expression['Or']:
                  result = validate_expression(
                      sub_expression, billing_period_start, billing_period_end
                  )
-                 if "error" in result:
+                 if 'error' in result:
                      return result

-         if "Not" in expression:
-             if not isinstance(expression["Not"], dict):
-                 return {"error": "Not expression must be a single expression."}
+         if 'Not' in expression:
+             if not isinstance(expression['Not'], dict):
+                 return {'error': 'Not expression must be a single expression.'}
              result = validate_expression(
-                 expression["Not"], billing_period_start, billing_period_end
+                 expression['Not'], billing_period_start, billing_period_end
              )
-             if "error" in result:
+             if 'error' in result:
                  return result

          if not any(
-             k in expression for k in ["Dimensions", "Tags", "CostCategories", "And", "Or", "Not"]
+             k in expression for k in ['Dimensions', 'Tags', 'CostCategories', 'And', 'Or', 'Not']
          ):
              return {
-                 "error": 'Filter Expression must include at least one of the following keys: "Dimensions", "Tags", "CostCategories", "And", "Or", "Not".'
+                 'error': 'Filter Expression must include at least one of the following keys: "Dimensions", "Tags", "CostCategories", "And", "Or", "Not".'
              }

          return {}
      except Exception as e:
-         return {"error": f"Error validating expression: {str(e)}"}
+         return {'error': f'Error validating expression: {str(e)}'}


- def validate_group_by(group_by: Dict[str, Any]) -> Dict[str, Any]:
+ def validate_group_by(group_by: Optional[Dict[str, Any]]) -> Dict[str, Any]:
      """Validate the group_by parameter.

      Args:
@@ -246,14 +371,19 @@ def validate_group_by(group_by: Dict[str, Any]) -> Dict[str, Any]:
          Empty dictionary if valid, or an error dictionary
      """
      try:
-         if not isinstance(group_by, dict) or "Type" not in group_by or "Key" not in group_by:
-             return {"error": 'group_by must be a dictionary with "Type" and "Key" keys.'}
+         if (
+             group_by is None
+             or not isinstance(group_by, dict)
+             or 'Type' not in group_by
+             or 'Key' not in group_by
+         ):
+             return {'error': 'group_by must be a dictionary with "Type" and "Key" keys.'}

-         if group_by["Type"].upper() not in ["DIMENSION", "TAG", "COST_CATEGORY"]:
+         if group_by['Type'].upper() not in ['DIMENSION', 'TAG', 'COST_CATEGORY']:
              return {
-                 "error": "Invalid group Type. Valid types are DIMENSION, TAG, and COST_CATEGORY."
+                 'error': 'Invalid group Type. Valid types are DIMENSION, TAG, and COST_CATEGORY.'
              }

          return {}
      except Exception as e:
-         return {"error": f"Error validating group_by: {str(e)}"}
+         return {'error': f'Error validating group_by: {str(e)}'}
awslabs/cost_explorer_mcp_server/server.py CHANGED
@@ -1,30 +1,49 @@
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
  """Cost Explorer MCP server implementation.

  This server provides tools for analyzing AWS costs and usage data through the AWS Cost Explorer API.
  """

- import boto3
- import logging
+ import json
+ import os
  import pandas as pd
+ import sys
  from awslabs.cost_explorer_mcp_server.helpers import (
+     format_date_for_api,
+     get_cost_explorer_client,
      get_dimension_values,
      get_tag_values,
      validate_date_format,
+     validate_date_range,
      validate_expression,
      validate_group_by,
  )
  from datetime import datetime, timedelta
+ from loguru import logger
  from mcp.server.fastmcp import Context, FastMCP
  from pydantic import BaseModel, Field, field_validator
  from typing import Any, Dict, Optional, Union


- # Set up logging
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
+ # Configure Loguru logging
+ logger.remove()
+ logger.add(sys.stderr, level=os.getenv('FASTMCP_LOG_LEVEL', 'WARNING'))

- # Initialize AWS Cost Explorer client
- ce = boto3.client("ce")
+ # Constants
+ COST_EXPLORER_END_DATE_OFFSET = 1


  class DateRange(BaseModel):
@@ -32,66 +51,32 @@ class DateRange(BaseModel):

      start_date: str = Field(
          ...,
-         description="The start date of the billing period in YYYY-MM-DD format. Defaults to last month, if not provided.",
+         description='The start date of the billing period in YYYY-MM-DD format. Defaults to last month, if not provided.',
      )
      end_date: str = Field(
-         ..., description="The end date of the billing period in YYYY-MM-DD format."
+         ..., description='The end date of the billing period in YYYY-MM-DD format.'
      )

-     @field_validator("start_date")
+     @field_validator('start_date', 'end_date')
      @classmethod
-     def validate_start_date(cls, v):
-         """Validate that start_date is in YYYY-MM-DD format and is a valid date."""
+     def validate_individual_dates(cls, v):
+         """Validate that individual dates are in YYYY-MM-DD format and are valid dates."""
          is_valid, error = validate_date_format(v)
          if not is_valid:
              raise ValueError(error)
          return v

-     @field_validator("end_date")
-     @classmethod
-     def validate_end_date(cls, v, info):
-         """Validate that end_date is in YYYY-MM-DD format and is a valid date, and not before start_date."""
-         is_valid, error = validate_date_format(v)
+     def model_post_init(self, __context):
+         """Validate the date range after both dates are set."""
+         is_valid, error = validate_date_range(self.start_date, self.end_date)
          if not is_valid:
              raise ValueError(error)

-         # Access the start_date from the data dictionary
-         start_date = info.data.get("start_date")
-         if start_date and v < start_date:
-             raise ValueError(f"End date '{v}' cannot be before start date '{start_date}'")
-
-         return v
-
-
- class GroupBy(BaseModel):
-     """Group by model for cost queries."""
-
-     type: str = Field(
-         ...,
-         description="Type of grouping. Valid values are DIMENSION, TAG, and COST_CATEGORY.",
-     )
-     key: str = Field(
-         ...,
-         description="Key to group by. For DIMENSION type, valid values include AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY, LINKED_ACCOUNT, OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, TENANCY, RECORD_TYPE, and USAGE_TYPE.",
-     )
-
-
- class FilterExpression(BaseModel):
-     """Filter expression model for cost queries."""
-
-     filter_json: str = Field(
-         ...,
-         description="Filter criteria as a Python dictionary to narrow down AWS costs. Supports filtering by Dimensions (SERVICE, REGION, etc.), Tags, or CostCategories. You can use logical operators (And, Or, Not) for complex filters. Examples: 1) Simple service filter: {'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute', 'Amazon Simple Storage Service'], 'MatchOptions': ['EQUALS']}}. 2) Region filter: {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}. 3) Combined filter: {'And': [{'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute'], 'MatchOptions': ['EQUALS']}}, {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}]}.",
-     )
-
-
- class CostMetric(BaseModel):
-     """Cost metric model."""
-
-     metric: str = Field(
-         "UnblendedCost",
-         description="The metric to return in the query. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity. Note: For UsageQuantity, the service aggregates usage numbers without considering units. To get meaningful UsageQuantity metrics, filter by UsageType or UsageTypeGroups.",
-     )
+     def validate_with_granularity(self, granularity: str):
+         """Validate the date range with granularity-specific constraints."""
+         is_valid, error = validate_date_range(self.start_date, self.end_date, granularity)
+         if not is_valid:
+             raise ValueError(error)


  class DimensionKey(BaseModel):
@@ -99,20 +84,20 @@ class DimensionKey(BaseModel):

      dimension_key: str = Field(
          ...,
-         description="The name of the dimension to retrieve values for. Valid values are AZ, INSTANCE_TYPE, LINKED_ACCOUNT, OPERATION, PURCHASE_TYPE, SERVICE, USAGE_TYPE, PLATFORM, TENANCY, RECORD_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY, DEPLOYMENT_OPTION, DATABASE_ENGINE, CACHE_ENGINE, INSTANCE_TYPE_FAMILY, REGION, BILLING_ENTITY, RESERVATION_ID, SAVINGS_PLANS_TYPE, SAVINGS_PLAN_ARN, OPERATING_SYSTEM.",
+         description='The name of the dimension to retrieve values for. Valid values are AZ, INSTANCE_TYPE, LINKED_ACCOUNT, OPERATION, PURCHASE_TYPE, SERVICE, USAGE_TYPE, PLATFORM, TENANCY, RECORD_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY, DEPLOYMENT_OPTION, DATABASE_ENGINE, CACHE_ENGINE, INSTANCE_TYPE_FAMILY, REGION, BILLING_ENTITY, RESERVATION_ID, SAVINGS_PLANS_TYPE, SAVINGS_PLAN_ARN, OPERATING_SYSTEM.',
      )


  # Create FastMCP server
- app = FastMCP(title="Cost Explorer MCP Server")
+ app = FastMCP(title='Cost Explorer MCP Server')


- @app.tool("get_today_date")
+ @app.tool('get_today_date')
  async def get_today_date(ctx: Context) -> Dict[str, str]:
      """Retrieve current date information.

      This tool retrieves the current date in YYYY-MM-DD format and the current month in YYYY-MM format.
-     It's useful for comparing if the billing period requested by the user is not in the future.
+     It's useful for calculating relevent date when user ask last N months/days.

      Args:
          ctx: MCP context
@@ -121,12 +106,12 @@ async def get_today_date(ctx: Context) -> Dict[str, str]:
          Dictionary containing today's date and current month
      """
      return {
-         "today_date": datetime.now().strftime("%Y-%m-%d"),
-         "current_month": datetime.now().strftime("%Y-%m"),
+         'today_date': datetime.now().strftime('%Y-%m-%d'),
+         'current_month': datetime.now().strftime('%Y-%m'),
      }


- @app.tool("get_dimension_values")
+ @app.tool('get_dimension_values')
  async def get_dimension_values_tool(
      ctx: Context, date_range: DateRange, dimension: DimensionKey
  ) -> Dict[str, Any]:
@@ -150,15 +135,15 @@ async def get_dimension_values_tool(
          )
          return response
      except Exception as e:
-         logger.error(f"Error getting dimension values: {e}")
-         return {"error": f"Error getting dimension values: {str(e)}"}
+         logger.error(f'Error getting dimension values for {dimension.dimension_key}: {e}')
+         return {'error': f'Error getting dimension values: {str(e)}'}


- @app.tool("get_tag_values")
+ @app.tool('get_tag_values')
  async def get_tag_values_tool(
      ctx: Context,
      date_range: DateRange,
-     tag_key: str = Field(..., description="The tag key to retrieve values for"),
+     tag_key: str = Field(..., description='The tag key to retrieve values for'),
  ) -> Dict[str, Any]:
      """Retrieve available tag values for AWS Cost Explorer.

@@ -177,29 +162,29 @@ async def get_tag_values_tool(
          response = get_tag_values(tag_key, date_range.start_date, date_range.end_date)
          return response
      except Exception as e:
-         logger.error(f"Error getting tag values: {e}")
-         return {"error": f"Error getting tag values: {str(e)}"}
+         logger.error(f'Error getting tag values for {tag_key}: {e}')
+         return {'error': f'Error getting tag values: {str(e)}'}


- @app.tool("get_cost_and_usage")
+ @app.tool('get_cost_and_usage')
  async def get_cost_and_usage(
      ctx: Context,
      date_range: DateRange,
      granularity: str = Field(
-         "MONTHLY",
-         description="The granularity at which cost data is aggregated. Valid values are DAILY, MONTHLY, and HOURLY. If not provided, defaults to MONTHLY.",
+         'MONTHLY',
+         description='The granularity at which cost data is aggregated. Valid values are DAILY, MONTHLY, and HOURLY. If not provided, defaults to MONTHLY.',
      ),
      group_by: Optional[Union[Dict[str, str], str]] = Field(
-         None,
+         'SERVICE',
          description="Either a dictionary with Type and Key for grouping costs, or simply a string key to group by (which will default to DIMENSION type). Example dictionary: {'Type': 'DIMENSION', 'Key': 'SERVICE'}. Example string: 'SERVICE'.",
      ),
      filter_expression: Optional[Dict[str, Any]] = Field(
          None,
-         description="Filter criteria as a Python dictionary to narrow down AWS costs. Supports filtering by Dimensions (SERVICE, REGION, etc.), Tags, or CostCategories. You can use logical operators (And, Or, Not) for complex filters. Examples: 1) Simple service filter: {'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute', 'Amazon Simple Storage Service'], 'MatchOptions': ['EQUALS']}}. 2) Region filter: {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}. 3) Combined filter: {'And': [{'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute'], 'MatchOptions': ['EQUALS']}}, {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}]}.",
+         description="Filter criteria as a Python dictionary to narrow down AWS costs. Supports filtering by Dimensions (SERVICE, REGION, etc.), Tags, or CostCategories. You can use logical operators (And, Or, Not) for complex filters. MatchOptions validation: For Dimensions, valid values are EQUALS and CASE_SENSITIVE. For Tags and CostCategories, valid values are EQUALS, ABSENT, and CASE_SENSITIVE (defaults to EQUALS and CASE_SENSITIVE). Examples: 1) Simple service filter: {'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute', 'Amazon Simple Storage Service'], 'MatchOptions': ['EQUALS']}}. 2) Region filter: {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}. 3) Combined filter: {'And': [{'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute'], 'MatchOptions': ['EQUALS']}}, {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}]}.",
      ),
      metric: str = Field(
-         "UnblendedCost",
-         description="The metric to return in the query. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity.",
+         'UnblendedCost',
+         description='The metric to return in the query. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity. IMPORTANT: For UsageQuantity, the service aggregates usage numbers without considering units, making results meaningless when mixing different unit types (e.g., compute hours + data transfer GB). To get meaningful UsageQuantity metrics, you MUST filter by USAGE_TYPE or group by USAGE_TYPE/USAGE_TYPE_GROUP to ensure consistent units.',
      ),
  ) -> Dict[str, Any]:
      """Retrieve AWS cost and usage data.
@@ -212,6 +197,10 @@ async def get_cost_and_usage(
      "2025-01-31", the results will include data for January 31st. This differs from the AWS Cost Explorer
      API which treats end_date as exclusive.

+     IMPORTANT: When using UsageQuantity metric, AWS aggregates usage numbers without considering units.
+     This makes results meaningless when different usage types have different units (e.g., EC2 compute hours
+     vs data transfer GB). For meaningful UsageQuantity results, you MUST be very specific with filtering, including USAGE_TYPE or USAGE_TYPE_GROUP.
+
      Example: Get monthly costs for EC2 and S3 services in us-east-1 for May 2025
          await get_cost_and_usage(
              ctx=context,
@@ -242,6 +231,45 @@ async def get_cost_and_usage(
              metric="UnblendedCost"
          )

+     Example: Get meaningful UsageQuantity for specific EC2 instance usage
+         await get_cost_and_usage(
+             ctx=context,
+             {
+                 "date_range": {
+                     "end_date": "2025-05-01",
+                     "start_date": "2025-05-31"
+                 },
+                 "filter_expression": {
+                     "And": [
+                         {
+                             "Dimensions": {
+                                 "Values": [
+                                     "Amazon Elastic Compute Cloud - Compute"
+                                 ],
+                                 "Key": "SERVICE",
+                                 "MatchOptions": [
+                                     "EQUALS"
+                                 ]
+                             }
+                         },
+                         {
+                             "Dimensions": {
+                                 "Values": [
+                                     "EC2: Running Hours"
+                                 ],
+                                 "Key": "USAGE_TYPE_GROUP",
+                                 "MatchOptions": [
+                                     "EQUALS"
+                                 ]
+                             }
+                         }
+                     ]
+                 },
+                 "metric": "UsageQuantity",
+                 "group_by": "USAGE_TYPE",
+                 "granularity": "MONTHLY"
+             }
+
      Args:
          ctx: MCP context
          date_range: The billing period start and end dates in YYYY-MM-DD format (end date is inclusive)
@@ -253,30 +281,38 @@ async def get_cost_and_usage(
      Returns:
          Dictionary containing cost report data grouped according to the specified parameters
      """
+     # Initialize variables at function scope to avoid unbound variable issues
+     billing_period_start = date_range.start_date
+     billing_period_end = date_range.end_date
+
      try:
-         # Process inputs
-         granularity = granularity.upper()
-         if granularity not in ["DAILY", "MONTHLY", "HOURLY"]:
+         # Process inputs - simplified granularity validation
+         granularity = str(granularity).upper()
+
+         if granularity not in ['DAILY', 'MONTHLY', 'HOURLY']:
              return {
-                 "error": f"Invalid granularity: {granularity}. Valid values are DAILY, MONTHLY, and HOURLY."
+                 'error': f'Invalid granularity: {granularity}. Valid values are DAILY, MONTHLY, and HOURLY.'
              }

-         billing_period_start = date_range.start_date
-         billing_period_end = date_range.end_date
+         # Validate date range with granularity-specific constraints
+         try:
+             date_range.validate_with_granularity(granularity)
+         except ValueError as e:
+             return {'error': str(e)}

          # Define valid metrics and their expected data structure
          valid_metrics = {
-             "AmortizedCost": {"has_unit": True, "is_cost": True},
-             "BlendedCost": {"has_unit": True, "is_cost": True},
-             "NetAmortizedCost": {"has_unit": True, "is_cost": True},
-             "NetUnblendedCost": {"has_unit": True, "is_cost": True},
-             "UnblendedCost": {"has_unit": True, "is_cost": True},
-             "UsageQuantity": {"has_unit": True, "is_cost": False},
+             'AmortizedCost': {'has_unit': True, 'is_cost': True},
+             'BlendedCost': {'has_unit': True, 'is_cost': True},
+             'NetAmortizedCost': {'has_unit': True, 'is_cost': True},
+             'NetUnblendedCost': {'has_unit': True, 'is_cost': True},
+             'UnblendedCost': {'has_unit': True, 'is_cost': True},
+             'UsageQuantity': {'has_unit': True, 'is_cost': False},
          }

          if metric not in valid_metrics:
              return {
-                 "error": f"Invalid metric: {metric}. Valid values are {', '.join(valid_metrics.keys())}."
+                 'error': f'Invalid metric: {metric}. Valid values are {", ".join(valid_metrics.keys())}.'
              }

          metric_config = valid_metrics[metric]
@@ -284,8 +320,9 @@ async def get_cost_and_usage(
          # Adjust end date for Cost Explorer API (exclusive)
          # Add one day to make the end date inclusive for the user
          billing_period_end_adj = (
-             datetime.strptime(billing_period_end, "%Y-%m-%d") + timedelta(days=1)
-         ).strftime("%Y-%m-%d")
+             datetime.strptime(billing_period_end, '%Y-%m-%d')
+             + timedelta(days=COST_EXPLORER_END_DATE_OFFSET)
+         ).strftime('%Y-%m-%d')

          # Process filter
          filter_criteria = filter_expression
@@ -296,175 +333,179 @@ async def get_cost_and_usage(
              validation_result = validate_expression(
                  filter_criteria, billing_period_start, billing_period_end_adj
              )
-             if "error" in validation_result:
+             if 'error' in validation_result:
                  return validation_result

          # Process group_by
-         if not group_by:
-             group_by = {"Type": "DIMENSION", "Key": "SERVICE"}
+         if group_by is None:
+             group_by = {'Type': 'DIMENSION', 'Key': 'SERVICE'}
          elif isinstance(group_by, str):
-             group_by = {"Type": "DIMENSION", "Key": group_by}
+             group_by = {'Type': 'DIMENSION', 'Key': group_by}

          # Validate group_by using the existing validate_group_by function
          validation_result = validate_group_by(group_by)
-         if "error" in validation_result:
+         if 'error' in validation_result:
              return validation_result

          # Prepare API call parameters
          common_params = {
-             "TimePeriod": {
-                 "Start": billing_period_start,
-                 "End": billing_period_end_adj,
+             'TimePeriod': {
+                 'Start': format_date_for_api(billing_period_start, granularity),
+                 'End': format_date_for_api(billing_period_end_adj, granularity),
              },
-             "Granularity": granularity,
-             "GroupBy": [{"Type": group_by["Type"].upper(), "Key": group_by["Key"]}],
-             "Metrics": [metric],
+             'Granularity': granularity,
+             'GroupBy': [{'Type': group_by['Type'].upper(), 'Key': group_by['Key']}],
+             'Metrics': [metric],
          }

          if filter_criteria:
-             common_params["Filter"] = filter_criteria
+             common_params['Filter'] = filter_criteria

          # Get cost data
          grouped_costs = {}
          next_token = None
+         ce = get_cost_explorer_client()
+
          while True:
              if next_token:
-                 common_params["NextPageToken"] = next_token
+                 common_params['NextPageToken'] = next_token

              try:
                  response = ce.get_cost_and_usage(**common_params)
              except Exception as e:
-                 logger.error(f"Error calling Cost Explorer API: {e}")
-                 return {"error": f"AWS Cost Explorer API error: {str(e)}"}
-
-             for result_by_time in response["ResultsByTime"]:
-                 date = result_by_time["TimePeriod"]["Start"]
-                 for group in result_by_time.get("Groups", []):
-                     if not group.get("Keys") or len(group["Keys"]) == 0:
-                         logger.warning(f"Skipping group with no keys: {group}")
+                 logger.error(f'Error calling Cost Explorer API: {e}')
+                 return {'error': f'AWS Cost Explorer API error: {str(e)}'}
+
+             for result_by_time in response['ResultsByTime']:
+                 date = result_by_time['TimePeriod']['Start']
+                 for group in result_by_time.get('Groups', []):
+                     if not group.get('Keys') or len(group['Keys']) == 0:
+                         logger.warning(f'Skipping group with no keys: {group}')
                          continue

-                     group_key = group["Keys"][0]
+                     group_key = group['Keys'][0]

                      # Validate that the metric exists in the response
-                     if metric not in group.get("Metrics", {}):
+                     if metric not in group.get('Metrics', {}):
                          logger.error(
                              f"Metric '{metric}' not found in response for group {group_key}"
                          )
                          return {
-                             "error": f"Metric '{metric}' not found in response for group {group_key}"
-                         }
-
-                     metric_data = group["Metrics"][metric]
-
-                     # Validate metric data structure
-                     if "Amount" not in metric_data:
-                         logger.error(
-                             f"Amount not found in metric data for {group_key}: {metric_data}"
-                         )
-                         return {
-                             "error": "Invalid response format: 'Amount' not found in metric data"
+                             'error': f"Metric '{metric}' not found in response for group {group_key}"
                          }

                      try:
-                         metric_data = group["Metrics"][metric]
+                         metric_data = group['Metrics'][metric]

                          # Validate metric data structure
-                         if "Amount" not in metric_data:
+                         if 'Amount' not in metric_data:
                              logger.error(
-                                 f"Amount not found in metric data for {group_key}: {metric_data}"
+                                 f'Amount not found in metric data for {group_key}: {metric_data}'
                              )
                              return {
-                                 "error": "Invalid response format: 'Amount' not found in metric data"
+                                 'error': "Invalid response format: 'Amount' not found in metric data"
                              }

                          # Process based on metric type
-                         if metric_config["is_cost"]:
+                         if metric_config['is_cost']:
                              # Handle cost metrics
-                             cost = float(metric_data["Amount"])
+                             cost = float(metric_data['Amount'])
                              grouped_costs.setdefault(date, {}).update({group_key: cost})
                          else:
                              # Handle usage metrics (UsageQuantity, NormalizedUsageAmount)
-                             if "Unit" not in metric_data and metric_config["has_unit"]:
+                             if 'Unit' not in metric_data and metric_config['has_unit']:
                                  logger.warning(
                                      f"Unit not found in {metric} data for {group_key}, using 'Unknown' as unit"
                                  )
-                                 unit = "Unknown"
+                                 unit = 'Unknown'
                              else:
-                                 unit = metric_data.get("Unit", "Count")
-                             amount = float(metric_data["Amount"])
+                                 unit = metric_data.get('Unit', 'Count')
+                             amount = float(metric_data['Amount'])
                              grouped_costs.setdefault(date, {}).update({group_key: (amount, unit)})
                      except (ValueError, TypeError) as e:
-                         logger.error(f"Error processing metric data: {e}, data: {metric_data}")
-                         return {"error": f"Error processing metric data: {str(e)}"}
+                         logger.error(f'Error processing metric data: {e}, group: {group_key}')
+                         return {'error': f'Error processing metric data: {str(e)}'}

-             next_token = response.get("NextPageToken")
+             next_token = response.get('NextPageToken')
              if not next_token:
                  break

          # Process results
          if not grouped_costs:
-             logger.info("No cost data found for the specified parameters")
+             logger.info(
+                 f'No cost data found for the specified parameters: {billing_period_start} to {billing_period_end}'
+             )
              return {
-                 "message": "No cost data found for the specified parameters",
-                 "GroupedCosts": {},
+                 'message': 'No cost data found for the specified parameters',
+                 'GroupedCosts': {},
              }

          try:
-             if metric_config["is_cost"]:
+             if metric_config['is_cost']:
                  # Process cost metrics
                  df = pd.DataFrame.from_dict(grouped_costs).round(2)
-                 df["Service total"] = df.sum(axis=1).round(2)
-                 df.loc["Total Costs"] = df.sum().round(2)
-                 df = df.sort_values(by="Service total", ascending=False)
+
+                 # Dynamic labeling based on group dimension
+                 group_dimension = group_by['Key'].lower().replace('_', ' ')
+                 df[f'{group_dimension.title()} Total'] = df.sum(axis=1).round(2)
+                 df.loc[f'Total {metric}'] = df.sum().round(2)
+                 df = df.sort_values(by=f'{group_dimension.title()} Total', ascending=False)
+
+                 result = {'GroupedCosts': df.to_dict()}
              else:
-                 # Process usage metrics (UsageQuantity, NormalizedUsageAmount)
-                 usage_df = pd.DataFrame(
-                     {
-                         (k, "Amount"): {k1: v1[0] for k1, v1 in v.items()}
-                         for k, v in grouped_costs.items()
-                     }
-                 )
-                 units_df = pd.DataFrame(
-                     {
-                         (k, "Unit"): {k1: v1[1] for k1, v1 in v.items()}
-                         for k, v in grouped_costs.items()
-                     }
-                 )
-                 df = pd.concat([usage_df, units_df], axis=1)
+                 # Process usage metrics with cleaner structure
+                 result_data = {}
+                 for date, groups in grouped_costs.items():
+                     result_data[date] = {}
+                     for group_key, (amount, unit) in groups.items():
+                         result_data[date][group_key] = {
+                             'amount': round(float(amount), 2),
+                             'unit': unit,
+                         }

-                 result = {"GroupedCosts": df.to_dict()}
+                 # Add metadata for usage metrics
+                 result = {
+                     'metadata': {
+                         'grouped_by': group_by['Key'],
+                         'metric': metric,
+                         'period': f'{billing_period_start} to {billing_period_end}',
+                     },
+                     'GroupedUsage': result_data,
+                 }
          except Exception as e:
-             logger.error(f"Error processing cost data into DataFrame: {e}")
+             logger.error(f'Error processing cost data into DataFrame: {e}')
              return {
-                 "error": f"Error processing cost data: {str(e)}",
-                 "raw_data": grouped_costs,
+                 'error': f'Error processing cost data: {str(e)}',
+                 'raw_data': grouped_costs,
              }

-         result = {"GroupedCosts": df.to_dict()}
-
-         # Convert all keys to strings for JSON serialization
-         def stringify_keys(d):
-             if isinstance(d, dict):
-                 return {str(k): stringify_keys(v) for k, v in d.items()}
-             elif isinstance(d, list):
-                 return [stringify_keys(i) for i in d]
-             else:
-                 return d
-
+         # Test JSON serialization first, only stringify if needed
          try:
-             result = stringify_keys(result)
+             json.dumps(result)
              return result
-         except Exception as e:
-             logger.error(f"Error serializing result: {e}")
-             return {"error": f"Error serializing result: {str(e)}"}
+         except (TypeError, ValueError):
+             # Only stringify if JSON serialization fails
+             def stringify_keys(d: Any) -> Any:
+                 if isinstance(d, dict):
+                     return {str(k): stringify_keys(v) for k, v in d.items()}
+                 elif isinstance(d, list):
+                     return [stringify_keys(i) if i is not None else None for i in d]
+                 else:
+                     return d
+
+             try:
+                 result = stringify_keys(result)
+                 return result
+             except Exception as e:
+                 logger.error(f'Error serializing result: {e}')
+                 return {'error': f'Error serializing result: {str(e)}'}

      except Exception as e:
-         logger.error(f"Error generating cost report: {e}")
-         import traceback
+         logger.error(
+             f'Error generating cost report for period {billing_period_start} to {billing_period_end}: {e}'
+         )

-         logger.error(f"Traceback: {traceback.format_exc()}")
-         return {"error": f"Error generating cost report: {str(e)}"}
+         return {'error': f'Error generating cost report: {str(e)}'}


  def main():
@@ -472,5 +513,5 @@ def main():
      app.run()


- if __name__ == "__main__":
+ if __name__ == '__main__':
      main()
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: awslabs.cost-explorer-mcp-server
- Version: 0.0.2
+ Version: 0.0.5
  Summary: MCP server for analyzing AWS costs and usage data through the AWS Cost Explorer API
  Project-URL: Homepage, https://awslabs.github.io/mcp/
  Project-URL: Documentation, https://awslabs.github.io/mcp/servers/cost-explorer-mcp-server/
@@ -22,7 +22,8 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Requires-Python: >=3.10
  Requires-Dist: boto3>=1.36.20
- Requires-Dist: fastmcp>=0.1.0
+ Requires-Dist: loguru>=0.7.0
+ Requires-Dist: mcp[cli]>=1.6.0
  Requires-Dist: pandas>=2.2.3
  Requires-Dist: pydantic>=2.10.6
  Description-Content-Type: text/markdown
@@ -63,6 +64,8 @@ MCP server for analyzing AWS costs and usage data through the AWS Cost Explorer

  ## Installation

+ [![Install MCP Server](https://cursor.com/deeplink/mcp-install-light.svg)](https://cursor.com/install-mcp?name=awslabs.cost-explorer-mcp-server&config=eyJjb21tYW5kIjoidXZ4IGF3c2xhYnMuY29zdC1leHBsb3Jlci1tY3Atc2VydmVyQGxhdGVzdCIsImVudiI6eyJGQVNUTUNQX0xPR19MRVZFTCI6IkVSUk9SIiwiQVdTX1BST0ZJTEUiOiJ5b3VyLWF3cy1wcm9maWxlIn0sImRpc2FibGVkIjpmYWxzZSwiYXV0b0FwcHJvdmUiOltdfQ%3D%3D)
+
  Here are some ways you can work with MCP across AWS, and we'll be adding support to more products including Amazon Q Developer CLI soon: (e.g. for Amazon Q Developer CLI MCP, `~/.aws/amazonq/mcp.json`):

  ```json
@@ -127,6 +130,7 @@ The MCP server uses the AWS profile specified in the `AWS_PROFILE` environment v
  ```

  Make sure the AWS profile has permissions to access the AWS Cost Explorer API. The MCP server creates a boto3 session using the specified profile to authenticate with AWS services. Your AWS IAM credentials remain on your local machine and are strictly used for accessing AWS services.
+
  ## Cost Considerations

  **Important:** AWS Cost Explorer API incurs charges on a per-request basis. Each API call made by this MCP server will result in charges to your AWS account.
@@ -0,0 +1,10 @@
+ awslabs/__init__.py,sha256=XlNvbbm4JS0QaAK93MUCbMITZLOSkWkBilYvLI3rBpU,667
+ awslabs/cost_explorer_mcp_server/__init__.py,sha256=jj08M9QRfjYVfiV85UhDzpEO4Vseafpeekg31d2DhfM,785
+ awslabs/cost_explorer_mcp_server/helpers.py,sha256=p1iMZ_OlKY0bpgT2HR2oEfpeAAERmqQLBFVkVPyIUOE,14418
+ awslabs/cost_explorer_mcp_server/server.py,sha256=MEoZpUb104v06VZXnw1CYTpubMnyWveMuCwDvfOMD5g,21806
+ awslabs_cost_explorer_mcp_server-0.0.5.dist-info/METADATA,sha256=f7u_oUsGy-ePY3Wm_JH7S8gngvB7kkBCEEYf49Ycq_c,6745
+ awslabs_cost_explorer_mcp_server-0.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ awslabs_cost_explorer_mcp_server-0.0.5.dist-info/entry_points.txt,sha256=nkewGFi8GZCCtHhFofUmYii3OCeK_5qqgLXE4eUSFZg,98
+ awslabs_cost_explorer_mcp_server-0.0.5.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+ awslabs_cost_explorer_mcp_server-0.0.5.dist-info/licenses/NOTICE,sha256=VL_gWrK0xFaHGFxxYj6BcZI30EkRxUH4Dv1u2Qsh3ao,92
+ awslabs_cost_explorer_mcp_server-0.0.5.dist-info/RECORD,,
@@ -1,10 +0,0 @@
- awslabs/__init__.py,sha256=vi9O_PzTkEpojELcg0v9esQUWLbseZTynQB4YpNYzA8,51
- awslabs/cost_explorer_mcp_server/__init__.py,sha256=DkDBVzhBu0iM3Y6ZrDi6zTydT3mtgKtwjavEhmLBnl0,169
- awslabs/cost_explorer_mcp_server/helpers.py,sha256=Snc2rhsNFbl7WmLBbrIYR21jmKU7QAjshLiv26RyilU,9470
- awslabs/cost_explorer_mcp_server/server.py,sha256=MTHU04Wm0izmrk52fWAQw-y9TpG2O-ACKzfcA_-2Rp0,20150
- awslabs_cost_explorer_mcp_server-0.0.2.dist-info/METADATA,sha256=kUuzyv89t_LOq1UNDfcinZc97ZcAfR9mnEc162kK7iM,6340
- awslabs_cost_explorer_mcp_server-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- awslabs_cost_explorer_mcp_server-0.0.2.dist-info/entry_points.txt,sha256=nkewGFi8GZCCtHhFofUmYii3OCeK_5qqgLXE4eUSFZg,98
- awslabs_cost_explorer_mcp_server-0.0.2.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
- awslabs_cost_explorer_mcp_server-0.0.2.dist-info/licenses/NOTICE,sha256=VL_gWrK0xFaHGFxxYj6BcZI30EkRxUH4Dv1u2Qsh3ao,92
- awslabs_cost_explorer_mcp_server-0.0.2.dist-info/RECORD,,