awslabs.cost-explorer-mcp-server 0.0.4__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- awslabs/cost_explorer_mcp_server/helpers.py +160 -49
- awslabs/cost_explorer_mcp_server/server.py +141 -119
- {awslabs_cost_explorer_mcp_server-0.0.4.dist-info → awslabs_cost_explorer_mcp_server-0.0.5.dist-info}/METADATA +4 -1
- awslabs_cost_explorer_mcp_server-0.0.5.dist-info/RECORD +10 -0
- awslabs_cost_explorer_mcp_server-0.0.4.dist-info/RECORD +0 -10
- {awslabs_cost_explorer_mcp_server-0.0.4.dist-info → awslabs_cost_explorer_mcp_server-0.0.5.dist-info}/WHEEL +0 -0
- {awslabs_cost_explorer_mcp_server-0.0.4.dist-info → awslabs_cost_explorer_mcp_server-0.0.5.dist-info}/entry_points.txt +0 -0
- {awslabs_cost_explorer_mcp_server-0.0.4.dist-info → awslabs_cost_explorer_mcp_server-0.0.5.dist-info}/licenses/LICENSE +0 -0
- {awslabs_cost_explorer_mcp_server-0.0.4.dist-info → awslabs_cost_explorer_mcp_server-0.0.5.dist-info}/licenses/NOTICE +0 -0
awslabs/cost_explorer_mcp_server/helpers.py

@@ -15,17 +15,47 @@
 """Helper functions for the Cost Explorer MCP server."""
 
 import boto3
-import
+import os
 import re
+import sys
 from datetime import datetime
+from loguru import logger
 from typing import Any, Dict, Optional, Tuple
 
 
-#
-logger
+# Configure Loguru logging
+logger.remove()
+logger.add(sys.stderr, level=os.getenv('FASTMCP_LOG_LEVEL', 'WARNING'))
 
-#
-
+# Global client cache
+_cost_explorer_client = None
+
+
+def get_cost_explorer_client():
+    """Get Cost Explorer client with proper session management and caching.
+
+    Returns:
+        boto3.client: Configured Cost Explorer client (cached after first call)
+    """
+    global _cost_explorer_client
+
+    if _cost_explorer_client is None:
+        try:
+            # Read environment variables dynamically
+            aws_region = os.environ.get('AWS_REGION', 'us-east-1')
+            aws_profile = os.environ.get('AWS_PROFILE')
+
+            if aws_profile:
+                _cost_explorer_client = boto3.Session(
+                    profile_name=aws_profile, region_name=aws_region
+                ).client('ce')
+            else:
+                _cost_explorer_client = boto3.Session(region_name=aws_region).client('ce')
+        except Exception as e:
+            logger.error(f'Error creating Cost Explorer client: {str(e)}')
+            raise
+
+    return _cost_explorer_client
 
 
 def validate_date_format(date_str: str) -> Tuple[bool, str]:
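The new `get_cost_explorer_client` helper builds a single boto3 `ce` client lazily and reuses it on every subsequent call. A minimal sketch of that caching behavior, assuming the 0.0.5 package is installed; the environment values below are placeholders for illustration, not taken from the diff:

```python
import os

from awslabs.cost_explorer_mcp_server.helpers import get_cost_explorer_client

# Placeholder region for the sketch; AWS_PROFILE is optional and only used if set.
os.environ.setdefault('AWS_REGION', 'us-east-1')

client_a = get_cost_explorer_client()   # first call creates the Cost Explorer client
client_b = get_cost_explorer_client()   # later calls return the cached instance
assert client_a is client_b
```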
@@ -49,26 +79,79 @@ def validate_date_format(date_str: str) -> Tuple[bool, str]:
         return False, f"Invalid date '{date_str}': {str(e)}"
 
 
-def
-
-
-
-
-
+def format_date_for_api(date_str: str, granularity: str) -> str:
+    """Format date string appropriately for AWS Cost Explorer API based on granularity.
+
+    Args:
+        date_str: Date string in YYYY-MM-DD format
+        granularity: The granularity (DAILY, MONTHLY, HOURLY)
+
+    Returns:
+        Formatted date string appropriate for the API call
+    """
+    if granularity.upper() == 'HOURLY':
+        # For hourly granularity, AWS expects datetime format
+        # Convert YYYY-MM-DD to YYYY-MM-DDTHH:MM:SSZ
+        dt = datetime.strptime(date_str, '%Y-%m-%d')
+        return dt.strftime('%Y-%m-%dT00:00:00Z')
+    else:
+        # For DAILY and MONTHLY, use the original date format
+        return date_str
+
+
+def validate_date_range(
+    start_date: str, end_date: str, granularity: Optional[str] = None
+) -> Tuple[bool, str]:
+    """Validate date range with format and logical checks.
+
+    Args:
+        start_date: The start date string in YYYY-MM-DD format
+        end_date: The end date string in YYYY-MM-DD format
+        granularity: Optional granularity to check specific constraints
+
+    Returns:
+        Tuple of (is_valid, error_message)
+    """
+    # Validate start date format
+    is_valid_start, error_start = validate_date_format(start_date)
     if not is_valid_start:
-        return
+        return False, error_start
 
-
+    # Validate end date format
+    is_valid_end, error_end = validate_date_format(end_date)
     if not is_valid_end:
-        return
+        return False, error_end
+
+    # Validate date range logic
+    start_dt = datetime.strptime(start_date, '%Y-%m-%d')
+    end_dt = datetime.strptime(end_date, '%Y-%m-%d')
+    if start_dt > end_dt:
+        return False, f"Start date '{start_date}' cannot be after end date '{end_date}'"
+
+    # Validate granularity-specific constraints
+    if granularity and granularity.upper() == 'HOURLY':
+        # HOURLY granularity supports maximum 14 days
+        date_diff = (end_dt - start_dt).days
+        if date_diff > 14:
+            return (
+                False,
+                f'HOURLY granularity supports a maximum of 14 days. Current range is {date_diff} days ({start_date} to {end_date}). Please use a shorter date range.',
+            )
 
-
-
-
-
-
+    return True, ''
+
+
+def get_dimension_values(
+    key: str, billing_period_start: str, billing_period_end: str
+) -> Dict[str, Any]:
+    """Get available values for a specific dimension."""
+    # Validate date range (no granularity constraint for dimension values)
+    is_valid, error_message = validate_date_range(billing_period_start, billing_period_end)
+    if not is_valid:
+        return {'error': error_message}
 
     try:
+        ce = get_cost_explorer_client()
         response = ce.get_dimension_values(
             TimePeriod={'Start': billing_period_start, 'End': billing_period_end},
             Dimension=key.upper(),
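`validate_date_range` layers three checks: per-date format validation, start-not-after-end ordering, and a 14-day cap that only applies when HOURLY granularity is requested. A short sketch of the return values, using arbitrary example dates:

```python
from awslabs.cost_explorer_mcp_server.helpers import validate_date_range

# Well-formed range; no granularity constraint is checked
print(validate_date_range('2025-05-01', '2025-05-31'))            # (True, '')

# The same 30-day range with HOURLY granularity exceeds the 14-day limit
print(validate_date_range('2025-05-01', '2025-05-31', 'HOURLY'))  # (False, 'HOURLY granularity supports a maximum of 14 days. ...')

# Reversed dates fail the ordering check regardless of granularity
print(validate_date_range('2025-05-31', '2025-05-01'))            # (False, "Start date '2025-05-31' cannot be after end date '2025-05-01'")
```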
@@ -77,7 +160,9 @@ def get_dimension_values(
         values = [value['Value'] for value in dimension_values]
         return {'dimension': key.upper(), 'values': values}
     except Exception as e:
-        logger.error(
+        logger.error(
+            f'Error getting dimension values for {key.upper()} ({billing_period_start} to {billing_period_end}): {e}'
+        )
         return {'error': str(e)}
 
 
@@ -85,22 +170,13 @@ def get_tag_values(
     tag_key: str, billing_period_start: str, billing_period_end: str
 ) -> Dict[str, Any]:
     """Get available values for a specific tag key."""
-    # Validate date
-
-    if not
-        return {'error':
-
-    is_valid_end, error_end = validate_date_format(billing_period_end)
-    if not is_valid_end:
-        return {'error': error_end}
-
-    # Validate date range
-    if billing_period_start > billing_period_end:
-        return {
-            'error': f"Start date '{billing_period_start}' cannot be after end date '{billing_period_end}'"
-        }
+    # Validate date range (no granularity constraint for tag values)
+    is_valid, error_message = validate_date_range(billing_period_start, billing_period_end)
+    if not is_valid:
+        return {'error': error_message}
 
     try:
+        ce = get_cost_explorer_client()
         response = ce.get_tags(
             TimePeriod={'Start': billing_period_start, 'End': billing_period_end},
             TagKey=tag_key,
@@ -108,10 +184,38 @@ def get_tag_values(
         tag_values = response['Tags']
         return {'tag_key': tag_key, 'values': tag_values}
     except Exception as e:
-        logger.error(
+        logger.error(
+            f'Error getting tag values for {tag_key} ({billing_period_start} to {billing_period_end}): {e}'
+        )
         return {'error': str(e)}
 
 
+def validate_match_options(match_options: list, filter_type: str) -> Dict[str, Any]:
+    """Validate MatchOptions based on filter type.
+
+    Args:
+        match_options: List of match options to validate
+        filter_type: Type of filter ('Dimensions', 'Tags', 'CostCategories')
+
+    Returns:
+        Empty dictionary if valid, or an error dictionary
+    """
+    if filter_type == 'Dimensions':
+        valid_options = ['EQUALS', 'CASE_SENSITIVE']
+    elif filter_type in ['Tags', 'CostCategories']:
+        valid_options = ['EQUALS', 'ABSENT', 'CASE_SENSITIVE']
+    else:
+        return {'error': f'Unknown filter type: {filter_type}'}
+
+    for option in match_options:
+        if option not in valid_options:
+            return {
+                'error': f"Invalid MatchOption '{option}' for {filter_type}. Valid values are: {valid_options}"
+            }
+
+    return {}
+
+
 def validate_expression(
     expression: Dict[str, Any], billing_period_start: str, billing_period_end: str
 ) -> Dict[str, Any]:
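The new `validate_match_options` helper enforces that Dimensions filters may only use EQUALS and CASE_SENSITIVE, while Tags and CostCategories filters may also use ABSENT. A quick illustrative sketch of the accepted and rejected combinations (the inputs are examples, not taken from the diff):

```python
from awslabs.cost_explorer_mcp_server.helpers import validate_match_options

print(validate_match_options(['EQUALS'], 'Dimensions'))   # {} -> valid
print(validate_match_options(['ABSENT'], 'Tags'))         # {} -> ABSENT is allowed for Tags/CostCategories

# ABSENT is not a valid MatchOption for Dimensions filters
print(validate_match_options(['ABSENT'], 'Dimensions'))   # {'error': "Invalid MatchOption 'ABSENT' for Dimensions. ..."}

# Unknown filter types are rejected outright
print(validate_match_options(['EQUALS'], 'Metrics'))      # {'error': 'Unknown filter type: Metrics'}
```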
@@ -125,20 +229,10 @@ def validate_expression(
     Returns:
         Empty dictionary if valid, or an error dictionary
     """
-    # Validate date
-
-    if not
-        return {'error':
-
-    is_valid_end, error_end = validate_date_format(billing_period_end)
-    if not is_valid_end:
-        return {'error': error_end}
-
-    # Validate date range
-    if billing_period_start > billing_period_end:
-        return {
-            'error': f"Start date '{billing_period_start}' cannot be after end date '{billing_period_end}'"
-        }
+    # Validate date range (no granularity constraint for filter validation)
+    is_valid, error_message = validate_date_range(billing_period_start, billing_period_end)
+    if not is_valid:
+        return {'error': error_message}
 
     try:
         if 'Dimensions' in expression:
@@ -152,6 +246,11 @@ def validate_expression(
                     'error': 'Dimensions filter must include "Key", "Values", and "MatchOptions".'
                 }
 
+            # Validate MatchOptions for Dimensions
+            match_options_result = validate_match_options(dimension['MatchOptions'], 'Dimensions')
+            if 'error' in match_options_result:
+                return match_options_result
+
             dimension_key = dimension['Key']
             dimension_values = dimension['Values']
             valid_values_response = get_dimension_values(
@@ -171,6 +270,11 @@ def validate_expression(
             if 'Key' not in tag or 'Values' not in tag or 'MatchOptions' not in tag:
                 return {'error': 'Tags filter must include "Key", "Values", and "MatchOptions".'}
 
+            # Validate MatchOptions for Tags
+            match_options_result = validate_match_options(tag['MatchOptions'], 'Tags')
+            if 'error' in match_options_result:
+                return match_options_result
+
             tag_key = tag['Key']
             tag_values = tag['Values']
             valid_tag_values_response = get_tag_values(
@@ -196,6 +300,13 @@ def validate_expression(
                     'error': 'CostCategories filter must include "Key", "Values", and "MatchOptions".'
                 }
 
+            # Validate MatchOptions for CostCategories
+            match_options_result = validate_match_options(
+                cost_category['MatchOptions'], 'CostCategories'
+            )
+            if 'error' in match_options_result:
+                return match_options_result
+
         logical_operators = ['And', 'Or', 'Not']
         logical_count = sum(1 for op in logical_operators if op in expression)
 
awslabs/cost_explorer_mcp_server/server.py

@@ -17,28 +17,33 @@
 This server provides tools for analyzing AWS costs and usage data through the AWS Cost Explorer API.
 """
 
-import
-import
+import json
+import os
 import pandas as pd
+import sys
 from awslabs.cost_explorer_mcp_server.helpers import (
+    format_date_for_api,
+    get_cost_explorer_client,
     get_dimension_values,
     get_tag_values,
     validate_date_format,
+    validate_date_range,
     validate_expression,
     validate_group_by,
 )
 from datetime import datetime, timedelta
+from loguru import logger
 from mcp.server.fastmcp import Context, FastMCP
 from pydantic import BaseModel, Field, field_validator
 from typing import Any, Dict, Optional, Union
 
 
-#
-
-logger =
+# Configure Loguru logging
+logger.remove()
+logger.add(sys.stderr, level=os.getenv('FASTMCP_LOG_LEVEL', 'WARNING'))
 
-#
-
+# Constants
+COST_EXPLORER_END_DATE_OFFSET = 1
 
 
 class DateRange(BaseModel):
@@ -52,60 +57,26 @@ class DateRange(BaseModel):
         ..., description='The end date of the billing period in YYYY-MM-DD format.'
     )
 
-    @field_validator('start_date')
+    @field_validator('start_date', 'end_date')
     @classmethod
-    def
-        """Validate that
+    def validate_individual_dates(cls, v):
+        """Validate that individual dates are in YYYY-MM-DD format and are valid dates."""
         is_valid, error = validate_date_format(v)
         if not is_valid:
             raise ValueError(error)
         return v
 
-
-
-
-        """Validate that end_date is in YYYY-MM-DD format and is a valid date, and not before start_date."""
-        is_valid, error = validate_date_format(v)
+    def model_post_init(self, __context):
+        """Validate the date range after both dates are set."""
+        is_valid, error = validate_date_range(self.start_date, self.end_date)
         if not is_valid:
             raise ValueError(error)
 
-
-
-
-
-
-        return v
-
-
-class GroupBy(BaseModel):
-    """Group by model for cost queries."""
-
-    type: str = Field(
-        ...,
-        description='Type of grouping. Valid values are DIMENSION, TAG, and COST_CATEGORY.',
-    )
-    key: str = Field(
-        ...,
-        description='Key to group by. For DIMENSION type, valid values include AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY, LINKED_ACCOUNT, OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, TENANCY, RECORD_TYPE, and USAGE_TYPE.',
-    )
-
-
-class FilterExpression(BaseModel):
-    """Filter expression model for cost queries."""
-
-    filter_json: str = Field(
-        ...,
-        description="Filter criteria as a Python dictionary to narrow down AWS costs. Supports filtering by Dimensions (SERVICE, REGION, etc.), Tags, or CostCategories. You can use logical operators (And, Or, Not) for complex filters. Examples: 1) Simple service filter: {'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute', 'Amazon Simple Storage Service'], 'MatchOptions': ['EQUALS']}}. 2) Region filter: {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}. 3) Combined filter: {'And': [{'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute'], 'MatchOptions': ['EQUALS']}}, {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}]}.",
-    )
-
-
-class CostMetric(BaseModel):
-    """Cost metric model."""
-
-    metric: str = Field(
-        'UnblendedCost',
-        description='The metric to return in the query. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity. Note: For UsageQuantity, the service aggregates usage numbers without considering units. To get meaningful UsageQuantity metrics, filter by UsageType or UsageTypeGroups.',
-    )
+    def validate_with_granularity(self, granularity: str):
+        """Validate the date range with granularity-specific constraints."""
+        is_valid, error = validate_date_range(self.start_date, self.end_date, granularity)
+        if not is_valid:
+            raise ValueError(error)
 
 
 class DimensionKey(BaseModel):
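With the reworked `DateRange` model, a single `field_validator` covers per-field format checks, `model_post_init` enforces ordering once both dates are set, and `validate_with_granularity` applies the HOURLY 14-day limit on demand. A brief sketch of the intended call pattern, assuming `DateRange` is imported from `awslabs.cost_explorer_mcp_server.server`; the dates are arbitrary examples:

```python
# Constructing a valid range; a 9-day span is within the 14-day HOURLY limit
dr = DateRange(start_date='2025-05-01', end_date='2025-05-10')
dr.validate_with_granularity('HOURLY')   # raises ValueError only if the range is invalid

try:
    # Ordering is checked at construction time via model_post_init
    DateRange(start_date='2025-05-10', end_date='2025-05-01')
except ValueError as e:
    print(e)  # "Start date '2025-05-10' cannot be after end date '2025-05-01'"
```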
@@ -126,7 +97,7 @@ async def get_today_date(ctx: Context) -> Dict[str, str]:
     """Retrieve current date information.
 
     This tool retrieves the current date in YYYY-MM-DD format and the current month in YYYY-MM format.
-    It's useful for
+    It's useful for calculating relevent date when user ask last N months/days.
 
     Args:
         ctx: MCP context
@@ -164,7 +135,7 @@ async def get_dimension_values_tool(
         )
         return response
     except Exception as e:
-        logger.error(f'Error getting dimension values: {e}')
+        logger.error(f'Error getting dimension values for {dimension.dimension_key}: {e}')
         return {'error': f'Error getting dimension values: {str(e)}'}
 
 
@@ -191,7 +162,7 @@ async def get_tag_values_tool(
         response = get_tag_values(tag_key, date_range.start_date, date_range.end_date)
         return response
     except Exception as e:
-        logger.error(f'Error getting tag values: {e}')
+        logger.error(f'Error getting tag values for {tag_key}: {e}')
         return {'error': f'Error getting tag values: {str(e)}'}
 
 
@@ -204,16 +175,16 @@ async def get_cost_and_usage(
         description='The granularity at which cost data is aggregated. Valid values are DAILY, MONTHLY, and HOURLY. If not provided, defaults to MONTHLY.',
     ),
     group_by: Optional[Union[Dict[str, str], str]] = Field(
-
+        'SERVICE',
         description="Either a dictionary with Type and Key for grouping costs, or simply a string key to group by (which will default to DIMENSION type). Example dictionary: {'Type': 'DIMENSION', 'Key': 'SERVICE'}. Example string: 'SERVICE'.",
     ),
     filter_expression: Optional[Dict[str, Any]] = Field(
         None,
-        description="Filter criteria as a Python dictionary to narrow down AWS costs. Supports filtering by Dimensions (SERVICE, REGION, etc.), Tags, or CostCategories. You can use logical operators (And, Or, Not) for complex filters. Examples: 1) Simple service filter: {'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute', 'Amazon Simple Storage Service'], 'MatchOptions': ['EQUALS']}}. 2) Region filter: {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}. 3) Combined filter: {'And': [{'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute'], 'MatchOptions': ['EQUALS']}}, {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}]}.",
+        description="Filter criteria as a Python dictionary to narrow down AWS costs. Supports filtering by Dimensions (SERVICE, REGION, etc.), Tags, or CostCategories. You can use logical operators (And, Or, Not) for complex filters. MatchOptions validation: For Dimensions, valid values are EQUALS and CASE_SENSITIVE. For Tags and CostCategories, valid values are EQUALS, ABSENT, and CASE_SENSITIVE (defaults to EQUALS and CASE_SENSITIVE). Examples: 1) Simple service filter: {'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute', 'Amazon Simple Storage Service'], 'MatchOptions': ['EQUALS']}}. 2) Region filter: {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}. 3) Combined filter: {'And': [{'Dimensions': {'Key': 'SERVICE', 'Values': ['Amazon Elastic Compute Cloud - Compute'], 'MatchOptions': ['EQUALS']}}, {'Dimensions': {'Key': 'REGION', 'Values': ['us-east-1'], 'MatchOptions': ['EQUALS']}}]}.",
     ),
     metric: str = Field(
         'UnblendedCost',
-        description='The metric to return in the query. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity.',
+        description='The metric to return in the query. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity. IMPORTANT: For UsageQuantity, the service aggregates usage numbers without considering units, making results meaningless when mixing different unit types (e.g., compute hours + data transfer GB). To get meaningful UsageQuantity metrics, you MUST filter by USAGE_TYPE or group by USAGE_TYPE/USAGE_TYPE_GROUP to ensure consistent units.',
     ),
 ) -> Dict[str, Any]:
     """Retrieve AWS cost and usage data.
@@ -226,6 +197,10 @@ async def get_cost_and_usage(
     "2025-01-31", the results will include data for January 31st. This differs from the AWS Cost Explorer
     API which treats end_date as exclusive.
 
+    IMPORTANT: When using UsageQuantity metric, AWS aggregates usage numbers without considering units.
+    This makes results meaningless when different usage types have different units (e.g., EC2 compute hours
+    vs data transfer GB). For meaningful UsageQuantity results, you MUST be very specific with filtering, including USAGE_TYPE or USAGE_TYPE_GROUP.
+
     Example: Get monthly costs for EC2 and S3 services in us-east-1 for May 2025
         await get_cost_and_usage(
             ctx=context,
@@ -256,6 +231,45 @@ async def get_cost_and_usage(
             metric="UnblendedCost"
         )
 
+    Example: Get meaningful UsageQuantity for specific EC2 instance usage
+        await get_cost_and_usage(
+            ctx=context,
+            {
+                "date_range": {
+                    "end_date": "2025-05-01",
+                    "start_date": "2025-05-31"
+                },
+                "filter_expression": {
+                    "And": [
+                        {
+                            "Dimensions": {
+                                "Values": [
+                                    "Amazon Elastic Compute Cloud - Compute"
+                                ],
+                                "Key": "SERVICE",
+                                "MatchOptions": [
+                                    "EQUALS"
+                                ]
+                            }
+                        },
+                        {
+                            "Dimensions": {
+                                "Values": [
+                                    "EC2: Running Hours"
+                                ],
+                                "Key": "USAGE_TYPE_GROUP",
+                                "MatchOptions": [
+                                    "EQUALS"
+                                ]
+                            }
+                        }
+                    ]
+                },
+                "metric": "UsageQuantity",
+                "group_by": "USAGE_TYPE",
+                "granularity": "MONTHLY"
+            }
+
     Args:
         ctx: MCP context
         date_range: The billing period start and end dates in YYYY-MM-DD format (end date is inclusive)
@@ -267,21 +281,24 @@ async def get_cost_and_usage(
     Returns:
         Dictionary containing cost report data grouped according to the specified parameters
     """
+    # Initialize variables at function scope to avoid unbound variable issues
+    billing_period_start = date_range.start_date
+    billing_period_end = date_range.end_date
+
     try:
-        # Process inputs
-
-            granularity = granularity.upper()
-        else:
-            # Handle case where granularity is a Pydantic FieldInfo object
-            granularity = str(granularity).upper()
+        # Process inputs - simplified granularity validation
+        granularity = str(granularity).upper()
 
         if granularity not in ['DAILY', 'MONTHLY', 'HOURLY']:
             return {
                 'error': f'Invalid granularity: {granularity}. Valid values are DAILY, MONTHLY, and HOURLY.'
             }
 
-
-
+        # Validate date range with granularity-specific constraints
+        try:
+            date_range.validate_with_granularity(granularity)
+        except ValueError as e:
+            return {'error': str(e)}
 
         # Define valid metrics and their expected data structure
         valid_metrics = {
@@ -303,7 +320,8 @@ async def get_cost_and_usage(
         # Adjust end date for Cost Explorer API (exclusive)
         # Add one day to make the end date inclusive for the user
         billing_period_end_adj = (
-            datetime.strptime(billing_period_end, '%Y-%m-%d')
+            datetime.strptime(billing_period_end, '%Y-%m-%d')
+            + timedelta(days=COST_EXPLORER_END_DATE_OFFSET)
         ).strftime('%Y-%m-%d')
 
         # Process filter
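The tool keeps the user-facing end date inclusive while the Cost Explorer API treats `End` as exclusive, so 0.0.5 adds the named `COST_EXPLORER_END_DATE_OFFSET` constant (one day) before the call and, for HOURLY granularity, formats both bounds through `format_date_for_api`. A small sketch of that arithmetic; the date is an arbitrary example:

```python
from datetime import datetime, timedelta

COST_EXPLORER_END_DATE_OFFSET = 1  # one day, as defined in server.py

billing_period_end = '2025-01-31'  # inclusive end date supplied by the caller
billing_period_end_adj = (
    datetime.strptime(billing_period_end, '%Y-%m-%d')
    + timedelta(days=COST_EXPLORER_END_DATE_OFFSET)
).strftime('%Y-%m-%d')

print(billing_period_end_adj)  # '2025-02-01' -> exclusive bound sent to the API
```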
@@ -319,7 +337,7 @@ async def get_cost_and_usage(
             return validation_result
 
         # Process group_by
-        if
+        if group_by is None:
             group_by = {'Type': 'DIMENSION', 'Key': 'SERVICE'}
         elif isinstance(group_by, str):
             group_by = {'Type': 'DIMENSION', 'Key': group_by}
@@ -332,8 +350,8 @@ async def get_cost_and_usage(
         # Prepare API call parameters
         common_params = {
             'TimePeriod': {
-                'Start': billing_period_start,
-                'End': billing_period_end_adj,
+                'Start': format_date_for_api(billing_period_start, granularity),
+                'End': format_date_for_api(billing_period_end_adj, granularity),
             },
             'Granularity': granularity,
             'GroupBy': [{'Type': group_by['Type'].upper(), 'Key': group_by['Key']}],
@@ -346,6 +364,8 @@ async def get_cost_and_usage(
         # Get cost data
         grouped_costs = {}
         next_token = None
+        ce = get_cost_explorer_client()
+
         while True:
             if next_token:
                 common_params['NextPageToken'] = next_token
@@ -374,17 +394,6 @@ async def get_cost_and_usage(
                        'error': f"Metric '{metric}' not found in response for group {group_key}"
                    }
 
-                metric_data = group['Metrics'][metric]
-
-                # Validate metric data structure
-                if 'Amount' not in metric_data:
-                    logger.error(
-                        f'Amount not found in metric data for {group_key}: {metric_data}'
-                    )
-                    return {
-                        'error': "Invalid response format: 'Amount' not found in metric data"
-                    }
-
                 try:
                     metric_data = group['Metrics'][metric]
 
@@ -414,7 +423,7 @@ async def get_cost_and_usage(
                     amount = float(metric_data['Amount'])
                     grouped_costs.setdefault(date, {}).update({group_key: (amount, unit)})
                 except (ValueError, TypeError) as e:
-                    logger.error(f'Error processing metric data: {e},
+                    logger.error(f'Error processing metric data: {e}, group: {group_key}')
                     return {'error': f'Error processing metric data: {str(e)}'}
 
             next_token = response.get('NextPageToken')
@@ -423,7 +432,9 @@ async def get_cost_and_usage(
 
         # Process results
         if not grouped_costs:
-            logger.info(
+            logger.info(
+                f'No cost data found for the specified parameters: {billing_period_start} to {billing_period_end}'
+            )
             return {
                 'message': 'No cost data found for the specified parameters',
                 'GroupedCosts': {},
@@ -433,26 +444,34 @@ async def get_cost_and_usage(
             if metric_config['is_cost']:
                 # Process cost metrics
                 df = pd.DataFrame.from_dict(grouped_costs).round(2)
-
-
-
+
+                # Dynamic labeling based on group dimension
+                group_dimension = group_by['Key'].lower().replace('_', ' ')
+                df[f'{group_dimension.title()} Total'] = df.sum(axis=1).round(2)
+                df.loc[f'Total {metric}'] = df.sum().round(2)
+                df = df.sort_values(by=f'{group_dimension.title()} Total', ascending=False)
+
+                result = {'GroupedCosts': df.to_dict()}
             else:
-                # Process usage metrics
-
-
-
-
-
-
-
-
-                        (k, 'Unit'): {k1: v1[1] for k1, v1 in v.items()}
-                        for k, v in grouped_costs.items()
-                    }
-                )
-                df = pd.concat([usage_df, units_df], axis=1)
+                # Process usage metrics with cleaner structure
+                result_data = {}
+                for date, groups in grouped_costs.items():
+                    result_data[date] = {}
+                    for group_key, (amount, unit) in groups.items():
+                        result_data[date][group_key] = {
+                            'amount': round(float(amount), 2),
+                            'unit': unit,
+                        }
 
-
+                # Add metadata for usage metrics
+                result = {
+                    'metadata': {
+                        'grouped_by': group_by['Key'],
+                        'metric': metric,
+                        'period': f'{billing_period_start} to {billing_period_end}',
+                    },
+                    'GroupedUsage': result_data,
+                }
         except Exception as e:
             logger.error(f'Error processing cost data into DataFrame: {e}')
             return {
@@ -460,29 +479,32 @@ async def get_cost_and_usage(
                 'raw_data': grouped_costs,
             }
 
-
-
-        # Convert all keys to strings for JSON serialization
-        def stringify_keys(d: Any) -> Any:
-            if isinstance(d, dict):
-                return {str(k): stringify_keys(v) for k, v in d.items()}
-            elif isinstance(d, list):
-                return [{} if i is None else stringify_keys(i) for i in d]  # Handle None values
-            else:
-                return d
-
+        # Test JSON serialization first, only stringify if needed
         try:
-
+            json.dumps(result)
             return result
-        except
-
-
+        except (TypeError, ValueError):
+            # Only stringify if JSON serialization fails
+            def stringify_keys(d: Any) -> Any:
+                if isinstance(d, dict):
+                    return {str(k): stringify_keys(v) for k, v in d.items()}
+                elif isinstance(d, list):
+                    return [stringify_keys(i) if i is not None else None for i in d]
+                else:
+                    return d
+
+            try:
+                result = stringify_keys(result)
+                return result
+            except Exception as e:
+                logger.error(f'Error serializing result: {e}')
+                return {'error': f'Error serializing result: {str(e)}'}
 
     except Exception as e:
-        logger.error(
-
+        logger.error(
+            f'Error generating cost report for period {billing_period_start} to {billing_period_end}: {e}'
+        )
 
-        logger.error(f'Traceback: {traceback.format_exc()}')
         return {'error': f'Error generating cost report: {str(e)}'}
 
 
awslabs_cost_explorer_mcp_server-0.0.4.dist-info/METADATA → awslabs_cost_explorer_mcp_server-0.0.5.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: awslabs.cost-explorer-mcp-server
-Version: 0.0.4
+Version: 0.0.5
 Summary: MCP server for analyzing AWS costs and usage data through the AWS Cost Explorer API
 Project-URL: Homepage, https://awslabs.github.io/mcp/
 Project-URL: Documentation, https://awslabs.github.io/mcp/servers/cost-explorer-mcp-server/
@@ -22,6 +22,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.10
 Requires-Dist: boto3>=1.36.20
+Requires-Dist: loguru>=0.7.0
 Requires-Dist: mcp[cli]>=1.6.0
 Requires-Dist: pandas>=2.2.3
 Requires-Dist: pydantic>=2.10.6
@@ -63,6 +64,8 @@ MCP server for analyzing AWS costs and usage data through the AWS Cost Explorer
 
 ## Installation
 
+[](https://cursor.com/install-mcp?name=awslabs.cost-explorer-mcp-server&config=eyJjb21tYW5kIjoidXZ4IGF3c2xhYnMuY29zdC1leHBsb3Jlci1tY3Atc2VydmVyQGxhdGVzdCIsImVudiI6eyJGQVNUTUNQX0xPR19MRVZFTCI6IkVSUk9SIiwiQVdTX1BST0ZJTEUiOiJ5b3VyLWF3cy1wcm9maWxlIn0sImRpc2FibGVkIjpmYWxzZSwiYXV0b0FwcHJvdmUiOltdfQ%3D%3D)
+
 Here are some ways you can work with MCP across AWS, and we'll be adding support to more products including Amazon Q Developer CLI soon: (e.g. for Amazon Q Developer CLI MCP, `~/.aws/amazonq/mcp.json`):
 
 ```json
awslabs_cost_explorer_mcp_server-0.0.5.dist-info/RECORD (new file)

@@ -0,0 +1,10 @@
+awslabs/__init__.py,sha256=XlNvbbm4JS0QaAK93MUCbMITZLOSkWkBilYvLI3rBpU,667
+awslabs/cost_explorer_mcp_server/__init__.py,sha256=jj08M9QRfjYVfiV85UhDzpEO4Vseafpeekg31d2DhfM,785
+awslabs/cost_explorer_mcp_server/helpers.py,sha256=p1iMZ_OlKY0bpgT2HR2oEfpeAAERmqQLBFVkVPyIUOE,14418
+awslabs/cost_explorer_mcp_server/server.py,sha256=MEoZpUb104v06VZXnw1CYTpubMnyWveMuCwDvfOMD5g,21806
+awslabs_cost_explorer_mcp_server-0.0.5.dist-info/METADATA,sha256=f7u_oUsGy-ePY3Wm_JH7S8gngvB7kkBCEEYf49Ycq_c,6745
+awslabs_cost_explorer_mcp_server-0.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+awslabs_cost_explorer_mcp_server-0.0.5.dist-info/entry_points.txt,sha256=nkewGFi8GZCCtHhFofUmYii3OCeK_5qqgLXE4eUSFZg,98
+awslabs_cost_explorer_mcp_server-0.0.5.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+awslabs_cost_explorer_mcp_server-0.0.5.dist-info/licenses/NOTICE,sha256=VL_gWrK0xFaHGFxxYj6BcZI30EkRxUH4Dv1u2Qsh3ao,92
+awslabs_cost_explorer_mcp_server-0.0.5.dist-info/RECORD,,

awslabs_cost_explorer_mcp_server-0.0.4.dist-info/RECORD (removed file)

@@ -1,10 +0,0 @@
-awslabs/__init__.py,sha256=XlNvbbm4JS0QaAK93MUCbMITZLOSkWkBilYvLI3rBpU,667
-awslabs/cost_explorer_mcp_server/__init__.py,sha256=jj08M9QRfjYVfiV85UhDzpEO4Vseafpeekg31d2DhfM,785
-awslabs/cost_explorer_mcp_server/helpers.py,sha256=8ldRc2TVFuE7-0Js4nQWw3v3e3Om48QgQgbTAXOecgI,10186
-awslabs/cost_explorer_mcp_server/server.py,sha256=joYtlqmNnjGm162Qe71sz_weDNUujAGS6-RvdG5wpT4,21007
-awslabs_cost_explorer_mcp_server-0.0.4.dist-info/METADATA,sha256=RrYXLshB_1ZL448HUxTxw7RZl1ktNgXbLkZXBtvJ4pY,6342
-awslabs_cost_explorer_mcp_server-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-awslabs_cost_explorer_mcp_server-0.0.4.dist-info/entry_points.txt,sha256=nkewGFi8GZCCtHhFofUmYii3OCeK_5qqgLXE4eUSFZg,98
-awslabs_cost_explorer_mcp_server-0.0.4.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
-awslabs_cost_explorer_mcp_server-0.0.4.dist-info/licenses/NOTICE,sha256=VL_gWrK0xFaHGFxxYj6BcZI30EkRxUH4Dv1u2Qsh3ao,92
-awslabs_cost_explorer_mcp_server-0.0.4.dist-info/RECORD,,

File without changes
File without changes
File without changes