awslabs.cloudwatch-mcp-server 0.0.11__tar.gz → 0.0.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/CHANGELOG.md +11 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/PKG-INFO +7 -3
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/README.md +2 -1
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/__init__.py +2 -1
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_alarms/models.py +1 -1
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_alarms/tools.py +2 -2
- awslabs_cloudwatch_mcp_server-0.0.14/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/cloudformation_template_generator.py +162 -0
- awslabs_cloudwatch_mcp_server-0.0.14/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/constants.py +30 -0
- awslabs_cloudwatch_mcp_server-0.0.14/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/metric_analyzer.py +192 -0
- awslabs_cloudwatch_mcp_server-0.0.14/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/metric_data_decomposer.py +218 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/models.py +129 -3
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/tools.py +377 -33
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/pyproject.toml +5 -2
- awslabs_cloudwatch_mcp_server-0.0.14/tests/cloudwatch_metrics/test_analyze_metric.py +207 -0
- awslabs_cloudwatch_mcp_server-0.0.14/tests/cloudwatch_metrics/test_cloudformation_template_generator.py +203 -0
- awslabs_cloudwatch_mcp_server-0.0.14/tests/cloudwatch_metrics/test_decomposer_trend.py +262 -0
- awslabs_cloudwatch_mcp_server-0.0.14/tests/cloudwatch_metrics/test_metric_analyzer.py +390 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_metrics/test_metrics_error_handling.py +69 -14
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_metrics/test_metrics_models.py +117 -21
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_metrics/test_metrics_server.py +219 -13
- awslabs_cloudwatch_mcp_server-0.0.14/tests/cloudwatch_metrics/test_seasonal_detector.py +496 -0
- awslabs_cloudwatch_mcp_server-0.0.14/tests/cloudwatch_metrics/test_seasonality_enum.py +105 -0
- awslabs_cloudwatch_mcp_server-0.0.14/tests/cloudwatch_metrics/test_utils.py +107 -0
- awslabs_cloudwatch_mcp_server-0.0.14/uv-requirements.txt +27 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/uv.lock +446 -6
- awslabs_cloudwatch_mcp_server-0.0.11/uv-requirements.txt +0 -24
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/.gitignore +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/.python-version +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/Dockerfile +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/LICENSE +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/NOTICE +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/__init__.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_logs/models.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_logs/tools.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/data/metric_metadata.json +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/common.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/server.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/docker-healthcheck.sh +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_alarms/test_active_alarms.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_alarms/test_alarm_history.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_alarms/test_alarm_history_integration.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_alarms/test_alarms_error_handling.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_logs/test_logs_error_handling.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_logs/test_logs_models.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_logs/test_logs_server.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/cloudwatch_metrics/test_validation_error.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/test_common_and_server.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/test_init.py +0 -0
- {awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/tests/test_main.py +0 -0
{awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/CHANGELOG.md
@@ -6,6 +6,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
 ## Unreleased
+
+## [0.0.5] - 2025-10-06
+
+### Added
+
+- Added tool to analyze CloudWatch Metric data
+
+### Changed
+
+- Updated Alarm recommendation tool to support CloudWatch Anomaly Detection Alarms
+
 ## [0.0.4] - 2025-07-11
 
 ### Changed
{awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: awslabs.cloudwatch-mcp-server
-Version: 0.0.11
+Version: 0.0.14
 Summary: An AWS Labs Model Context Protocol (MCP) server for cloudwatch
 Project-URL: homepage, https://awslabs.github.io/mcp/
 Project-URL: docs, https://awslabs.github.io/mcp/servers/cloudwatch-mcp-server/
@@ -23,8 +23,11 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.10
 Requires-Dist: boto3>=1.38.22
 Requires-Dist: loguru>=0.7.0
-Requires-Dist: mcp[cli]>=1.
+Requires-Dist: mcp[cli]>=1.23.0
+Requires-Dist: numpy>=2.0.0
+Requires-Dist: pandas>=2.2.3
 Requires-Dist: pydantic>=2.10.6
+Requires-Dist: statsmodels>=0.14.0
 Description-Content-Type: text/markdown
 
 # AWS Labs cloudwatch MCP Server
@@ -57,7 +60,8 @@ Alarm Recommendations - Suggests recommended alarm configurations for CloudWatch
 ### Tools for CloudWatch Metrics
 * `get_metric_data` - Retrieves detailed CloudWatch metric data for any CloudWatch metric. Use this for general CloudWatch metrics that aren't specific to Application Signals. Provides ability to query any metric namespace, dimension, and statistic
 * `get_metric_metadata` - Retrieves comprehensive metadata about a specific CloudWatch metric
-* `get_recommended_metric_alarms` - Gets recommended alarms for a CloudWatch metric
+* `get_recommended_metric_alarms` - Gets recommended alarms for a CloudWatch metric based on best practices and on trend, seasonality, and statistical analysis.
+* `analyze_metric` - Analyzes CloudWatch metric data to determine trend, seasonality, and statistical properties
 
 ### Tools for CloudWatch Alarms
 * `get_active_alarms` - Identifies currently active CloudWatch alarms across the account
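The new numpy, pandas, and statsmodels requirements back the metric-analysis additions in this release (metric_analyzer.py and metric_data_decomposer.py). The decomposer's implementation is not part of this diff, so the sketch below is only an illustration, on synthetic data, of the kind of seasonal-trend decomposition the statsmodels dependency enables; nothing in it is taken from the package's actual code.

```python
# Illustrative only: synthetic two-week series of 5-minute datapoints with a daily cycle.
import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose

index = pd.date_range('2025-01-01', periods=288 * 14, freq='5min')  # 288 points per day
values = 100 + 10 * np.sin(2 * np.pi * np.arange(len(index)) / 288)
values = values + np.random.default_rng(0).normal(0, 1, len(index))

result = seasonal_decompose(pd.Series(values, index=index), model='additive', period=288)
print(result.trend.dropna().iloc[:3])   # smoothed trend component
print(result.seasonal.iloc[:3])         # repeating daily component
```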
{awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/README.md
@@ -28,7 +28,8 @@ Alarm Recommendations - Suggests recommended alarm configurations for CloudWatch
 ### Tools for CloudWatch Metrics
 * `get_metric_data` - Retrieves detailed CloudWatch metric data for any CloudWatch metric. Use this for general CloudWatch metrics that aren't specific to Application Signals. Provides ability to query any metric namespace, dimension, and statistic
 * `get_metric_metadata` - Retrieves comprehensive metadata about a specific CloudWatch metric
-* `get_recommended_metric_alarms` - Gets recommended alarms for a CloudWatch metric
+* `get_recommended_metric_alarms` - Gets recommended alarms for a CloudWatch metric based on best practices and on trend, seasonality, and statistical analysis.
+* `analyze_metric` - Analyzes CloudWatch metric data to determine trend, seasonality, and statistical properties
 
 ### Tools for CloudWatch Alarms
 * `get_active_alarms` - Identifies currently active CloudWatch alarms across the account
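For orientation, here is a hedged sketch of calling the new `analyze_metric` tool from the MCP Python SDK. The argument names (`metric_name`, `namespace`, `dimensions`, `region`) and the `uvx` launch command are assumptions for illustration only, not taken from this diff; consult the server's published tool schema for the real parameters.

```python
# Hedged sketch: tool arguments below are hypothetical, check the server's tool schema.
import asyncio
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main():
    params = StdioServerParameters(
        command='uvx', args=['awslabs.cloudwatch-mcp-server@latest']
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(
                'analyze_metric',
                arguments={
                    'metric_name': 'CPUUtilization',  # hypothetical example metric
                    'namespace': 'AWS/EC2',
                    'dimensions': [{'Name': 'InstanceId', 'Value': 'i-0123456789abcdef0'}],
                    'region': 'us-east-1',
                },
            )
            print(result.content)


asyncio.run(main())
```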
{awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_alarms/models.py
@@ -54,7 +54,7 @@ class CompositeAlarmSummary(BaseModel):
 
 
 class ActiveAlarmsResponse(BaseModel):
-    """Response containing active CloudWatch
+    """Response containing active CloudWatch Alarms."""
 
     metric_alarms: List[MetricAlarmSummary] = Field(
         default_factory=list, description='List of active metric alarms'
{awslabs_cloudwatch_mcp_server-0.0.11 → awslabs_cloudwatch_mcp_server-0.0.14}/awslabs/cloudwatch_mcp_server/cloudwatch_alarms/tools.py
@@ -80,9 +80,9 @@ class CloudWatchAlarmsTools:
             Field(description='AWS region to query. Defaults to us-east-1.'),
         ] = 'us-east-1',
     ) -> ActiveAlarmsResponse:
-        """Gets all CloudWatch
+        """Gets all CloudWatch Alarms currently in ALARM state.
 
-        This tool retrieves all CloudWatch
+        This tool retrieves all CloudWatch Alarms that are currently in the ALARM state,
         including both metric alarms and composite alarms. Results are optimized for
         LLM reasoning with summary-level information.
 
awslabs_cloudwatch_mcp_server-0.0.14/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/cloudformation_template_generator.py
ADDED
@@ -0,0 +1,162 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.constants import COMPARISON_OPERATOR_ANOMALY
+from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.models import AnomalyDetectionAlarmThreshold
+from typing import Any, Dict
+
+
+logger = logging.getLogger(__name__)
+
+
+class CloudFormationTemplateGenerator:
+    """Generate CloudFormation JSON for CloudWatch Anomaly Detection Alarms."""
+
+    def generate_metric_alarm_template(self, alarm_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Generate CFN template for a single CloudWatch Alarm."""
+        if not self._is_anomaly_detection_alarm(alarm_data):
+            return {}
+
+        # Validate required fields
+        if not alarm_data.get('metricName'):
+            raise ValueError(
+                'Metric Name is required to generate CloudFormation templates for Cloudwatch Alarms'
+            )
+        if not alarm_data.get('namespace'):
+            raise ValueError(
+                'Metric Namespace is required to generate CloudFormation templates for Cloudwatch Alarms'
+            )
+
+        # Process alarm data and add computed fields
+        formatted_data = self._format_anomaly_detection_alarm_data(alarm_data)
+
+        # Build resources dict
+        anomaly_detector_key = f'{formatted_data["resourceKey"]}AnomalyDetector'
+        alarm_key = f'{formatted_data["resourceKey"]}Alarm'
+
+        resources = {
+            anomaly_detector_key: {
+                'Type': 'AWS::CloudWatch::AnomalyDetector',
+                'Properties': {
+                    'MetricName': formatted_data['metricName'],
+                    'Namespace': formatted_data['namespace'],
+                    'Stat': formatted_data['statistic'],
+                    'Dimensions': formatted_data['dimensions'],
+                },
+            },
+            alarm_key: {
+                'Type': 'AWS::CloudWatch::Alarm',
+                'DependsOn': anomaly_detector_key,
+                'Properties': {
+                    'AlarmDescription': formatted_data['alarmDescription'],
+                    'Metrics': [
+                        {
+                            'Expression': f'ANOMALY_DETECTION_BAND(m1, {formatted_data["sensitivity"]})',
+                            'Id': 'ad1',
+                        },
+                        {
+                            'Id': 'm1',
+                            'MetricStat': {
+                                'Metric': {
+                                    'MetricName': formatted_data['metricName'],
+                                    'Namespace': formatted_data['namespace'],
+                                    'Dimensions': formatted_data['dimensions'],
+                                },
+                                'Stat': formatted_data['statistic'],
+                                'Period': formatted_data['period'],
+                            },
+                        },
+                    ],
+                    'EvaluationPeriods': formatted_data['evaluationPeriods'],
+                    'DatapointsToAlarm': formatted_data['datapointsToAlarm'],
+                    'ThresholdMetricId': 'ad1',
+                    'ComparisonOperator': formatted_data['comparisonOperator'],
+                    'TreatMissingData': formatted_data['treatMissingData'],
+                },
+            },
+        }
+
+        final_template = {
+            'AWSTemplateFormatVersion': '2010-09-09',
+            'Description': 'CloudWatch Alarms and Anomaly Detectors',
+            'Resources': resources,
+        }
+
+        return final_template
+
+    def _is_anomaly_detection_alarm(self, alarm_data: Dict[str, Any]) -> bool:
+        return alarm_data.get('comparisonOperator') == COMPARISON_OPERATOR_ANOMALY
+
+    def _format_anomaly_detection_alarm_data(self, alarm_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Sanitize alarm data and add computed fields."""
+        formatted_data = alarm_data.copy()
+
+        # Generate resource key from metric name and namespace
+        formatted_data['resourceKey'] = self._generate_resource_key(
+            metric_name=alarm_data.get('metricName', ''),
+            namespace=alarm_data.get('namespace', ''),
+            dimensions=alarm_data.get('dimensions', []),
+        )
+
+        # Process threshold value
+        threshold = alarm_data.get('threshold', {})
+        formatted_data['sensitivity'] = threshold.get(
+            'sensitivity', AnomalyDetectionAlarmThreshold.DEFAULT_SENSITIVITY
+        )
+
+        # Set defaults
+        formatted_data.setdefault(
+            'alarmDescription', 'CloudWatch Alarm generated by CloudWatch MCP server.'
+        )
+        formatted_data.setdefault('statistic', 'Average')
+        formatted_data.setdefault('period', 300)
+        formatted_data.setdefault('evaluationPeriods', 2)
+        formatted_data.setdefault('datapointsToAlarm', 2)
+        formatted_data.setdefault('comparisonOperator', COMPARISON_OPERATOR_ANOMALY)
+        formatted_data.setdefault('treatMissingData', 'missing')
+        formatted_data.setdefault('dimensions', [])
+
+        return formatted_data
+
+    def _generate_resource_key(self, metric_name: str, namespace: str, dimensions: list) -> str:
+        """Generate CloudFormation resource key from metric components to act as logical id."""
+        # Strip AWS/ prefix from namespace (AWS CDK style)
+        clean_namespace = namespace.replace('AWS/', '')
+
+        # Add first dimension key and value for uniqueness if present
+        dimension_suffix = ''
+        if dimensions:
+            first_dim = dimensions[0]
+            dim_name = first_dim.get('Name', '')
+            dim_value = first_dim.get('Value', '')
+            dimension_suffix = f'{dim_name}{dim_value}'
+
+        resource_base = f'{clean_namespace}{metric_name}{dimension_suffix}'
+        return self._sanitize_resource_name(resource_base)
+
+    def _sanitize_resource_name(self, name: str) -> str:
+        """Sanitize name for CloudFormation resource key."""
+        # Remove non-alphanumeric characters
+        sanitized = ''.join(c for c in name if c.isalnum())
+
+        # Ensure it starts with letter
+        if not sanitized or not sanitized[0].isalpha():
+            sanitized = 'Resource' + sanitized
+
+        # Truncate if too long
+        if len(sanitized) > 255:
+            sanitized = sanitized[:255]
+
+        return sanitized
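A minimal usage sketch of the generator added above: the camelCase keys and `Name`/`Value` dimension entries mirror what the class itself reads, while the concrete metric, instance ID, and sensitivity values are hypothetical.

```python
# Minimal sketch: hypothetical metric/instance values, payload keys taken from the class above.
import json
from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.cloudformation_template_generator import (
    CloudFormationTemplateGenerator,
)
from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.constants import COMPARISON_OPERATOR_ANOMALY

generator = CloudFormationTemplateGenerator()
template = generator.generate_metric_alarm_template(
    {
        'metricName': 'CPUUtilization',
        'namespace': 'AWS/EC2',
        'dimensions': [{'Name': 'InstanceId', 'Value': 'i-0123456789abcdef0'}],
        'comparisonOperator': COMPARISON_OPERATOR_ANOMALY,  # marks it as an anomaly detection alarm
        'threshold': {'sensitivity': 2},
    }
)
# Produces an AWS::CloudWatch::AnomalyDetector plus an AWS::CloudWatch::Alarm whose
# ThresholdMetricId points at the ANOMALY_DETECTION_BAND expression.
print(json.dumps(template, indent=2))
```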
awslabs_cloudwatch_mcp_server-0.0.14/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/constants.py
ADDED
@@ -0,0 +1,30 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# CloudWatch MCP Server Constants
+
+# Time constants
+SECONDS_PER_MINUTE = 60
+MINUTES_PER_HOUR = 60
+HOURS_PER_DAY = 24
+DAYS_PER_WEEK = 7
+
+# Analysis constants
+DEFAULT_ANALYSIS_PERIOD_MINUTES = 20160  # 2 weeks
+
+# Threshold constants
+COMPARISON_OPERATOR_ANOMALY = 'LessThanLowerOrGreaterThanUpperThreshold'
+
+# Numerical stability
+NUMERICAL_STABILITY_THRESHOLD = 1e-10
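For reference, the 2-week default above follows directly from the time constants in the same file:

```python
from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.constants import (
    DAYS_PER_WEEK,
    DEFAULT_ANALYSIS_PERIOD_MINUTES,
    HOURS_PER_DAY,
    MINUTES_PER_HOUR,
)

# 2 weeks * 7 days * 24 hours * 60 minutes = 20160 minutes
assert DEFAULT_ANALYSIS_PERIOD_MINUTES == 2 * DAYS_PER_WEEK * HOURS_PER_DAY * MINUTES_PER_HOUR
```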
awslabs_cloudwatch_mcp_server-0.0.14/awslabs/cloudwatch_mcp_server/cloudwatch_metrics/metric_analyzer.py
ADDED
@@ -0,0 +1,192 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.constants import (
+    NUMERICAL_STABILITY_THRESHOLD,
+)
+from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.metric_data_decomposer import (
+    MetricDataDecomposer,
+)
+from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.models import (
+    DecompositionResult,
+    MetricData,
+    Seasonality,
+    Trend,
+)
+from collections import Counter
+from loguru import logger
+from typing import Any, Dict, Optional
+
+
+class MetricAnalyzer:
+    """Metric analysis including trend, density, seasonality, and statistical measures."""
+
+    def __init__(self):
+        """Initialize the metric analyzer."""
+        self.decomposer = MetricDataDecomposer()
+
+    def analyze_metric_data(self, metric_data: MetricData) -> Dict[str, Any]:
+        """Analyze metric data and return comprehensive analysis results.
+
+        Args:
+            metric_data: MetricData object containing timestamps and values
+
+        Returns:
+            Dict containing analysis results including seasonality, trend, statistics, and message
+        """
+        if not metric_data.timestamps or not metric_data.values:
+            return {'message': 'No metric data available for analysis'}
+
+        clean_data = [
+            (ts, val)
+            for ts, val in zip(metric_data.timestamps, metric_data.values)
+            if val is not None and not (np.isnan(val) or np.isinf(val))
+        ]
+
+        if len(clean_data) < 2:
+            return {'message': 'Insufficient valid data points for analysis'}
+
+        clean_timestamps, clean_values = zip(*clean_data)
+        clean_timestamps = list(clean_timestamps)
+        clean_values = list(clean_values)
+
+        try:
+            # Compute detailed analysis
+            publishing_period_seconds = self._compute_publishing_period(clean_timestamps)
+            density_ratio = self._compute_density_ratio(
+                clean_timestamps, publishing_period_seconds or 0.0
+            )
+            decomposition = self._compute_seasonality_and_trend(
+                clean_timestamps, clean_values, density_ratio, publishing_period_seconds
+            )
+            statistics = self._compute_statistics(clean_values)
+
+            return {
+                'data_points_found': len(metric_data.values),
+                'seasonality_seconds': decomposition.seasonality.value,
+                'trend': decomposition.trend,
+                'statistics': statistics,
+                'data_quality': {
+                    'total_points': len(metric_data.values),
+                    'density_ratio': density_ratio,
+                    'publishing_period_seconds': publishing_period_seconds,
+                },
+                'message': 'Metric analysis completed successfully',
+            }
+        except Exception as e:
+            logger.error(f'Error during metric analysis: {str(e)}')
+            return {'message': 'Unable to analyze metric data'}
+
+    def _compute_seasonality_and_trend(
+        self,
+        timestamps_ms: list[int],
+        values: list[float],
+        density_ratio: Optional[float],
+        publishing_period_seconds: Optional[float],
+    ):
+        """Compute seasonality and trend using decomposition.
+
+        Returns:
+            DecompositionResult with seasonality and trend
+        """
+        if density_ratio is None or publishing_period_seconds is None:
+            return DecompositionResult(seasonality=Seasonality.NONE, trend=Trend.NONE)
+
+        try:
+            return self.decomposer.detect_seasonality_and_trend(
+                timestamps_ms, values, density_ratio, int(publishing_period_seconds)
+            )
+        except Exception as e:
+            logger.error(f'Error computing seasonality and trend: {e}')
+            raise
+
+    def _compute_publishing_period(self, timestamps_ms: list[int]) -> Optional[float]:
+        """Compute the publishing period in seconds from timestamp gaps."""
+        try:
+            gaps = [timestamps_ms[i + 1] - timestamps_ms[i] for i in range(len(timestamps_ms) - 1)]
+            gap_counts = Counter(gaps)
+
+            if not gap_counts:
+                return None
+
+            most_common_gap_ms, _ = gap_counts.most_common(1)[0]
+            return self._get_closest_cloudwatch_period(most_common_gap_ms / 1000)
+        except Exception as e:
+            logger.warning(f'Error computing publishing period: {e}')
+            return None
+
+    def _get_closest_cloudwatch_period(self, period_seconds: float) -> float:
+        """Validate and normalize period to CloudWatch valid values."""
+        valid_periods = [1, 5, 10, 30] + [
+            i * 60 for i in range(1, 3601)
+        ]  # 1min to 1hour multiples
+
+        # Find closest valid period
+        closest_period = min(valid_periods, key=lambda x: abs(x - period_seconds))
+
+        # Only return if within 10% tolerance
+        if abs(closest_period - period_seconds) / closest_period <= 0.1:
+            return closest_period
+
+        return period_seconds  # Return original if no close match
+
+    def _compute_density_ratio(
+        self, timestamps_ms: list[int], publishing_period_seconds: float
+    ) -> Optional[float]:
+        """Calculate density ratio based on perfect timeline."""
+        if (
+            not publishing_period_seconds
+            or publishing_period_seconds <= 0
+            or len(timestamps_ms) < 2
+        ):
+            return None
+
+        try:
+            start_time = timestamps_ms[0]
+            publishing_period_ms = publishing_period_seconds * 1000
+            perfect_end_time = start_time + (publishing_period_ms * (len(timestamps_ms) - 1))
+            actual_points_in_range = sum(1 for ts in timestamps_ms if ts <= perfect_end_time)
+            return actual_points_in_range / len(timestamps_ms)
+        except Exception as e:
+            logger.error(f'Error calculating density ratio: {e}', exc_info=True)
+            raise
+
+    def _compute_statistics(self, values: list[float]) -> Dict[str, Any]:
+        """Compute essential statistical measures for LLM consumption."""
+        if not values:
+            return {
+                'min': None,
+                'max': None,
+                'std_deviation': None,
+                'coefficient_of_variation': None,
+                'median': None,
+            }
+
+        try:
+            values_array = np.array(values)
+            mean_val = np.mean(values_array)
+            std_dev = np.std(values_array, ddof=0)
+            cv = std_dev / abs(mean_val) if abs(mean_val) > NUMERICAL_STABILITY_THRESHOLD else None
+
+            return {
+                'min': float(np.min(values_array)),
+                'max': float(np.max(values_array)),
+                'std_deviation': float(std_dev),
+                'coefficient_of_variation': float(cv) if cv is not None else None,
+                'median': float(np.median(values_array)),
+            }
+        except Exception as e:
+            logger.warning(f'Error computing statistics: {e}')
+            raise
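A hedged sketch of driving `MetricAnalyzer` directly with synthetic data. `MetricData` is defined in `cloudwatch_metrics/models.py`, whose full definition is not part of this diff, so the keyword arguments below (epoch-millisecond `timestamps`, float `values`) are assumptions based on how the analyzer reads the object.

```python
# Sketch only: MetricData constructor arguments are assumed, not confirmed by this diff.
from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.metric_analyzer import MetricAnalyzer
from awslabs.cloudwatch_mcp_server.cloudwatch_metrics.models import MetricData

# One day of 5-minute datapoints (300 000 ms apart) with a mild upward drift.
start_ms = 1_735_689_600_000
timestamps = [start_ms + i * 300_000 for i in range(288)]
values = [50.0 + 0.05 * i for i in range(288)]

analyzer = MetricAnalyzer()
result = analyzer.analyze_metric_data(MetricData(timestamps=timestamps, values=values))

# Expected shape, per the code above: data_points_found, seasonality_seconds, trend,
# statistics (min/max/std_deviation/coefficient_of_variation/median), and data_quality.
print(result['message'])
print(result.get('statistics'))
```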