awslabs.cloudwatch-appsignals-mcp-server 0.1.9__tar.gz → 0.1.10__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/Dockerfile +2 -2
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/PKG-INFO +1 -1
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/__init__.py +1 -1
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/trace_tools.py +71 -13
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/pyproject.toml +1 -1
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_server.py +152 -1
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/uv.lock +1 -1
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/.gitignore +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/.python-version +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/CHANGELOG.md +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/LICENSE +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/NOTICE +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/README.md +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/__init__.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/audit_presentation_utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/audit_utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/aws_clients.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/server.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/service_audit_utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/service_tools.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/sli_report_client.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/slo_tools.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/awslabs/cloudwatch_appsignals_mcp_server/utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/docker-healthcheck.sh +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/conftest.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_audit_presentation_utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_audit_utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_aws_profile.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_initialization.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_server_audit_functions.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_server_audit_tools.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_service_audit_utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_service_tools_operations.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_sli_report_client.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_slo_tools.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/tests/test_utils.py +0 -0
- {awslabs_cloudwatch_appsignals_mcp_server-0.1.9 → awslabs_cloudwatch_appsignals_mcp_server-0.1.10}/uv-requirements.txt +0 -0
Dockerfile
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # dependabot should continue to update this to the latest hash.
-FROM public.ecr.aws/docker/library/python:3.13
+FROM public.ecr.aws/docker/library/python:3.13-alpine@sha256:070342a0cc1011532c0e69972cce2bbc6cc633eba294bae1d12abea8bd05303b AS uv
 
 # Install the project into `/app`
 WORKDIR /app
@@ -61,7 +61,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # Make the directory just in case it doesn't exist
 RUN mkdir -p /root/.local
 
-FROM public.ecr.aws/docker/library/python:3.13
+FROM public.ecr.aws/docker/library/python:3.13-alpine@sha256:070342a0cc1011532c0e69972cce2bbc6cc633eba294bae1d12abea8bd05303b
 
 # Place executables in the environment at the front of the path and include other binaries
 ENV PATH="/app/.venv/bin:$PATH" \
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: awslabs.cloudwatch-appsignals-mcp-server
-Version: 0.1.9
+Version: 0.1.10
 Summary: An AWS Labs Model Context Protocol (MCP) server for AWS Application Signals
 Project-URL: Homepage, https://awslabs.github.io/mcp/
 Project-URL: Documentation, https://awslabs.github.io/mcp/servers/cloudwatch-appsignals-mcp-server/
awslabs/cloudwatch_appsignals_mcp_server/trace_tools.py
@@ -402,6 +402,26 @@ async def query_sampled_traces(
             return obj.isoformat()
         return obj
 
+    # Helper function to extract fault message from root causes for deduplication
+    def get_fault_message(trace_data):
+        """Extract fault message from a trace for deduplication.
+
+        Only checks FaultRootCauses (5xx server errors) since this is the primary
+        use case for root cause investigation. Traces without fault messages are
+        not deduplicated.
+        """
+        # Only check FaultRootCauses for deduplication
+        root_causes = trace_data.get('FaultRootCauses', [])
+        if root_causes:
+            for cause in root_causes:
+                services = cause.get('Services', [])
+                for service in services:
+                    exceptions = service.get('Exceptions', [])
+                    if exceptions and exceptions[0].get('Message'):
+                        return exceptions[0].get('Message')
+        return None
+
+    # Build trace summaries (original format)
     trace_summaries = []
     for trace in traces:
         # Create a simplified trace data structure to reduce size
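For quick reference, here is a minimal standalone sketch of how this helper behaves, assuming trace summaries shaped like X-Ray `GetTraceSummaries` output; the sample payloads below are illustrative and not taken from the package or its tests:

```python
# Illustrative only: same extraction logic as the helper added above,
# exercised against hypothetical trace-summary dicts.
def get_fault_message(trace_data):
    """Return the first FaultRootCauses exception message, or None."""
    for cause in trace_data.get('FaultRootCauses', []):
        for service in cause.get('Services', []):
            exceptions = service.get('Exceptions', [])
            if exceptions and exceptions[0].get('Message'):
                return exceptions[0].get('Message')
    return None


fault_trace = {
    'Id': '1-sample-fault',
    'HasFault': True,
    'FaultRootCauses': [
        {'Services': [{'Name': 'orders-api', 'Exceptions': [{'Message': 'Database connection timeout'}]}]}
    ],
}
error_trace = {'Id': '1-sample-error', 'HasError': True, 'ErrorRootCauses': [{'Services': []}]}

print(get_fault_message(fault_trace))  # Database connection timeout
print(get_fault_message(error_trace))  # None -- only FaultRootCauses are considered
```

Duplicates are only collapsed when this helper returns a message, which is why traces that carry only ErrorRootCauses (4xx-style client errors) are never deduplicated.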
@@ -417,17 +437,11 @@ async def query_sampled_traces(
 
         # Only include root causes if they exist (to save space)
         if trace.get('ErrorRootCauses'):
-            trace_data['ErrorRootCauses'] = trace.get('ErrorRootCauses', [])[
-                :3
-            ]  # Limit to first 3
+            trace_data['ErrorRootCauses'] = trace.get('ErrorRootCauses', [])[:3]
         if trace.get('FaultRootCauses'):
-            trace_data['FaultRootCauses'] = trace.get('FaultRootCauses', [])[
-                :3
-            ]  # Limit to first 3
+            trace_data['FaultRootCauses'] = trace.get('FaultRootCauses', [])[:3]
         if trace.get('ResponseTimeRootCauses'):
-            trace_data['ResponseTimeRootCauses'] = trace.get('ResponseTimeRootCauses', [])[
-                :3
-            ]  # Limit to first 3
+            trace_data['ResponseTimeRootCauses'] = trace.get('ResponseTimeRootCauses', [])[:3]
 
         # Include limited annotations for key operations
         annotations = trace.get('Annotations', {})
@@ -447,15 +461,50 @@ async def query_sampled_traces(
         # Convert any datetime objects to ISO format strings
         for key, value in trace_data.items():
             trace_data[key] = convert_datetime(value)
+
         trace_summaries.append(trace_data)
 
+    # Deduplicate trace summaries by fault message
+    seen_faults = {}
+    deduped_trace_summaries = []
+
+    for trace_summary in trace_summaries:
+        # Check if this trace has an error
+        has_issues = (
+            trace_summary.get('HasError')
+            or trace_summary.get('HasFault')
+            or trace_summary.get('HasThrottle')
+        )
+
+        if not has_issues:
+            # Always include healthy traces
+            deduped_trace_summaries.append(trace_summary)
+            continue
+
+        # Extract fault message for deduplication (only checks FaultRootCauses)
+        fault_msg = get_fault_message(trace_summary)
+
+        if fault_msg and fault_msg in seen_faults:
+            # Skip this trace - we already have one with the same fault message
+            seen_faults[fault_msg]['count'] += 1
+            logger.debug(
+                f'Skipping duplicate trace {trace_summary.get("Id")} - fault message already seen: {fault_msg[:100]}...'
+            )
+            continue
+        else:
+            # First time seeing this fault (or no fault message) - include it
+            deduped_trace_summaries.append(trace_summary)
+            if fault_msg:
+                seen_faults[fault_msg] = {'count': 1}
+
     # Check transaction search status
     is_tx_search_enabled, tx_destination, tx_status = check_transaction_search_enabled(region)
 
+    # Build response with original format but deduplicated traces
     result_data = {
-        'TraceSummaries': trace_summaries,
-        'TraceCount': len(trace_summaries),
-        'Message': f'Retrieved {len(
+        'TraceSummaries': deduped_trace_summaries,
+        'TraceCount': len(deduped_trace_summaries),
+        'Message': f'Retrieved {len(deduped_trace_summaries)} unique traces from {len(trace_summaries)} total (deduplicated by fault message)',
         'SamplingNote': "⚠️ This data is from X-Ray's 5% sampling. Results may not show all errors or issues.",
         'TransactionSearchStatus': {
             'enabled': is_tx_search_enabled,
@@ -467,9 +516,18 @@ async def query_sampled_traces(
         },
     }
 
+    # Add dedup stats if we actually deduped anything
+    if len(deduped_trace_summaries) < len(trace_summaries):
+        duplicates_removed = len(trace_summaries) - len(deduped_trace_summaries)
+        result_data['DeduplicationStats'] = {
+            'OriginalTraceCount': len(trace_summaries),
+            'DuplicatesRemoved': duplicates_removed,
+            'UniqueFaultMessages': len(seen_faults),
+        }
+
     elapsed_time = timer() - start_time_perf
     logger.info(
-        f'query_sampled_traces completed in {elapsed_time:.3f}s - retrieved {len(trace_summaries)}
+        f'query_sampled_traces completed in {elapsed_time:.3f}s - retrieved {len(deduped_trace_summaries)} unique traces from {len(trace_summaries)} total'
     )
     return json.dumps(result_data, indent=2)
 
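The deduplication pass itself is straightforward: healthy traces always pass through, the first trace seen for each distinct fault message is kept, and later traces with the same message are dropped while a counter is incremented. Below is a minimal sketch of that pass in isolation, with the fault-message lookup inlined; the function names and sample values are illustrative, not taken from the package:

```python
# Illustrative sketch of the deduplication pass added above; the summaries are
# hypothetical dicts shaped like the trace_data entries the tool builds.
def first_fault_message(summary):
    for cause in summary.get('FaultRootCauses', []):
        for service in cause.get('Services', []):
            exceptions = service.get('Exceptions', [])
            if exceptions and exceptions[0].get('Message'):
                return exceptions[0].get('Message')
    return None


def dedupe_by_fault_message(trace_summaries):
    seen_faults = {}
    deduped = []
    for summary in trace_summaries:
        has_issues = summary.get('HasError') or summary.get('HasFault') or summary.get('HasThrottle')
        if not has_issues:
            deduped.append(summary)  # healthy traces are always kept
            continue
        fault_msg = first_fault_message(summary)
        if fault_msg and fault_msg in seen_faults:
            seen_faults[fault_msg]['count'] += 1  # duplicate fault message: drop
            continue
        deduped.append(summary)  # first occurrence, or no fault message: keep
        if fault_msg:
            seen_faults[fault_msg] = {'count': 1}
    return deduped, seen_faults


summaries = [
    {
        'Id': f'trace{i}',
        'HasFault': True,
        'FaultRootCauses': [{'Services': [{'Exceptions': [{'Message': 'Database connection timeout'}]}]}],
    }
    for i in range(5)
] + [{'Id': 'healthy', 'HasFault': False}]

deduped, seen = dedupe_by_fault_message(summaries)
print(len(deduped), seen)  # 2 {'Database connection timeout': {'count': 5}}
```

When anything is dropped, the response also carries the DeduplicationStats block added in the hunk above, so callers can still see how many traces the sampled window originally returned.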
tests/test_server.py
@@ -1634,7 +1634,7 @@ async def test_query_sampled_traces_with_fault_causes(mock_aws_clients):
             'Duration': 100,
             'HasFault': True,
             'FaultRootCauses': [
-                {'Services': [{'Name': 'service1'}]},
+                {'Services': [{'Name': 'service1', 'Exceptions': [{'Message': 'Test fault error'}]}]},
                 {'Services': [{'Name': 'service2'}]},
                 {'Services': [{'Name': 'service3'}]},
                 {'Services': [{'Name': 'service4'}]},  # Should be limited to 3
@@ -1709,6 +1709,157 @@ async def test_query_sampled_traces_datetime_conversion(mock_aws_clients):
         assert 'EndTime' not in trace_summary
 
 
+@pytest.mark.asyncio
+async def test_query_sampled_traces_deduplication(mock_aws_clients):
+    """Test query_sampled_traces deduplicates traces with same fault message.
+
+    Note: Only FaultRootCauses are deduplicated, not ErrorRootCauses.
+    This is because the primary use case is investigating server faults (5xx errors),
+    not client errors (4xx).
+    """
+    # Create 5 traces with the same fault message
+    mock_traces = [
+        {
+            'Id': f'trace{i}',
+            'Duration': 100 + i * 10,
+            'ResponseTime': 95 + i * 10,
+            'HasFault': True,
+            'FaultRootCauses': [
+                {
+                    'Services': [
+                        {
+                            'Name': 'test-service',
+                            'Exceptions': [{'Message': 'Database connection timeout'}],
+                        }
+                    ]
+                }
+            ],
+        }
+        for i in range(1, 6)
+    ]
+
+    # Add 2 traces with ErrorRootCauses (these should NOT be deduplicated)
+    mock_traces.extend(
+        [
+            {
+                'Id': 'trace6',
+                'Duration': 200,
+                'HasError': True,
+                'ErrorRootCauses': [
+                    {
+                        'Services': [
+                            {
+                                'Name': 'api-service',
+                                'Exceptions': [{'Message': 'Invalid API key'}],
+                            }
+                        ]
+                    }
+                ],
+            },
+            {
+                'Id': 'trace7',
+                'Duration': 210,
+                'HasError': True,
+                'ErrorRootCauses': [
+                    {
+                        'Services': [
+                            {
+                                'Name': 'api-service',
+                                'Exceptions': [{'Message': 'Invalid API key'}],
+                            }
+                        ]
+                    }
+                ],
+            },
+        ]
+    )
+
+    # Add 2 healthy traces
+    mock_traces.extend(
+        [
+            {
+                'Id': 'trace8',
+                'Duration': 50,
+                'ResponseTime': 45,
+                'HasError': False,
+                'HasFault': False,
+            },
+            {
+                'Id': 'trace9',
+                'Duration': 55,
+                'ResponseTime': 50,
+                'HasError': False,
+                'HasFault': False,
+            },
+        ]
+    )
+
+    with patch(
+        'awslabs.cloudwatch_appsignals_mcp_server.trace_tools.get_trace_summaries_paginated'
+    ) as mock_paginated:
+        mock_paginated.return_value = mock_traces
+
+        result_json = await query_sampled_traces(
+            start_time='2024-01-01T00:00:00Z', end_time='2024-01-01T01:00:00Z'
+        )
+
+        result = json.loads(result_json)
+
+        # Verify deduplication worked - should only have 5 traces
+        # 1 for database timeout fault (deduplicated from 5)
+        # 2 for API key errors (NOT deduplicated - only faults are deduped)
+        # 2 healthy traces (not deduplicated)
+        assert result['TraceCount'] == 5
+        assert len(result['TraceSummaries']) == 5
+
+        # Verify deduplication stats
+        assert 'DeduplicationStats' in result
+        assert result['DeduplicationStats']['OriginalTraceCount'] == 9
+        assert result['DeduplicationStats']['DuplicatesRemoved'] == 4  # 9 - 5 = 4
+        assert (
+            result['DeduplicationStats']['UniqueFaultMessages'] == 1
+        )  # Only counting FaultRootCauses
+
+        # Find the trace with fault
+        db_trace = next(
+            (
+                t
+                for t in result['TraceSummaries']
+                if t.get('FaultRootCauses')
+                and any(
+                    'Database connection timeout' in str(s.get('Exceptions', []))
+                    for cause in t['FaultRootCauses']
+                    for s in cause.get('Services', [])
+                )
+            ),
+            None,
+        )
+        assert db_trace is not None
+        assert db_trace['HasFault'] is True
+
+        # Verify both error traces are present (not deduplicated)
+        error_traces = [
+            t
+            for t in result['TraceSummaries']
+            if t.get('ErrorRootCauses')
+            and any(
+                'Invalid API key' in str(s.get('Exceptions', []))
+                for cause in t['ErrorRootCauses']
+                for s in cause.get('Services', [])
+            )
+        ]
+        assert len(error_traces) == 2  # Both error traces should be kept
+        assert all(t['HasError'] is True for t in error_traces)
+
+        # Verify healthy traces are included
+        healthy_count = sum(
+            1
+            for t in result['TraceSummaries']
+            if not t.get('HasError') and not t.get('HasFault') and not t.get('HasThrottle')
+        )
+        assert healthy_count == 2
+
+
 def test_main_success(mock_aws_clients):
     """Test main function normal execution."""
     with patch('awslabs.cloudwatch_appsignals_mcp_server.server.mcp') as mock_mcp: