aws-cost-calculator-cli 1.2.0__py3-none-any.whl → 1.8.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aws-cost-calculator-cli might be problematic. Click here for more details.
- aws_cost_calculator_cli-1.8.2.dist-info/METADATA +437 -0
- aws_cost_calculator_cli-1.8.2.dist-info/RECORD +15 -0
- cost_calculator/api_client.py +85 -0
- cost_calculator/cli.py +714 -105
- cost_calculator/cur.py +244 -0
- cost_calculator/drill.py +323 -0
- cost_calculator/executor.py +291 -0
- cost_calculator/forensics.py +321 -0
- aws_cost_calculator_cli-1.2.0.dist-info/METADATA +0 -246
- aws_cost_calculator_cli-1.2.0.dist-info/RECORD +0 -10
- {aws_cost_calculator_cli-1.2.0.dist-info → aws_cost_calculator_cli-1.8.2.dist-info}/WHEEL +0 -0
- {aws_cost_calculator_cli-1.2.0.dist-info → aws_cost_calculator_cli-1.8.2.dist-info}/entry_points.txt +0 -0
- {aws_cost_calculator_cli-1.2.0.dist-info → aws_cost_calculator_cli-1.8.2.dist-info}/licenses/LICENSE +0 -0
- {aws_cost_calculator_cli-1.2.0.dist-info → aws_cost_calculator_cli-1.8.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Executor that routes to either API or local execution.
|
|
3
|
+
"""
|
|
4
|
+
import boto3
|
|
5
|
+
import click
|
|
6
|
+
from cost_calculator.api_client import is_api_configured, call_lambda_api
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def get_credentials_dict(config):
    """
    Extract AWS credentials from a profile config in the format the API expects.

    Returns:
        dict with access_key, secret_key and (when present) session_token,
        or None when the profile is 'dummy', the profile cannot be resolved,
        or no static credentials are configured.
    """
    if 'aws_profile' not in config:
        # Static-credentials path: read keys straight from the config.
        static = config.get('credentials', {})
        if not static:
            return None

        out = {
            'access_key': static['aws_access_key_id'],
            'secret_key': static['aws_secret_access_key'],
        }
        if 'aws_session_token' in static:
            out['session_token'] = static['aws_session_token']
        return out

    # 'dummy' profile means API-only mode: deliberately load no credentials.
    if config['aws_profile'] == 'dummy':
        return None

    # Resolve temporary credentials from the (typically SSO) profile.
    try:
        frozen = (
            boto3.Session(profile_name=config['aws_profile'])
            .get_credentials()
            .get_frozen_credentials()
        )
    except Exception:
        # Profile missing or not logged in: return None and let the API side cope.
        return None

    return {
        'access_key': frozen.access_key,
        'secret_key': frozen.secret_key,
        'session_token': frozen.token,
    }
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def execute_trends(config, weeks):
    """
    Run the weekly trends analysis, via the Lambda API when configured,
    otherwise locally through Cost Explorer.

    Args:
        config: Profile configuration (accounts plus credential settings).
        weeks: Number of weeks to analyze.

    Returns:
        dict: trends data
    """
    accounts = config['accounts']

    if is_api_configured():
        click.echo("Using Lambda API...")
        return call_lambda_api('trends', get_credentials_dict(config), accounts, weeks=weeks)

    click.echo("Using local execution...")
    from cost_calculator.trends import analyze_trends

    # Build a boto3 session from either a named profile or static credentials.
    if 'aws_profile' in config:
        session = boto3.Session(profile_name=config['aws_profile'])
    else:
        creds = config['credentials']
        kwargs = {
            'aws_access_key_id': creds['aws_access_key_id'],
            'aws_secret_access_key': creds['aws_secret_access_key'],
            'region_name': creds.get('region', 'us-east-1'),
        }
        if 'aws_session_token' in creds:
            kwargs['aws_session_token'] = creds['aws_session_token']
        session = boto3.Session(**kwargs)

    # Cost Explorer client pinned to us-east-1, matching the rest of this module.
    ce_client = session.client('ce', region_name='us-east-1')
    return analyze_trends(ce_client, accounts, weeks)
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def execute_monthly(config, months):
    """
    Run the monthly trends analysis, via the Lambda API when configured,
    otherwise locally through Cost Explorer.

    Args:
        config: Profile configuration (accounts plus credential settings).
        months: Number of months to analyze.

    Returns:
        dict: monthly data
    """
    accounts = config['accounts']

    if is_api_configured():
        click.echo("Using Lambda API...")
        return call_lambda_api('monthly', get_credentials_dict(config), accounts, months=months)

    click.echo("Using local execution...")
    from cost_calculator.monthly import analyze_monthly_trends

    # Build a boto3 session from either a named profile or static credentials.
    if 'aws_profile' in config:
        session = boto3.Session(profile_name=config['aws_profile'])
    else:
        creds = config['credentials']
        kwargs = {
            'aws_access_key_id': creds['aws_access_key_id'],
            'aws_secret_access_key': creds['aws_secret_access_key'],
            'region_name': creds.get('region', 'us-east-1'),
        }
        if 'aws_session_token' in creds:
            kwargs['aws_session_token'] = creds['aws_session_token']
        session = boto3.Session(**kwargs)

    # Cost Explorer client pinned to us-east-1, matching the rest of this module.
    ce_client = session.client('ce', region_name='us-east-1')
    return analyze_monthly_trends(ce_client, accounts, months)
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def execute_drill(config, weeks, service_filter=None, account_filter=None, usage_type_filter=None, resources=False):
    """
    Run the drill-down analysis via the Lambda API or locally.

    Args:
        config: Profile configuration
        weeks: Number of weeks to analyze
        service_filter: Optional service name filter
        account_filter: Optional account ID filter
        usage_type_filter: Optional usage type filter
        resources: If True, query CUR for resource-level details
            (requires service_filter)

    Returns:
        dict: drill data or resource data

    Raises:
        click.ClickException: if resources is True but no service filter given.
    """
    accounts = config['accounts']

    def _local_session():
        # Build a boto3 session from either a named profile or static credentials.
        if 'aws_profile' in config:
            return boto3.Session(profile_name=config['aws_profile'])
        creds = config['credentials']
        session_kwargs = {
            'aws_access_key_id': creds['aws_access_key_id'],
            'aws_secret_access_key': creds['aws_secret_access_key'],
            'region_name': creds.get('region', 'us-east-1'),
        }
        if 'aws_session_token' in creds:
            session_kwargs['aws_session_token'] = creds['aws_session_token']
        return boto3.Session(**session_kwargs)

    if resources:
        # Resource-level drill (CUR/Athena) only makes sense for one service.
        if not service_filter:
            raise click.ClickException("--service is required when using --resources flag")

        if is_api_configured():
            click.echo("Using Lambda API for CUR resource query...")
            api_kwargs = {'weeks': weeks, 'service': service_filter, 'resources': True}
            if account_filter:
                api_kwargs['account'] = account_filter
            return call_lambda_api('drill', get_credentials_dict(config), accounts, **api_kwargs)

        click.echo("Using local Athena client for CUR resource query...")
        from cost_calculator.cur import query_cur_resources

        athena_client = _local_session().client('athena', region_name='us-east-1')
        return query_cur_resources(
            athena_client, accounts, service_filter, account_filter, weeks
        )

    # Standard drill-down via Cost Explorer.
    if is_api_configured():
        click.echo("Using Lambda API...")
        api_kwargs = {'weeks': weeks}
        if service_filter:
            api_kwargs['service'] = service_filter
        if account_filter:
            api_kwargs['account'] = account_filter
        if usage_type_filter:
            api_kwargs['usage_type'] = usage_type_filter
        return call_lambda_api('drill', get_credentials_dict(config), accounts, **api_kwargs)

    click.echo("Using local execution...")
    from cost_calculator.drill import analyze_drill_down

    ce_client = _local_session().client('ce', region_name='us-east-1')
    return analyze_drill_down(
        ce_client, accounts, weeks,
        service_filter=service_filter,
        account_filter=account_filter,
        usage_type_filter=usage_type_filter
    )
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def execute_analyze(config, weeks, analysis_type, pattern=None, min_cost=None):
    """
    Run the pandas-based analysis via the Lambda API.

    Note: this only works via API (the Lambda carries the pandas layer);
    there is no local fallback.

    Args:
        config: Profile configuration.
        weeks: Number of weeks to analyze.
        analysis_type: Analysis type name forwarded to the API as 'type'.
        pattern: Optional pattern filter (forwarded only when truthy).
        min_cost: Optional minimum cost filter (forwarded only when truthy).

    Returns:
        dict: analysis results

    Raises:
        click.ClickException: when the API is not configured.
    """
    accounts = config['accounts']

    if not is_api_configured():
        raise click.ClickException(
            "Analyze command requires API configuration.\n"
            "Set COST_API_SECRET environment variable."
        )

    request = {'weeks': weeks, 'type': analysis_type}
    if pattern:
        request['pattern'] = pattern
    if min_cost:
        request['min_cost'] = min_cost

    return call_lambda_api('analyze', get_credentials_dict(config), accounts, **request)
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def execute_profile_operation(operation, profile_name=None, accounts=None, description=None):
    """
    Run a profile CRUD operation against the profiles Lambda endpoint.

    Args:
        operation: Operation name sent to the API.
        profile_name: Optional profile name (sent only when truthy).
        accounts: Optional accounts list (sent only when truthy).
        description: Optional description (sent only when truthy).

    Returns:
        dict: operation result

    Raises:
        click.ClickException: when the API is not configured.
        Exception: when the HTTP call returns a non-200 status.
    """
    if not is_api_configured():
        raise click.ClickException(
            "Profile commands require API configuration.\n"
            "Set COST_API_SECRET environment variable."
        )

    # Profile operations need only the shared API secret, not AWS credentials.
    import os
    import requests
    import json

    # Dedicated profiles endpoint (hardcoded Lambda function URL).
    url = 'https://64g7jq7sjygec2zmll5lsghrpi0txrzo.lambda-url.us-east-1.on.aws/'

    body = {'operation': operation}
    for key, value in (
        ('profile_name', profile_name),
        ('accounts', accounts),
        ('description', description),
    ):
        if value:
            body[key] = value

    response = requests.post(
        url,
        headers={
            'X-API-Secret': os.environ.get('COST_API_SECRET', ''),
            'Content-Type': 'application/json',
        },
        json=body,
        timeout=60,
    )

    if response.status_code != 200:
        raise Exception(f"API call failed: {response.status_code} - {response.text}")

    return response.json()
|
|
@@ -0,0 +1,321 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Cost forensics module - Resource inventory and CloudTrail analysis
|
|
3
|
+
"""
|
|
4
|
+
import boto3
|
|
5
|
+
from datetime import datetime, timedelta
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
import json
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def inventory_resources(account_id, profile, region='us-west-2'):
    """
    Inventory AWS resources (EC2, EFS, ELBv2, DynamoDB) in an account.

    Args:
        account_id: AWS account ID (recorded in the result; the queried account
            is whatever the profile resolves to).
        profile: AWS profile name (SSO)
        region: AWS region

    Returns:
        dict with resource inventory; on failure an 'error' key holds the
        exception text and whatever was collected so far is returned.
        Note: list calls are unpaginated, so only the first page of each
        resource type is captured; DynamoDB is capped at 20 tables.
    """
    session = boto3.Session(profile_name=profile)
    inventory = {
        'account_id': account_id,
        'profile': profile,
        'region': region,
        'timestamp': datetime.utcnow().isoformat(),
        'ec2_instances': [],
        'efs_file_systems': [],
        'load_balancers': [],
        'dynamodb_tables': []
    }

    try:
        # EC2: running instances only, with the Name tag resolved.
        ec2_client = session.client('ec2', region_name=region)
        instances_response = ec2_client.describe_instances()

        for reservation in instances_response['Reservations']:
            for instance in reservation['Instances']:
                if instance['State']['Name'] == 'running':
                    name = 'N/A'
                    for tag in instance.get('Tags', []):
                        if tag['Key'] == 'Name':
                            name = tag['Value']
                            break

                    inventory['ec2_instances'].append({
                        'instance_id': instance['InstanceId'],
                        'instance_type': instance['InstanceType'],
                        'name': name,
                        'state': instance['State']['Name'],
                        'launch_time': instance['LaunchTime'].isoformat(),
                        'availability_zone': instance['Placement']['AvailabilityZone']
                    })

        # EFS: per-filesystem size plus a running total in GB.
        efs_client = session.client('efs', region_name=region)
        efs_response = efs_client.describe_file_systems()

        total_efs_size = 0
        for fs in efs_response['FileSystems']:
            size_gb = fs['SizeInBytes']['Value'] / (1024**3)
            total_efs_size += size_gb

            inventory['efs_file_systems'].append({
                'file_system_id': fs['FileSystemId'],
                'name': fs.get('Name', 'N/A'),
                'size_gb': round(size_gb, 2),
                'creation_time': fs['CreationTime'].isoformat(),
                'number_of_mount_targets': fs['NumberOfMountTargets']
            })

        inventory['total_efs_size_gb'] = round(total_efs_size, 2)

        # Load balancers (ALB/NLB via elbv2).
        elbv2_client = session.client('elbv2', region_name=region)
        elb_response = elbv2_client.describe_load_balancers()

        for lb in elb_response['LoadBalancers']:
            inventory['load_balancers'].append({
                'name': lb['LoadBalancerName'],
                'type': lb['Type'],
                'dns_name': lb['DNSName'],
                'scheme': lb['Scheme'],
                'created_time': lb['CreatedTime'].isoformat(),
                'availability_zones': [az['ZoneName'] for az in lb['AvailabilityZones']]
            })

        # DynamoDB tables (best-effort; service may be unavailable in a region).
        try:
            ddb_client = session.client('dynamodb', region_name=region)
            tables_response = ddb_client.list_tables()

            for table_name in tables_response['TableNames'][:20]:  # Limit to 20 tables
                table_desc = ddb_client.describe_table(TableName=table_name)
                table_info = table_desc['Table']

                # Point-in-time-recovery status; permission errors degrade to UNKNOWN.
                # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
                try:
                    backup_desc = ddb_client.describe_continuous_backups(TableName=table_name)
                    pitr_status = backup_desc['ContinuousBackupsDescription']['PointInTimeRecoveryDescription']['PointInTimeRecoveryStatus']
                except Exception:
                    pitr_status = 'UNKNOWN'

                size_gb = table_info.get('TableSizeBytes', 0) / (1024**3)

                inventory['dynamodb_tables'].append({
                    'table_name': table_name,
                    'size_gb': round(size_gb, 2),
                    'item_count': table_info.get('ItemCount', 0),
                    'pitr_status': pitr_status,
                    'created_time': table_info['CreationDateTime'].isoformat()
                })
        except Exception:
            # DynamoDB might not be available in all regions; skip silently.
            pass

    except Exception as e:
        # Partial inventory plus the failure reason.
        inventory['error'] = str(e)

    return inventory
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def analyze_cloudtrail(account_id, profile, start_date, end_date, region='us-west-2'):
    """
    Summarize CloudTrail events for an account over a date range.

    Args:
        account_id: AWS account ID (recorded in the result; the queried account
            is whatever the profile resolves to).
        profile: AWS profile name (SSO)
        start_date: Start datetime
        end_date: End datetime
        region: AWS region

    Returns:
        dict with per-event-name counts ('event_summary', most frequent first),
        captured resource-mutating events ('write_events'), and 'error' set to
        the exception text if the lookup failed. At most 200 events are scanned.
    """
    session = boto3.Session(profile_name=profile)
    ct_client = session.client('cloudtrail', region_name=region)

    analysis = {
        'account_id': account_id,
        'profile': profile,
        'region': region,
        'start_date': start_date.isoformat(),
        'end_date': end_date.isoformat(),
        'event_summary': {},
        'write_events': [],
        'error': None
    }

    # Events that indicate resource creation/modification
    write_event_names = [
        'RunInstances', 'CreateVolume', 'AttachVolume',
        'CreateFileSystem', 'ModifyFileSystem',
        'CreateLoadBalancer', 'ModifyLoadBalancerAttributes',
        'CreateTable', 'UpdateTable', 'UpdateContinuousBackups',
        'CreateBackupVault', 'StartBackupJob'
    ]

    try:
        event_counts = defaultdict(int)

        # Query CloudTrail. Page size and the overall cap both belong in
        # PaginationConfig: passing MaxResults directly to paginate() collides
        # with the paginator's own handling of the service limit parameter.
        paginator = ct_client.get_paginator('lookup_events')

        for page in paginator.paginate(
            StartTime=start_date,
            EndTime=end_date,
            PaginationConfig={'MaxItems': 200, 'PageSize': 50}
        ):
            for event in page.get('Events', []):
                event_name = event.get('EventName', '')
                event_counts[event_name] += 1

                # Capture write events (resource creations/modifications).
                if event_name in write_event_names:
                    analysis['write_events'].append({
                        'time': event['EventTime'].isoformat(),
                        'event_name': event_name,
                        'username': event.get('Username', 'N/A'),
                        'resources': [
                            {
                                'type': r.get('ResourceType', 'N/A'),
                                'name': r.get('ResourceName', 'N/A')
                            }
                            for r in event.get('Resources', [])[:3]
                        ]
                    })

        # Plain dict, most frequent event names first.
        analysis['event_summary'] = dict(sorted(
            event_counts.items(),
            key=lambda kv: kv[1],
            reverse=True
        ))

    except Exception as e:
        analysis['error'] = str(e)

    return analysis
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def format_investigation_report(cost_data, inventories, cloudtrail_data=None):
    """
    Render investigation data as a markdown report.

    Args:
        cost_data: Cost analysis results from trends/drill (section header only
            for now; details are expected to be filled in later).
        inventories: List of resource inventories from inventory_resources().
        cloudtrail_data: List of CloudTrail analyses (optional).

    Returns:
        str: Markdown formatted report
    """
    lines = []
    emit = lines.append

    emit("# Cost Investigation Report")
    emit(f"**Generated:** {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')}")
    emit("")

    # Cost analysis section (placeholder: populated from trends/drill results).
    if cost_data:
        emit("## Cost Analysis")
        emit("")

    # One inventory section per account, '---' separated.
    if inventories:
        emit("## Resource Inventory")
        emit("")

        for inv in inventories:
            emit(f"### Account {inv['account_id']} ({inv['profile']})")
            emit(f"**Region:** {inv['region']}")
            emit("")

            ec2_list = inv['ec2_instances']
            if ec2_list:
                emit(f"**EC2 Instances:** {len(ec2_list)} running")
                for instance in ec2_list[:10]:  # Show first 10
                    emit(f"- `{instance['instance_id']}`: {instance['instance_type']} ({instance['name']})")
                    emit(f" - Launched: {instance['launch_time'][:10]}, AZ: {instance['availability_zone']}")
                if len(ec2_list) > 10:
                    emit(f" ... and {len(ec2_list) - 10} more")
                emit("")

            efs_list = inv['efs_file_systems']
            if efs_list:
                total_size = inv.get('total_efs_size_gb', 0)
                emit(f"**EFS File Systems:** {len(efs_list)} total, {total_size:,.0f} GB")
                for fs in efs_list:
                    emit(f"- `{fs['file_system_id']}` ({fs['name']}): {fs['size_gb']:,.2f} GB")
                    emit(f" - Created: {fs['creation_time'][:10]}")
                emit("")

            lb_list = inv['load_balancers']
            if lb_list:
                emit(f"**Load Balancers:** {len(lb_list)}")
                for lb in lb_list[:10]:  # Show first 10
                    emit(f"- `{lb['name']}`: {lb['type']}")
                    emit(f" - Created: {lb['created_time'][:10]}, Scheme: {lb['scheme']}")
                if len(lb_list) > 10:
                    emit(f" ... and {len(lb_list) - 10} more")
                emit("")

            ddb_list = inv['dynamodb_tables']
            if ddb_list:
                emit(f"**DynamoDB Tables:** {len(ddb_list)}")
                for table in ddb_list[:10]:
                    emit(f"- `{table['table_name']}`: {table['size_gb']:.2f} GB, {table['item_count']:,} items")
                    emit(f" - PITR: {table['pitr_status']}, Created: {table['created_time'][:10]}")
                if len(ddb_list) > 10:
                    emit(f" ... and {len(ddb_list) - 10} more")
                emit("")

            emit("---")
            emit("")

    # One CloudTrail section per account, '---' separated.
    if cloudtrail_data:
        emit("## CloudTrail Events")
        emit("")

        for ct in cloudtrail_data:
            emit(f"### Account {ct['account_id']} ({ct['profile']})")
            emit(f"**Period:** {ct['start_date'][:10]} to {ct['end_date'][:10]}")
            emit("")

            if ct.get('error'):
                emit(f"⚠️ Error: {ct['error']}")
                emit("")
                continue

            # Resource-mutating events first.
            if ct['write_events']:
                emit(f"**Resource Changes:** {len(ct['write_events'])} events")
                for evt in ct['write_events'][:10]:
                    emit(f"- `{evt['time'][:19]}` - **{evt['event_name']}**")
                    emit(f" - User: {evt['username']}")
                    if evt['resources']:
                        for res in evt['resources']:
                            emit(f" - Resource: {res['type']} - {res['name']}")
                emit("")

            # Then the overall frequency summary.
            if ct['event_summary']:
                emit("**Top Events:**")
                for event_name, count in list(ct['event_summary'].items())[:15]:
                    emit(f"- {event_name}: {count}")
                emit("")

            emit("---")
            emit("")

    return "\n".join(lines)
|