aws-cost-calculator-cli 1.0.2__py3-none-any.whl → 1.8.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of aws-cost-calculator-cli might be problematic.

cost_calculator/cur.py ADDED
@@ -0,0 +1,244 @@
+"""
+CUR (Cost and Usage Report) queries via Athena for resource-level analysis.
+"""
+import time
+from datetime import datetime, timedelta
+
+
+# Service name to CUR product code mapping
+SERVICE_TO_PRODUCT_CODE = {
+    'EC2 - Other': 'AmazonEC2',
+    'Amazon Elastic Compute Cloud - Compute': 'AmazonEC2',
+    'Amazon Relational Database Service': 'AmazonRDS',
+    'Amazon Simple Storage Service': 'AmazonS3',
+    'Load Balancing': 'AWSELB',
+    'Elastic Load Balancing': 'AWSELB',
+    'Amazon DynamoDB': 'AmazonDynamoDB',
+    'AWS Lambda': 'AWSLambda',
+    'Amazon CloudFront': 'AmazonCloudFront',
+    'Amazon ElastiCache': 'AmazonElastiCache',
+    'Amazon Elastic MapReduce': 'ElasticMapReduce',
+    'Amazon Kinesis': 'AmazonKinesis',
+    'Amazon Redshift': 'AmazonRedshift',
+    'Amazon Simple Notification Service': 'AmazonSNS',
+    'Amazon Simple Queue Service': 'AmazonSQS',
+}
+
+
+def map_service_to_product_code(service_name):
+    """Map service name to CUR product code"""
+    # Direct mapping
+    if service_name in SERVICE_TO_PRODUCT_CODE:
+        return SERVICE_TO_PRODUCT_CODE[service_name]
+
+    # Fuzzy matching
+    service_lower = service_name.lower()
+    for key, code in SERVICE_TO_PRODUCT_CODE.items():
+        if key.lower() in service_lower or service_lower in key.lower():
+            return code
+
+    # Fallback: collapse the service name by stripping spaces and hyphens,
+    # e.g. "AWS Lambda" -> "AWSLambda"
+    return service_name.replace(' ', '').replace('-', '')
+
+
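A quick illustration of how the lookup cascades (a sketch, not part of the package source; the inputs are made-up examples):

```python
map_service_to_product_code('AWS Lambda')            # exact key -> 'AWSLambda'
map_service_to_product_code('Amazon DynamoDB PITR')  # fuzzy substring match -> 'AmazonDynamoDB'
map_service_to_product_code('Some New Service')      # fallback -> 'SomeNewService'
```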
+def get_cur_config():
+    """Load CUR configuration from config file or environment variables"""
+    import os
+    from pathlib import Path
+    import json
+
+    # Try config file first
+    config_file = Path.home() / '.config' / 'cost-calculator' / 'cur_config.json'
+    if config_file.exists():
+        with open(config_file) as f:
+            return json.load(f)
+
+    # Fall back to environment variables
+    return {
+        'database': os.environ.get('CUR_DATABASE', 'cur_database'),
+        'table': os.environ.get('CUR_TABLE', 'cur_table'),
+        's3_output': os.environ.get('CUR_S3_OUTPUT', 's3://your-athena-results-bucket/')
+    }
+
+
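The function above looks for ~/.config/cost-calculator/cur_config.json before falling back to the CUR_DATABASE, CUR_TABLE, and CUR_S3_OUTPUT environment variables. A minimal sketch of creating that file (the key names come from the code above; the values are placeholders for your own Glue database, CUR table, and Athena results bucket):

```python
import json
from pathlib import Path

config_dir = Path.home() / '.config' / 'cost-calculator'
config_dir.mkdir(parents=True, exist_ok=True)
(config_dir / 'cur_config.json').write_text(json.dumps({
    'database': 'my_cur_database',                  # placeholder: Glue/Athena database holding the CUR table
    'table': 'my_cur_table',                        # placeholder: CUR table name
    's3_output': 's3://my-athena-results-bucket/',  # placeholder: where Athena writes query results
}, indent=2))
```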
+def query_cur_resources(athena_client, accounts, service, account_filter, weeks,
+                        cur_database=None,
+                        cur_table=None,
+                        s3_output=None):
+    """
+    Query CUR via Athena for resource-level cost details.
+
+    Args:
+        athena_client: boto3 Athena client
+        accounts: list of account IDs
+        service: service name to filter by
+        account_filter: specific account ID or None for all accounts
+        weeks: number of weeks to analyze
+        cur_database: Athena database name
+        cur_table: CUR table name
+        s3_output: S3 location for query results
+
+    Returns:
+        dict with the resource list, product code, account filter, and query period
+    """
+    # Load CUR configuration
+    cur_config = get_cur_config()
+    if cur_database is None:
+        cur_database = cur_config['database']
+    if cur_table is None:
+        cur_table = cur_config['table']
+    if s3_output is None:
+        s3_output = cur_config['s3_output']
+
+    # Calculate date range
+    end_date = datetime.now() - timedelta(days=2)
+    start_date = end_date - timedelta(weeks=weeks)
+
+    # Map service to product code
+    product_code = map_service_to_product_code(service)
+
+    # Build account filter
+    if account_filter:
+        account_clause = f"AND line_item_usage_account_id = '{account_filter}'"
+    else:
+        account_list = "','".join(accounts)
+        account_clause = f"AND line_item_usage_account_id IN ('{account_list}')"
+
+    # Build query - use generic columns that work for all services
+    query = f"""
+    SELECT
+        line_item_usage_account_id as account_id,
+        line_item_resource_id as resource_id,
+        line_item_usage_type as usage_type,
+        product_region as region,
+        SUM(line_item_unblended_cost) as total_cost,
+        SUM(line_item_usage_amount) as total_usage
+    FROM {cur_database}.{cur_table}
+    WHERE line_item_product_code = '{product_code}'
+        {account_clause}
+        AND line_item_resource_id != ''
+        AND line_item_line_item_type IN ('Usage', 'Fee')
+        AND line_item_usage_start_date >= DATE '{start_date.strftime('%Y-%m-%d')}'
+        AND line_item_usage_start_date < DATE '{end_date.strftime('%Y-%m-%d')}'
+    GROUP BY 1, 2, 3, 4
+    ORDER BY total_cost DESC
+    LIMIT 50
+    """
+
+    # Execute Athena query
+    try:
+        response = athena_client.start_query_execution(
+            QueryString=query,
+            QueryExecutionContext={'Database': cur_database},
+            ResultConfiguration={'OutputLocation': s3_output}
+        )
+
+        query_execution_id = response['QueryExecutionId']
+
+        # Wait for query to complete (max 60 seconds)
+        max_wait = 60
+        wait_interval = 2
+        elapsed = 0
+
+        while elapsed < max_wait:
+            status_response = athena_client.get_query_execution(
+                QueryExecutionId=query_execution_id
+            )
+            state = status_response['QueryExecution']['Status']['State']
+
+            if state == 'SUCCEEDED':
+                break
+            elif state in ['FAILED', 'CANCELLED']:
+                reason = status_response['QueryExecution']['Status'].get(
+                    'StateChangeReason', 'Unknown error'
+                )
+                raise Exception(f"Athena query {state}: {reason}")
+
+            time.sleep(wait_interval)
+            elapsed += wait_interval
+
+        if elapsed >= max_wait:
+            raise Exception("Athena query timeout after 60 seconds")
+
+        # Get results
+        results_response = athena_client.get_query_results(
+            QueryExecutionId=query_execution_id,
+            MaxResults=100
+        )
+
+        # Parse results
+        resources = []
+        rows = results_response['ResultSet']['Rows']
+
+        # Skip header row
+        for row in rows[1:]:
+            data = row['Data']
+            resources.append({
+                'account_id': data[0].get('VarCharValue', ''),
+                'resource_id': data[1].get('VarCharValue', ''),
+                'usage_type': data[2].get('VarCharValue', ''),
+                'region': data[3].get('VarCharValue', ''),
+                'total_cost': float(data[4].get('VarCharValue', 0)),
+                'total_usage': float(data[5].get('VarCharValue', 0))
+            })
+
+        return {
+            'resources': resources,
+            'service': service,
+            'product_code': product_code,
+            'account_filter': account_filter,
+            'period': {
+                'start': start_date.strftime('%Y-%m-%d'),
+                'end': end_date.strftime('%Y-%m-%d'),
+                'weeks': weeks
+            }
+        }
+
+    except Exception as e:
+        raise Exception(f"CUR query failed: {str(e)}")
+
+
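A minimal sketch of calling this function, assuming the CUR configuration above is already in place; the region, account IDs, and service name are placeholders, not values from the package:

```python
import boto3

athena = boto3.client('athena', region_name='us-east-1')
result = query_cur_resources(
    athena_client=athena,
    accounts=['111111111111', '222222222222'],  # placeholder account IDs
    service='Amazon Relational Database Service',
    account_filter=None,   # or a single account ID to narrow the query
    weeks=4,               # look back ~4 weeks, ending two days ago
)
for r in result['resources'][:5]:
    print(r['resource_id'], r['usage_type'], f"${r['total_cost']:.2f}")
```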
+def format_resource_output(result):
+    """Format resource query results for display"""
+    resources = result['resources']
+    service = result['service']
+    account_filter = result.get('account_filter')
+    period = result['period']
+
+    output = []
+    output.append(f"\n📊 Resource Breakdown: {service}")
+
+    if account_filter:
+        output.append(f"Account: {account_filter} | Period: {period['start']} to {period['end']} ({period['weeks']} weeks)")
+    else:
+        output.append(f"All Accounts | Period: {period['start']} to {period['end']} ({period['weeks']} weeks)")
+
+    output.append("")
+
+    if not resources:
+        output.append("No resources found with costs in this period.")
+        return '\n'.join(output)
+
+    # Format as table
+    output.append(f"Top {len(resources)} Resources by Cost:")
+    output.append("┌─────────────────────────────┬──────────────────────────────┬────────────┬─────────┐")
+    output.append("│ Resource ID                 │ Usage Type                   │ Region     │ Cost    │")
+    output.append("├─────────────────────────────┼──────────────────────────────┼────────────┼─────────┤")
+
+    for resource in resources:
+        # Truncate values so they fit the fixed column widths used below
+        resource_id = resource['resource_id'][:24] + '...' if len(resource['resource_id']) > 27 else resource['resource_id']
+        usage_type = resource['usage_type'][:26] + '..' if len(resource['usage_type']) > 28 else resource['usage_type']
+        region = resource['region'][:10] if resource['region'] else 'N/A'
+        cost = resource['total_cost']
+
+        output.append(
+            f"│ {resource_id:<27} │ {usage_type:<28} │ {region:<10} │ ${cost:>6.2f} │"
+        )
+
+    output.append("└─────────────────────────────┴──────────────────────────────┴────────────┴─────────┘")
+    output.append("")
+    output.append("💡 Tip: Use AWS CLI to investigate specific resources:")
+    output.append("   aws ec2 describe-instances --instance-ids <resource-id>")
+
+    return '\n'.join(output)
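Continuing the sketch above, the formatter renders the query result as a fixed-width table:

```python
print(format_resource_output(result))  # 'result' from the query_cur_resources sketch above
```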
@@ -0,0 +1,323 @@
+"""
+Drill-down cost analysis module.
+Allows filtering by service, account, and usage type for detailed cost investigation.
+"""
+from datetime import datetime, timedelta
+from collections import defaultdict
+
+
+def get_filtered_costs(ce_client, accounts, time_start, time_end, granularity='DAILY',
+                       service_filter=None, account_filter=None, usage_type_filter=None):
+    """
+    Get costs with optional filters, grouped by the next level of detail.
+
+    Args:
+        ce_client: boto3 Cost Explorer client
+        accounts: List of account IDs
+        time_start: datetime for start
+        time_end: datetime for end
+        granularity: 'DAILY' or 'MONTHLY'
+        service_filter: Optional service name to filter
+        account_filter: Optional account ID to filter
+        usage_type_filter: Optional usage type to filter
+
+    Returns:
+        tuple: ({dimension_value: total_cost}, group_by_key)
+    """
+    # Build filter
+    filters = []
+
+    # Account filter (either from account_filter or all accounts)
+    if account_filter:
+        filters.append({
+            "Dimensions": {
+                "Key": "LINKED_ACCOUNT",
+                "Values": [account_filter]
+            }
+        })
+    else:
+        filters.append({
+            "Dimensions": {
+                "Key": "LINKED_ACCOUNT",
+                "Values": accounts
+            }
+        })
+
+    # Service filter
+    if service_filter:
+        filters.append({
+            "Dimensions": {
+                "Key": "SERVICE",
+                "Values": [service_filter]
+            }
+        })
+
+    # Usage type filter
+    if usage_type_filter:
+        filters.append({
+            "Dimensions": {
+                "Key": "USAGE_TYPE",
+                "Values": [usage_type_filter]
+            }
+        })
+
+    # Determine what to group by (next level of detail)
+    if usage_type_filter:
+        # Already at usage type level, group by region
+        group_by_key = 'REGION'
+    elif service_filter and account_filter:
+        # Have service and account, show usage types
+        group_by_key = 'USAGE_TYPE'
+    elif service_filter:
+        # Have service, show accounts
+        group_by_key = 'LINKED_ACCOUNT'
+    elif account_filter:
+        # Have account, show services
+        group_by_key = 'SERVICE'
+    else:
+        # No filters, show services (same as trends)
+        group_by_key = 'SERVICE'
+
+    # Build final filter
+    if len(filters) > 1:
+        final_filter = {"And": filters}
+    else:
+        final_filter = filters[0]
+
+    response = ce_client.get_cost_and_usage(
+        TimePeriod={
+            'Start': time_start.strftime('%Y-%m-%d'),
+            'End': time_end.strftime('%Y-%m-%d')
+        },
+        Granularity=granularity,
+        Filter=final_filter,
+        Metrics=['NetAmortizedCost'],
+        GroupBy=[
+            {'Type': 'DIMENSION', 'Key': group_by_key}
+        ]
+    )
+
+    costs = defaultdict(float)
+    for result in response['ResultsByTime']:
+        for group in result['Groups']:
+            dimension_value = group['Keys'][0]
+            cost = float(group['Metrics']['NetAmortizedCost']['Amount'])
+            costs[dimension_value] += cost
+
+    return costs, group_by_key
+
+
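The group-by cascade above means each call answers "what is the next level down?": no filters groups by service, a service filter groups by account, service plus account groups by usage type, and a usage type filter groups by region. A minimal sketch of a single call, with placeholder account IDs and dates:

```python
import boto3
from datetime import datetime

ce = boto3.client('ce', region_name='us-east-1')  # the Cost Explorer API endpoint lives in us-east-1
costs, group_by = get_filtered_costs(
    ce_client=ce,
    accounts=['111111111111', '222222222222'],  # placeholder account IDs
    time_start=datetime(2024, 1, 7),            # placeholder dates
    time_end=datetime(2024, 1, 14),             # End is exclusive in Cost Explorer
    service_filter='AWS Lambda',                # only a service filter -> grouped by LINKED_ACCOUNT
)
for account_id, cost in sorted(costs.items(), key=lambda kv: kv[1], reverse=True):
    print(group_by, account_id, f"${cost:,.2f}")
```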
+def compare_periods(prev_costs, curr_costs):
+    """
+    Compare two periods and find increases/decreases.
+
+    Returns:
+        list of dicts with dimension, prev_cost, curr_cost, change, pct_change
+    """
+    changes = []
+
+    # Get all dimensions from both periods
+    all_dimensions = set(prev_costs.keys()) | set(curr_costs.keys())
+
+    for dimension in all_dimensions:
+        prev_cost = prev_costs.get(dimension, 0)
+        curr_cost = curr_costs.get(dimension, 0)
+
+        change = curr_cost - prev_cost
+        pct_change = (change / prev_cost * 100) if prev_cost > 0 else (100 if curr_cost > 0 else 0)
+
+        # Only include if change is significant (>$10 and >5%)
+        if abs(change) > 10 and abs(pct_change) > 5:
+            changes.append({
+                'dimension': dimension,
+                'prev_cost': prev_cost,
+                'curr_cost': curr_cost,
+                'change': change,
+                'pct_change': pct_change
+            })
+
+    return changes
+
+
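To make the significance threshold concrete, a small sketch with made-up numbers; only movements larger than $10 and 5% survive:

```python
prev = {'AmazonRDS': 400.0, 'AWSLambda': 120.0, 'AmazonS3': 55.0}
curr = {'AmazonRDS': 480.0, 'AWSLambda': 121.0, 'AmazonS3': 40.0}
for c in compare_periods(prev, curr):
    print(c['dimension'], f"{c['change']:+.2f}", f"({c['pct_change']:+.1f}%)")
# AmazonRDS (+80.00, +20.0%) and AmazonS3 (-15.00, -27.3%) are kept;
# AWSLambda (+1.00, +0.8%) is filtered out as noise.
```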
+def analyze_drill_down(ce_client, accounts, num_weeks=4, service_filter=None,
+                       account_filter=None, usage_type_filter=None):
+    """
+    Analyze cost trends with drill-down filters.
+
+    Args:
+        ce_client: boto3 Cost Explorer client
+        accounts: List of account IDs
+        num_weeks: Number of weeks to analyze (default: 4)
+        service_filter: Optional service name to filter
+        account_filter: Optional account ID to filter
+        usage_type_filter: Optional usage type to filter
+
+    Returns:
+        dict with weekly comparisons and metadata
+    """
+    today = datetime.now()
+
+    # Calculate week boundaries (weeks start on Sunday; the end date is exclusive)
+    days_since_sunday = (today.weekday() + 1) % 7
+    most_recent_sunday = today - timedelta(days=days_since_sunday)
+
+    weeks = []
+    for i in range(num_weeks):
+        week_end = most_recent_sunday - timedelta(weeks=i)
+        week_start = week_end - timedelta(days=7)
+        weeks.append({
+            'start': week_start,
+            'end': week_end,
+            'label': f"Week of {week_start.strftime('%b %d')}"
+        })
+
+    # Reverse so oldest is first
+    weeks.reverse()
+
+    # Get costs for each week
+    weekly_costs = []
+    group_by_key = None
+    for week in weeks:
+        costs, group_by_key = get_filtered_costs(
+            ce_client, accounts, week['start'], week['end'],
+            service_filter=service_filter,
+            account_filter=account_filter,
+            usage_type_filter=usage_type_filter
+        )
+        weekly_costs.append({
+            'week': week,
+            'costs': costs
+        })
+
+    # Compare consecutive weeks
+    comparisons = []
+    for i in range(1, len(weekly_costs)):
+        prev = weekly_costs[i-1]
+        curr = weekly_costs[i]
+
+        changes = compare_periods(prev['costs'], curr['costs'])
+
+        # Sort by absolute change
+        changes.sort(key=lambda x: abs(x['change']), reverse=True)
+        increases = [c for c in changes if c['change'] > 0][:10]
+        decreases = [c for c in changes if c['change'] < 0][:10]
+
+        comparisons.append({
+            'prev_week': prev['week'],
+            'curr_week': curr['week'],
+            'increases': increases,
+            'decreases': decreases,
+            'total_increase': sum(c['change'] for c in increases),
+            'total_decrease': sum(c['change'] for c in decreases)
+        })
+
+    # Reverse so most recent is first
+    comparisons.reverse()
+
+    return {
+        'weeks': weeks,
+        'comparisons': comparisons,
+        'group_by': group_by_key,
+        'filters': {
+            'service': service_filter,
+            'account': account_filter,
+            'usage_type': usage_type_filter
+        }
+    }
+
+
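A minimal sketch of running a drill-down, again with placeholder account IDs; with only a service filter, the comparisons group week-over-week movement by account:

```python
import boto3

ce = boto3.client('ce', region_name='us-east-1')
drill_data = analyze_drill_down(
    ce_client=ce,
    accounts=['111111111111', '222222222222'],            # placeholder account IDs
    num_weeks=4,
    service_filter='Amazon Relational Database Service',  # drill into one service
)
print(drill_data['group_by'])  # -> 'LINKED_ACCOUNT' for a service-only drill-down
for comparison in drill_data['comparisons']:
    print(comparison['curr_week']['label'],
          f"top increases total ${comparison['total_increase']:,.2f}")
```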
+def format_drill_down_markdown(drill_data):
+    """
+    Format drill-down data as markdown.
+
+    Returns:
+        str: Markdown formatted report
+    """
+    lines = []
+    lines.append("# AWS Cost Drill-Down Report")
+    lines.append(f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+    lines.append("")
+
+    # Show active filters
+    filters = drill_data['filters']
+    lines.append("## Filters Applied")
+    lines.append("")
+    if filters['service']:
+        lines.append(f"- **Service:** {filters['service']}")
+    if filters['account']:
+        lines.append(f"- **Account:** {filters['account']}")
+    if filters['usage_type']:
+        lines.append(f"- **Usage Type:** {filters['usage_type']}")
+    if not any(filters.values()):
+        lines.append("- No filters (showing all services)")
+    lines.append("")
+
+    # Show what dimension we're grouping by
+    group_by = drill_data['group_by']
+    dimension_label = {
+        'SERVICE': 'Service',
+        'LINKED_ACCOUNT': 'Account',
+        'USAGE_TYPE': 'Usage Type',
+        'REGION': 'Region'
+    }.get(group_by, group_by)
+
+    lines.append(f"## Grouped By: {dimension_label}")
+    lines.append("")
+    lines.append("---")
+    lines.append("")
+
+    for comparison in drill_data['comparisons']:
+        prev_week = comparison['prev_week']
+        curr_week = comparison['curr_week']
+
+        lines.append(f"## {prev_week['label']} → {curr_week['label']}")
+        lines.append("")
+
+        # Top increases
+        if comparison['increases']:
+            lines.append("### 🔴 Top 10 Increases")
+            lines.append("")
+            lines.append(f"| {dimension_label} | Previous | Current | Change | % |")
+            lines.append("|---------|----------|---------|--------|---|")
+
+            for item in comparison['increases']:
+                dimension = item['dimension'][:60]
+                prev = f"${item['prev_cost']:,.2f}"
+                curr = f"${item['curr_cost']:,.2f}"
+                change = f"${item['change']:,.2f}"
+                pct = f"{item['pct_change']:+.1f}%"
+
+                lines.append(f"| {dimension} | {prev} | {curr} | {change} | {pct} |")
+
+            # Add total row
+            total_increase = comparison.get('total_increase', 0)
+            lines.append(f"| **TOTAL** | | | **${total_increase:,.2f}** | |")
+
+        lines.append("")
+
+        # Top decreases
+        if comparison['decreases']:
+            lines.append("### 🟢 Top 10 Decreases")
+            lines.append("")
+            lines.append(f"| {dimension_label} | Previous | Current | Change | % |")
+            lines.append("|---------|----------|---------|--------|---|")
+
+            for item in comparison['decreases']:
+                dimension = item['dimension'][:60]
+                prev = f"${item['prev_cost']:,.2f}"
+                curr = f"${item['curr_cost']:,.2f}"
+                change = f"${item['change']:,.2f}"
+                pct = f"{item['pct_change']:+.1f}%"
+
+                lines.append(f"| {dimension} | {prev} | {curr} | {change} | {pct} |")
+
+            # Add total row
+            total_decrease = comparison.get('total_decrease', 0)
+            lines.append(f"| **TOTAL** | | | **${total_decrease:,.2f}** | |")
+
+        lines.append("")
+
+        lines.append("---")
+        lines.append("")
+
+    return "\n".join(lines)
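Putting the two pieces together, a sketch that continues the analyze_drill_down example above; the output path is arbitrary:

```python
report_md = format_drill_down_markdown(drill_data)  # 'drill_data' from the sketch above
with open('cost-drill-down.md', 'w') as f:
    f.write(report_md)
```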