aws-cost-calculator-cli 1.5.2__py3-none-any.whl → 1.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {aws_cost_calculator_cli-1.5.2.dist-info → aws_cost_calculator_cli-1.6.2.dist-info}/METADATA +150 -24
- aws_cost_calculator_cli-1.6.2.dist-info/RECORD +25 -0
- {aws_cost_calculator_cli-1.5.2.dist-info → aws_cost_calculator_cli-1.6.2.dist-info}/WHEEL +1 -1
- {aws_cost_calculator_cli-1.5.2.dist-info → aws_cost_calculator_cli-1.6.2.dist-info}/top_level.txt +1 -0
- backend/__init__.py +1 -0
- backend/algorithms/__init__.py +1 -0
- backend/algorithms/analyze.py +272 -0
- backend/algorithms/drill.py +323 -0
- backend/algorithms/monthly.py +242 -0
- backend/algorithms/trends.py +353 -0
- backend/handlers/__init__.py +1 -0
- backend/handlers/analyze.py +112 -0
- backend/handlers/drill.py +117 -0
- backend/handlers/monthly.py +106 -0
- backend/handlers/profiles.py +148 -0
- backend/handlers/trends.py +106 -0
- cost_calculator/cli.py +169 -41
- aws_cost_calculator_cli-1.5.2.dist-info/RECORD +0 -13
- {aws_cost_calculator_cli-1.5.2.dist-info → aws_cost_calculator_cli-1.6.2.dist-info}/entry_points.txt +0 -0
- {aws_cost_calculator_cli-1.5.2.dist-info → aws_cost_calculator_cli-1.6.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,353 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Cost trends analysis module
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import boto3
|
|
7
|
+
from datetime import datetime, timedelta
|
|
8
|
+
from collections import defaultdict
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def get_week_costs(ce_client, accounts, week_start, week_end):
    """
    Fetch one week's costs, grouped by service and usage type.

    Queries Cost Explorer at DAILY granularity for NetAmortizedCost,
    restricted to the given linked accounts and AWS billing entity,
    excluding Tax and Support record types, then rolls the daily rows
    up into weekly totals.

    Args:
        ce_client: boto3 Cost Explorer client
        accounts: List of account IDs
        week_start: Start date (datetime)
        week_end: End date (datetime; exclusive per Cost Explorer semantics)

    Returns:
        dict: {service: {usage_type: cost}}
    """
    # Build the filter from named pieces rather than one big literal.
    account_clause = {
        "Dimensions": {
            "Key": "LINKED_ACCOUNT",
            "Values": accounts,
        }
    }
    billing_clause = {
        "Dimensions": {
            "Key": "BILLING_ENTITY",
            "Values": ["AWS"],
        }
    }
    exclusion_clause = {
        "Not": {
            "Dimensions": {
                "Key": "RECORD_TYPE",
                "Values": ["Tax", "Support"],
            }
        }
    }

    response = ce_client.get_cost_and_usage(
        TimePeriod={
            'Start': week_start.strftime('%Y-%m-%d'),
            'End': week_end.strftime('%Y-%m-%d'),
        },
        Granularity='DAILY',
        Metrics=['NetAmortizedCost'],
        GroupBy=[
            {'Type': 'DIMENSION', 'Key': 'SERVICE'},
            {'Type': 'DIMENSION', 'Key': 'USAGE_TYPE'},
        ],
        Filter={"And": [account_clause, billing_clause, exclusion_clause]},
    )

    # Sum each (service, usage_type) pair across all days of the week.
    totals = defaultdict(lambda: defaultdict(float))
    for day in response['ResultsByTime']:
        for group in day.get('Groups', []):
            keys = group['Keys']
            amount = float(group['Metrics']['NetAmortizedCost']['Amount'])
            totals[keys[0]][keys[1]] += amount

    return totals
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def compare_weeks(prev_week_costs, curr_week_costs):
    """
    Diff two weekly cost snapshots at the service level.

    Each snapshot is a {service: {usage_type: cost}} mapping. Usage types
    are summed per service before comparing. Only movements that are both
    material in dollars (>$10) and in relative terms (>5%) are reported,
    to filter out noise.

    Returns:
        list of dicts with service, prev_cost, curr_cost, change, pct_change
    """
    deltas = []

    # Consider every service that appears in either week.
    for service in set(prev_week_costs) | set(curr_week_costs):
        before = sum(prev_week_costs.get(service, {}).values())
        after = sum(curr_week_costs.get(service, {}).values())
        delta = after - before

        if before > 0:
            pct = delta / before * 100
        elif after > 0:
            # Service is new this week: report it as a 100% increase.
            pct = 100
        else:
            pct = 0

        if abs(delta) > 10 and abs(pct) > 5:
            deltas.append({
                'service': service,
                'prev_cost': before,
                'curr_cost': after,
                'change': delta,
                'pct_change': pct,
            })

    return deltas
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def analyze_trends(ce_client, accounts, num_weeks=3):
    """
    Analyze cost trends over the last N weekly buckets.

    Produces week-over-week comparisons (each week vs the previous one)
    and trailing-30-day comparisons (each week vs the bucket four weeks
    earlier). With the default num_weeks=3 the T-30 list is empty, since
    no bucket has a counterpart four weeks back.

    Args:
        ce_client: boto3 Cost Explorer client
        accounts: List of account IDs
        num_weeks: Number of weeks to analyze (default: 3)

    Returns:
        dict with 'weeks', 'wow_comparisons' and 't30_comparisons'
    """
    def summarize(earlier, later):
        # Diff two snapshots and keep the ten biggest movers in each direction.
        changes = compare_weeks(earlier, later)
        changes.sort(key=lambda c: abs(c['change']), reverse=True)
        ups = [c for c in changes if c['change'] > 0][:10]
        downs = [c for c in changes if c['change'] < 0][:10]
        return ups, downs

    today = datetime.now()

    # Anchor on the most recent Sunday so every bucket is a full 7-day span.
    # NOTE(review): with Cost Explorer's exclusive End date, each bucket
    # actually covers Sunday..Saturday, not "Monday to Sunday" — confirm
    # the intended week boundary.
    most_recent_sunday = today - timedelta(days=(today.weekday() + 1) % 7)

    weeks = []
    for offset in range(num_weeks):
        bucket_end = most_recent_sunday - timedelta(weeks=offset)
        bucket_start = bucket_end - timedelta(days=7)
        weeks.append({
            'start': bucket_start,
            'end': bucket_end,
            'label': f"Week of {bucket_start.strftime('%b %d')}",
        })
    weeks.reverse()  # oldest bucket first

    # Pull costs for every bucket.
    weekly_costs = [
        {'week': w, 'costs': get_week_costs(ce_client, accounts, w['start'], w['end'])}
        for w in weeks
    ]

    # Week-over-week: consecutive bucket pairs.
    wow_comparisons = []
    for prev, curr in zip(weekly_costs, weekly_costs[1:]):
        ups, downs = summarize(prev['costs'], curr['costs'])
        wow_comparisons.append({
            'prev_week': prev['week'],
            'curr_week': curr['week'],
            'increases': ups,
            'decreases': downs,
            'total_increase': sum(c['change'] for c in ups),
            'total_decrease': sum(c['change'] for c in downs),
        })

    # T-30: each bucket vs the one four buckets earlier (~30 days ago).
    t30_comparisons = []
    for idx, curr in enumerate(weekly_costs):
        if idx < 4:
            continue
        baseline = weekly_costs[idx - 4]
        ups, downs = summarize(baseline['costs'], curr['costs'])
        t30_comparisons.append({
            'baseline_week': baseline['week'],
            'curr_week': curr['week'],
            'increases': ups,
            'decreases': downs,
            'total_increase': sum(c['change'] for c in ups),
            'total_decrease': sum(c['change'] for c in downs),
        })

    # Present the most recent comparison first in both lists.
    wow_comparisons.reverse()
    t30_comparisons.reverse()

    return {
        'weeks': weeks,
        'wow_comparisons': wow_comparisons,
        't30_comparisons': t30_comparisons,
    }
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def _change_table(title, baseline_header, items, total):
    """Render one increases/decreases section as a list of markdown lines.

    Args:
        title: Section heading text (after the '### ' prefix).
        baseline_header: Label of the baseline column ("Previous" or
            "30 Days Ago").
        items: List of change dicts (service, prev_cost, curr_cost,
            change, pct_change).
        total: Sum shown in the trailing TOTAL row.

    Returns:
        list[str]: Markdown lines, ending with a blank line.
    """
    headers = ["Service", baseline_header, "Current", "Change", "%"]
    lines = [
        f"### {title}",
        "",
        "| " + " | ".join(headers) + " |",
        # Separator width matches each header cell ("| header |" padding).
        "|" + "|".join("-" * (len(h) + 2) for h in headers) + "|",
    ]

    for item in items:
        lines.append(
            f"| {item['service'][:60]} "       # cap very long service names
            f"| ${item['prev_cost']:,.2f} "
            f"| ${item['curr_cost']:,.2f} "
            f"| ${item['change']:,.2f} "
            f"| {item['pct_change']:+.1f}% |"
        )

    lines.append(f"| **TOTAL** | | | **${total:,.2f}** | |")
    lines.append("")
    return lines


def format_trends_markdown(trends_data):
    """
    Format trends data as a markdown report.

    Args:
        trends_data: dict as returned by analyze_trends, with
            'wow_comparisons' and 't30_comparisons' lists.

    Returns:
        str: Markdown formatted report
    """
    lines = [
        "# AWS Cost Trends Report (Service Level)",
        f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        "",
        "## Methodology",
        "",
        "This report shows two types of comparisons:",
        "",
        "1. **Week-over-Week (WoW)**: Compares each week to the previous week",
        "   - Good for catching immediate changes and spikes",
        "   - Shows short-term volatility",
        "",
        "2. **Trailing 30-Day (T-30)**: Compares each week to the same week 4 weeks ago",
        "   - Filters out weekly noise",
        "   - Shows sustained trends and real cost changes",
        "",
        "---",
        "",
        "# Week-over-Week Changes",
        "",
    ]

    for comparison in trends_data['wow_comparisons']:
        prev_week = comparison['prev_week']
        curr_week = comparison['curr_week']

        lines.append(f"## {prev_week['label']} → {curr_week['label']}")
        lines.append("")

        if comparison['increases']:
            lines.extend(_change_table(
                "🔴 Top 10 Increases", "Previous",
                comparison['increases'], comparison.get('total_increase', 0)))

        if comparison['decreases']:
            lines.extend(_change_table(
                "🟢 Top 10 Decreases", "Previous",
                comparison['decreases'], comparison.get('total_decrease', 0)))

        lines.append("---")
        lines.append("")

    lines.append("")
    lines.append("# Trailing 30-Day Comparisons (T-30)")
    lines.append("")

    for comparison in trends_data['t30_comparisons']:
        baseline_week = comparison['baseline_week']
        curr_week = comparison['curr_week']

        lines.append(f"## {curr_week['label']} vs {baseline_week['label']} (30 days ago)")
        lines.append("")

        if comparison['increases']:
            lines.extend(_change_table(
                "🔴 Top 10 Increases (vs 30 days ago)", "30 Days Ago",
                comparison['increases'], comparison.get('total_increase', 0)))

        if comparison['decreases']:
            lines.extend(_change_table(
                "🟢 Top 10 Decreases (vs 30 days ago)", "30 Days Ago",
                comparison['decreases'], comparison.get('total_decrease', 0)))

        lines.append("---")
        lines.append("")

    return "\n".join(lines)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Handlers package
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Lambda handler for pandas-based analysis.
|
|
3
|
+
"""
|
|
4
|
+
import json
|
|
5
|
+
import os
|
|
6
|
+
import boto3
|
|
7
|
+
from algorithms.analyze import analyze_aggregated, search_services
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def handler(event, context):
    """Handle analysis requests.

    Validates the shared API secret, then runs either a service search
    (type='search') or an aggregated analysis against Cost Explorer using
    caller-supplied credentials.

    Returns:
        dict: API Gateway-style response (statusCode/headers/body).
    """

    # Parse the request body: it may be a JSON string (API Gateway), an
    # already-parsed dict (direct invoke), or absent entirely.
    try:
        if isinstance(event.get('body'), str):
            body = json.loads(event['body'])
        else:
            body = event.get('body', {})
    except (TypeError, ValueError):
        # Malformed JSON: fall back to treating the whole event as the body.
        # (Narrowed from a bare except so signals like KeyboardInterrupt
        # are not swallowed.)
        body = event

    # Validate API secret; header name casing varies by client/gateway.
    # `or {}` also guards against an explicit headers=None in the event.
    headers = event.get('headers') or {}
    api_secret = headers.get('X-API-Secret') or headers.get('x-api-secret')

    secret_name = os.environ.get('SECRET_NAME', 'cost-calculator-api-secret')
    secrets_client = boto3.client('secretsmanager')

    try:
        secret_response = secrets_client.get_secret_value(SecretId=secret_name)
        expected_secret = secret_response['SecretString']

        if api_secret != expected_secret:
            return {
                'statusCode': 401,
                'headers': {'Content-Type': 'application/json'},
                'body': json.dumps({'error': 'Unauthorized'})
            }
    except Exception as e:
        return {
            'statusCode': 500,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps({'error': f'Secret validation failed: {str(e)}'})
        }

    # Get parameters
    credentials = body.get('credentials', {})
    accounts = body.get('accounts', [])
    weeks = body.get('weeks', 12)
    analysis_type = body.get('type', 'summary')

    # Search-only parameters
    pattern = body.get('pattern')
    min_cost = body.get('min_cost')

    if not accounts:
        return {
            'statusCode': 400,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps({'error': 'accounts required'})
        }

    try:
        # Cost Explorer client with the caller-supplied credentials
        # (endpoint hardcoded to us-east-1).
        ce_client = boto3.client(
            'ce',
            region_name='us-east-1',
            aws_access_key_id=credentials['access_key'],
            aws_secret_access_key=credentials['secret_key'],
            aws_session_token=credentials.get('session_token')
        )

        # Run analysis
        if analysis_type == 'search':
            result = search_services(ce_client, accounts, weeks, pattern, min_cost)
        else:
            result = analyze_aggregated(ce_client, accounts, weeks, analysis_type)

        # Make the result JSON-safe: ISO-format datetimes and replace
        # NaN/Infinity floats with null (json.dumps would otherwise emit
        # non-standard tokens clients may reject).
        import math

        def convert_values(obj):
            if isinstance(obj, dict):
                return {k: convert_values(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [convert_values(item) for item in obj]
            elif hasattr(obj, 'isoformat'):
                return obj.isoformat()
            elif isinstance(obj, float) and not math.isfinite(obj):
                # Covers both NaN and +/-Infinity.
                return None
            return obj

        result = convert_values(result)

        return {
            'statusCode': 200,
            'headers': {
                'Access-Control-Allow-Origin': '*',
                'Content-Type': 'application/json'
            },
            'body': json.dumps(result)
        }

    except Exception as e:
        # Top-level boundary: surface the failure to the caller as a 500.
        return {
            'statusCode': 500,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps({'error': str(e)})
        }
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Lambda handler for drill-down analysis.
|
|
3
|
+
"""
|
|
4
|
+
import json
|
|
5
|
+
import boto3
|
|
6
|
+
import os
|
|
7
|
+
from algorithms.drill import analyze_drill_down
|
|
8
|
+
|
|
9
|
+
# Get API secret from Secrets Manager.
# Runs at module import (Lambda cold start), so the secret is fetched once
# per process and warm invocations reuse the cached value.
secrets_client = boto3.client('secretsmanager')
# Required environment variable — import fails fast (KeyError) if unset.
api_secret_arn = os.environ['API_SECRET_ARN']
api_secret = secrets_client.get_secret_value(SecretId=api_secret_arn)['SecretString']
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def handler(event, context):
    """
    Lambda handler for drill-down analysis.

    Expected event:
    {
        "credentials": {
            "access_key": "AKIA...",
            "secret_key": "...",
            "session_token": "..." (optional)
        },
        "accounts": ["123456789012"],
        "weeks": 4,
        "service": "EC2 - Other" (optional),
        "account": "123456789012" (optional),
        "usage_type": "DataTransfer-Out-Bytes" (optional)
    }

    Returns:
        dict: API Gateway-style response (statusCode/headers/body).
    """
    # Handle OPTIONS for CORS preflight
    if event.get('requestContext', {}).get('http', {}).get('method') == 'OPTIONS':
        return {
            'statusCode': 200,
            'headers': {
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Headers': '*',
                'Access-Control-Allow-Methods': 'POST, OPTIONS'
            },
            'body': ''
        }

    try:
        # Validate API secret; header name casing varies by client/gateway.
        # `or {}` also guards against an explicit headers=None in the event.
        headers = event.get('headers') or {}
        provided_secret = headers.get('x-api-secret') or headers.get('X-API-Secret')

        if provided_secret != api_secret:
            return {
                'statusCode': 401,
                'headers': {'Access-Control-Allow-Origin': '*'},
                'body': json.dumps({'error': 'Unauthorized'})
            }

        # Parse the request body: accept a JSON string (API Gateway) or an
        # already-parsed dict (direct invoke), matching the analyze handler.
        raw_body = event.get('body', '{}')
        body = json.loads(raw_body) if isinstance(raw_body, str) else (raw_body or {})

        credentials = body.get('credentials', {})
        accounts = body.get('accounts', [])
        weeks = body.get('weeks', 4)
        service_filter = body.get('service')
        account_filter = body.get('account')
        usage_type_filter = body.get('usage_type')

        if not credentials or not accounts:
            return {
                'statusCode': 400,
                'headers': {'Access-Control-Allow-Origin': '*'},
                'body': json.dumps({'error': 'Missing credentials or accounts'})
            }

        # Create Cost Explorer client with the provided credentials
        # (endpoint hardcoded to us-east-1).
        ce_client = boto3.client(
            'ce',
            region_name='us-east-1',
            aws_access_key_id=credentials['access_key'],
            aws_secret_access_key=credentials['secret_key'],
            aws_session_token=credentials.get('session_token')
        )

        # Run analysis
        drill_data = analyze_drill_down(
            ce_client, accounts, weeks,
            service_filter=service_filter,
            account_filter=account_filter,
            usage_type_filter=usage_type_filter
        )

        # Recursively convert datetime/date objects to ISO strings so the
        # payload is JSON serializable.
        def convert_dates(obj):
            if isinstance(obj, dict):
                return {k: convert_dates(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [convert_dates(item) for item in obj]
            elif hasattr(obj, 'isoformat'):
                return obj.isoformat()
            return obj

        drill_data = convert_dates(drill_data)

        return {
            'statusCode': 200,
            'headers': {
                'Access-Control-Allow-Origin': '*',
                'Content-Type': 'application/json'
            },
            'body': json.dumps(drill_data)
        }

    except Exception as e:
        # Top-level boundary: surface the failure to the caller as a 500.
        return {
            'statusCode': 500,
            'headers': {'Access-Control-Allow-Origin': '*'},
            'body': json.dumps({'error': str(e)})
        }
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Lambda handler for monthly analysis.
|
|
3
|
+
"""
|
|
4
|
+
import json
|
|
5
|
+
import boto3
|
|
6
|
+
import os
|
|
7
|
+
from algorithms.monthly import analyze_monthly_trends
|
|
8
|
+
|
|
9
|
+
# Get API secret from Secrets Manager.
# Runs at module import (Lambda cold start), so the secret is fetched once
# per process and warm invocations reuse the cached value.
secrets_client = boto3.client('secretsmanager')
# Required environment variable — import fails fast (KeyError) if unset.
api_secret_arn = os.environ['API_SECRET_ARN']
api_secret = secrets_client.get_secret_value(SecretId=api_secret_arn)['SecretString']
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def handler(event, context):
    """
    Lambda handler for monthly analysis.

    Expected event:
    {
        "credentials": {
            "access_key": "AKIA...",
            "secret_key": "...",
            "session_token": "..." (optional)
        },
        "accounts": ["123456789012"],
        "months": 6
    }

    Returns:
        dict: API Gateway-style response (statusCode/headers/body).
    """
    # Handle OPTIONS for CORS preflight
    if event.get('requestContext', {}).get('http', {}).get('method') == 'OPTIONS':
        return {
            'statusCode': 200,
            'headers': {
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Headers': '*',
                'Access-Control-Allow-Methods': 'POST, OPTIONS'
            },
            'body': ''
        }

    try:
        # Validate API secret; header name casing varies by client/gateway.
        # `or {}` also guards against an explicit headers=None in the event.
        headers = event.get('headers') or {}
        provided_secret = headers.get('x-api-secret') or headers.get('X-API-Secret')

        if provided_secret != api_secret:
            return {
                'statusCode': 401,
                'headers': {'Access-Control-Allow-Origin': '*'},
                'body': json.dumps({'error': 'Unauthorized'})
            }

        # Parse the request body: accept a JSON string (API Gateway) or an
        # already-parsed dict (direct invoke), matching the analyze handler.
        raw_body = event.get('body', '{}')
        body = json.loads(raw_body) if isinstance(raw_body, str) else (raw_body or {})

        credentials = body.get('credentials', {})
        accounts = body.get('accounts', [])
        months = body.get('months', 6)

        if not credentials or not accounts:
            return {
                'statusCode': 400,
                'headers': {'Access-Control-Allow-Origin': '*'},
                'body': json.dumps({'error': 'Missing credentials or accounts'})
            }

        # Create Cost Explorer client with the provided credentials
        # (endpoint hardcoded to us-east-1).
        ce_client = boto3.client(
            'ce',
            region_name='us-east-1',
            aws_access_key_id=credentials['access_key'],
            aws_secret_access_key=credentials['secret_key'],
            aws_session_token=credentials.get('session_token')
        )

        # Run analysis
        monthly_data = analyze_monthly_trends(ce_client, accounts, months)

        # Recursively convert datetime/date objects to ISO strings so the
        # payload is JSON serializable.
        def convert_dates(obj):
            if isinstance(obj, dict):
                return {k: convert_dates(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [convert_dates(item) for item in obj]
            elif hasattr(obj, 'isoformat'):
                return obj.isoformat()
            return obj

        monthly_data = convert_dates(monthly_data)

        return {
            'statusCode': 200,
            'headers': {
                'Access-Control-Allow-Origin': '*',
                'Content-Type': 'application/json'
            },
            'body': json.dumps(monthly_data)
        }

    except Exception as e:
        # Top-level boundary: surface the failure to the caller as a 500.
        return {
            'statusCode': 500,
            'headers': {'Access-Control-Allow-Origin': '*'},
            'body': json.dumps({'error': str(e)})
        }
|