aws-cost-calculator-cli 1.6.3__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of aws-cost-calculator-cli might be problematic.
- {aws_cost_calculator_cli-1.6.3.dist-info → aws_cost_calculator_cli-2.0.0.dist-info}/METADATA +13 -1
- aws_cost_calculator_cli-2.0.0.dist-info/RECORD +15 -0
- {aws_cost_calculator_cli-1.6.3.dist-info → aws_cost_calculator_cli-2.0.0.dist-info}/WHEEL +1 -1
- {aws_cost_calculator_cli-1.6.3.dist-info → aws_cost_calculator_cli-2.0.0.dist-info}/top_level.txt +0 -1
- cost_calculator/api_client.py +2 -1
- cost_calculator/cli.py +1020 -109
- cost_calculator/cur.py +244 -0
- cost_calculator/executor.py +105 -95
- cost_calculator/forensics.py +323 -0
- aws_cost_calculator_cli-1.6.3.dist-info/RECORD +0 -25
- backend/__init__.py +0 -1
- backend/algorithms/__init__.py +0 -1
- backend/algorithms/analyze.py +0 -272
- backend/algorithms/drill.py +0 -323
- backend/algorithms/monthly.py +0 -242
- backend/algorithms/trends.py +0 -353
- backend/handlers/__init__.py +0 -1
- backend/handlers/analyze.py +0 -112
- backend/handlers/drill.py +0 -117
- backend/handlers/monthly.py +0 -106
- backend/handlers/profiles.py +0 -148
- backend/handlers/trends.py +0 -106
- {aws_cost_calculator_cli-1.6.3.dist-info → aws_cost_calculator_cli-2.0.0.dist-info}/entry_points.txt +0 -0
- {aws_cost_calculator_cli-1.6.3.dist-info → aws_cost_calculator_cli-2.0.0.dist-info}/licenses/LICENSE +0 -0
cost_calculator/cli.py
CHANGED
@@ -67,59 +67,39 @@ def apply_auth_options(config, sso=None, access_key_id=None, secret_access_key=N
     return config


-def
-    """
+def get_api_secret():
+    """Get API secret from config file or environment variable"""
     import os
-    import requests

+    # Check environment variable first
+    api_secret = os.environ.get('COST_API_SECRET')
+    if api_secret:
+        return api_secret
+
+    # Check config file
     config_dir = Path.home() / '.config' / 'cost-calculator'
-    config_file = config_dir / '
-    creds_file = config_dir / 'credentials.json'
+    config_file = config_dir / 'config.json'

-    # Try local file first
     if config_file.exists():
         with open(config_file) as f:
- … (12 removed lines not shown in the diff view)
-                'aws_secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY'],
-                'aws_session_token': os.environ.get('AWS_SESSION_TOKEN')
-            }
-            return profile
-
-        raise click.ClickException(
-            f"No credentials found for profile '{profile_name}'.\n"
-            f"Run: cc configure --profile {profile_name}"
-        )
-
-    with open(creds_file) as f:
-        creds = json.load(f)
-
-    if profile_name not in creds:
-        raise click.ClickException(
-            f"No credentials found for profile '{profile_name}'.\n"
-            f"Run: cc configure --profile {profile_name}"
-        )
-
-    profile['credentials'] = creds[profile_name]
-
-    return profile
+            config = json.load(f)
+            return config.get('api_secret')
+
+    return None
+
+
+def load_profile(profile_name):
+    """Load profile configuration from DynamoDB API (API-only, no local files)"""
+    import requests
+
+    # Get API secret
+    api_secret = get_api_secret()

-    # Profile not found locally - try DynamoDB API
-    api_secret = os.environ.get('COST_API_SECRET')
     if not api_secret:
         raise click.ClickException(
- … (2 removed lines not shown in the diff view)
+            "No API secret configured.\n"
+            "Run: cc configure --api-secret YOUR_SECRET\n"
+            "Or set environment variable: export COST_API_SECRET=YOUR_SECRET"
         )

     try:
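The new get_api_secret() above resolves the secret from the COST_API_SECRET environment variable first and only then from ~/.config/cost-calculator/config.json. A minimal usage sketch, assuming the function is importable from cost_calculator.cli and using a hypothetical secret value:

    import os
    from cost_calculator.cli import get_api_secret

    os.environ['COST_API_SECRET'] = 'example-secret'   # hypothetical value
    assert get_api_secret() == 'example-secret'        # env var takes precedence over config.json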
@@ -136,8 +116,11 @@ def load_profile(profile_name):
         profile_data = response_data.get('profile', response_data)
         profile = {'accounts': profile_data['accounts']}

+        # If profile has aws_profile field, use it
+        if 'aws_profile' in profile_data:
+            profile['aws_profile'] = profile_data['aws_profile']
         # Check for AWS_PROFILE environment variable (SSO support)
- … (1 removed line not shown in the diff view)
+        elif os.environ.get('AWS_PROFILE'):
             profile['aws_profile'] = os.environ['AWS_PROFILE']
         # Use environment credentials
         elif os.environ.get('AWS_ACCESS_KEY_ID'):
@@ -146,6 +129,33 @@ def load_profile(profile_name):
                 'aws_secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY'],
                 'aws_session_token': os.environ.get('AWS_SESSION_TOKEN')
             }
+        else:
+            # Try to find a matching AWS profile by name
+            # This allows "khoros" profile to work with "khoros_umbrella" AWS profile
+            import subprocess
+            try:
+                result = subprocess.run(
+                    ['aws', 'configure', 'list-profiles'],
+                    capture_output=True,
+                    text=True,
+                    timeout=5
+                )
+                if result.returncode == 0:
+                    available_profiles = result.stdout.strip().split('\n')
+                    # Try exact match first
+                    if profile_name in available_profiles:
+                        profile['aws_profile'] = profile_name
+                    # Try with common suffixes
+                    elif f"{profile_name}_umbrella" in available_profiles:
+                        profile['aws_profile'] = f"{profile_name}_umbrella"
+                    elif f"{profile_name}-umbrella" in available_profiles:
+                        profile['aws_profile'] = f"{profile_name}-umbrella"
+                    elif f"{profile_name}_prod" in available_profiles:
+                        profile['aws_profile'] = f"{profile_name}_prod"
+                # If no match found, leave it unset - user must provide --sso
+            except:
+                # If we can't list profiles, leave it unset - user must provide --sso
+                pass

         return profile
     else:
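The fallback above shells out to `aws configure list-profiles` and tries the profile name plus a few common suffixes. A minimal sketch of that matching order in isolation (the profile names are hypothetical):

    available_profiles = ['khoros_umbrella', 'sandbox']    # hypothetical list-profiles output
    profile_name = 'khoros'
    for candidate in (profile_name, f"{profile_name}_umbrella",
                      f"{profile_name}-umbrella", f"{profile_name}_prod"):
        if candidate in available_profiles:
            print(candidate)    # khoros_umbrella — first match wins
            break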
@@ -156,7 +166,7 @@ def load_profile(profile_name):
     except requests.exceptions.RequestException as e:
         raise click.ClickException(
             f"Failed to fetch profile from API: {e}\n"
- … (1 removed line not shown in the diff view)
+            "Check your API secret and network connection."
         )


@@ -325,7 +335,8 @@ def calculate_costs(profile_config, accounts, start_date, offset, window):
     # Calculate days in the month that the support covers
     # Support on Nov 1 covers October (31 days)
     support_month = support_month_date - timedelta(days=1)  # Go back to previous month
- … (1 removed line not shown in the diff view)
+    import calendar
+    days_in_support_month = calendar.monthrange(support_month.year, support_month.month)[1]

     # Support allocation: divide by 2 (50% allocation), then by days in month
     support_per_day = (support_cost / 2) / days_in_support_month
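The support-allocation change above divides the month's support charge by 2 (50% allocation) and then by the number of days in the covered month. A quick worked sketch of that arithmetic (the $6,200 charge is hypothetical):

    import calendar
    from datetime import date, timedelta

    support_cost = 6200.0                                   # hypothetical support charge billed on Nov 1
    support_month = date(2025, 11, 1) - timedelta(days=1)   # covers October
    days_in_support_month = calendar.monthrange(support_month.year, support_month.month)[1]  # 31
    support_per_day = (support_cost / 2) / days_in_support_month                             # 100.0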
@@ -386,6 +397,43 @@ def cli():
     pass


+@cli.command('setup-cur')
+@click.option('--database', required=True, prompt='CUR Athena Database', help='Athena database name for CUR')
+@click.option('--table', required=True, prompt='CUR Table Name', help='CUR table name')
+@click.option('--s3-output', required=True, prompt='S3 Output Location', help='S3 bucket for Athena query results')
+def setup_cur(database, table, s3_output):
+    """
+    Configure CUR (Cost and Usage Report) settings for resource-level queries
+
+    Saves CUR configuration to ~/.config/cost-calculator/cur_config.json
+
+    Example:
+        cc setup-cur --database my_cur_db --table cur_table --s3-output s3://my-bucket/
+    """
+    import json
+
+    config_dir = Path.home() / '.config' / 'cost-calculator'
+    config_dir.mkdir(parents=True, exist_ok=True)
+
+    config_file = config_dir / 'cur_config.json'
+
+    config = {
+        'database': database,
+        'table': table,
+        's3_output': s3_output
+    }
+
+    with open(config_file, 'w') as f:
+        json.dump(config, f, indent=2)
+
+    click.echo(f"✓ CUR configuration saved to {config_file}")
+    click.echo(f"  Database: {database}")
+    click.echo(f"  Table: {table}")
+    click.echo(f"  S3 Output: {s3_output}")
+    click.echo("")
+    click.echo("You can now use: cc drill --service 'EC2 - Other' --resources")
+
+
 @cli.command('setup-api')
 @click.option('--api-secret', required=True, prompt=True, hide_input=True, help='COST_API_SECRET value')
 def setup_api(api_secret):
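setup-cur persists its three prompted values verbatim. A minimal sketch of reading the file back after running the command (the values shown are hypothetical):

    import json
    from pathlib import Path

    cur_config = Path.home() / '.config' / 'cost-calculator' / 'cur_config.json'
    cfg = json.loads(cur_config.read_text())
    print(cfg['database'], cfg['table'], cfg['s3_output'])   # e.g. my_cur_db cur_table s3://my-bucket/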
@@ -669,77 +717,79 @@ def setup():


 @cli.command()
-@click.option('--
-@click.option('--
- … (3 removed lines not shown in the diff view)
-def configure(profile, access_key_id, secret_access_key, session_token, region):
-    """Configure AWS credentials for a profile (alternative to SSO)"""
+@click.option('--api-secret', help='API secret for DynamoDB profile access')
+@click.option('--show', is_flag=True, help='Show current configuration')
+def configure(api_secret, show):
+    """
+    Configure Cost Calculator CLI settings.

- … (2 removed lines not shown in the diff view)
-    creds_file = config_dir / 'credentials.json'
+    This tool requires an API secret to access profiles stored in DynamoDB.
+    The secret can be configured here or set via COST_API_SECRET environment variable.

- … (1 removed line not shown in the diff view)
+    Examples:
+        # Configure API secret
+        cc configure --api-secret YOUR_SECRET_KEY
+
+        # Show current configuration
+        cc configure --show
+
+        # Use environment variable instead (no configuration needed)
+        export COST_API_SECRET=YOUR_SECRET_KEY
+    """
+    import os
+
+    config_dir = Path.home() / '.config' / 'cost-calculator'
     config_dir.mkdir(parents=True, exist_ok=True)
+    config_file = config_dir / 'config.json'

- … (3 removed lines not shown in the diff view)
+    if show:
+        # Show current configuration
+        if config_file.exists():
             with open(config_file) as f:
- … (9 removed lines not shown in the diff view)
+                config = json.load(f)
+            if 'api_secret' in config:
+                masked_secret = config['api_secret'][:8] + '...' + config['api_secret'][-4:]
+                click.echo(f"API Secret: {masked_secret} (configured)")
+            else:
+                click.echo("API Secret: Not configured")
+        else:
+            click.echo("No configuration file found")
+
+        # Check environment variable
+        import os
+        if os.environ.get('COST_API_SECRET'):
+            click.echo("Environment: COST_API_SECRET is set")
+        else:
+            click.echo("Environment: COST_API_SECRET is not set")
+
         return

- … (5 removed lines not shown in the diff view)
-    with open(config_file, 'w') as f:
-        json.dump(profiles, f, indent=2)
-
-    # Load or create credentials file
-    if creds_file.exists() and creds_file.stat().st_size > 0:
-        try:
-            with open(creds_file) as f:
-                creds = json.load(f)
-        except json.JSONDecodeError:
-            creds = {}
-    else:
-        creds = {}
+    if not api_secret:
+        raise click.ClickException(
+            "Please provide --api-secret or use --show to view current configuration\n"
+            "Example: cc configure --api-secret YOUR_SECRET_KEY"
+        )

-    #
- … (4 removed lines not shown in the diff view)
-    }
+    # Load existing config
+    config = {}
+    if config_file.exists():
+        with open(config_file) as f:
+            config = json.load(f)

- … (2 removed lines not shown in the diff view)
+    # Update API secret
+    config['api_secret'] = api_secret

-    # Save
-    with open(
-    json.dump(
+    # Save config
+    with open(config_file, 'w') as f:
+        json.dump(config, f, indent=2)

-    # Set
- … (1 removed line not shown in the diff view)
+    # Set restrictive permissions
+    os.chmod(config_file, 0o600)

- … (1 removed line not shown in the diff view)
-    click.echo(f"✓
-    click.echo(f"\
-    click.echo("\nNote:
-    click.echo("  you'll need to reconfigure when they expire.")
- … (1 removed line not shown in the diff view)
+    masked_secret = api_secret[:8] + '...' + api_secret[-4:]
+    click.echo(f"✓ API secret configured: {masked_secret}")
+    click.echo(f"\nYou can now run: cc calculate --profile PROFILE_NAME")
+    click.echo(f"\nNote: Profiles are stored in DynamoDB and accessed via the API.")

 @cli.command()
 @click.option('--profile', required=True, help='Profile name')
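The rewritten configure command stores only the API secret and tightens the file mode to 0600. A minimal sketch of inspecting the result on disk (the secret value is hypothetical):

    import json, stat
    from pathlib import Path

    cfg_path = Path.home() / '.config' / 'cost-calculator' / 'config.json'
    print(json.loads(cfg_path.read_text()))        # {'api_secret': 'YOUR_SECRET_KEY'}
    print(stat.filemode(cfg_path.stat().st_mode))  # -rw-------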
@@ -887,14 +937,19 @@ def monthly(profile, months, output, json_output, sso, access_key_id, secret_acc
 @click.option('--service', help='Filter by service name (e.g., "EC2 - Other")')
 @click.option('--account', help='Filter by account ID')
 @click.option('--usage-type', help='Filter by usage type')
+@click.option('--resources', is_flag=True, help='Show individual resource IDs (requires CUR, uses Athena)')
 @click.option('--output', default='drill_down.md', help='Output markdown file (default: drill_down.md)')
 @click.option('--json-output', is_flag=True, help='Output as JSON')
 @click.option('--sso', help='AWS SSO profile name')
 @click.option('--access-key-id', help='AWS Access Key ID')
 @click.option('--secret-access-key', help='AWS Secret Access Key')
 @click.option('--session-token', help='AWS Session Token')
-def drill(profile, weeks, service, account, usage_type, output, json_output, sso, access_key_id, secret_access_key, session_token):
-    """
+def drill(profile, weeks, service, account, usage_type, resources, output, json_output, sso, access_key_id, secret_access_key, session_token):
+    """
+    Drill down into cost changes by service, account, or usage type
+
+    Add --resources flag to see individual resource IDs and costs (requires CUR data via Athena)
+    """

     # Load profile
     config = load_profile(profile)
@@ -908,10 +963,19 @@ def drill(profile, weeks, service, account, usage_type, output, json_output, sso
         click.echo(f"  Account filter: {account}")
     if usage_type:
         click.echo(f"  Usage type filter: {usage_type}")
+    if resources:
+        click.echo(f"  Mode: Resource-level (CUR via Athena)")
     click.echo("")

     # Execute via API or locally
-    drill_data = execute_drill(config, weeks, service, account, usage_type)
+    drill_data = execute_drill(config, weeks, service, account, usage_type, resources)
+
+    # Handle resource-level output differently
+    if resources:
+        from cost_calculator.cur import format_resource_output
+        output_text = format_resource_output(drill_data)
+        click.echo(output_text)
+        return

     if json_output:
         # Output as JSON
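As the hunk above shows, execute_drill now takes a sixth positional argument for resource-level mode. A minimal calling sketch under that signature (the config dict and filter values are hypothetical):

    from cost_calculator.executor import execute_drill

    config = {'accounts': ['111111111111']}   # hypothetical profile config
    drill_data = execute_drill(config, 8, 'EC2 - Other', None, None, True)   # resources=True uses CUR via Athena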
@@ -1084,5 +1148,852 @@ def profile(operation, name, accounts, description):
     click.echo(result.get('message', 'Operation completed'))


+@cli.command()
+@click.option('--profile', required=True, help='Profile name')
+@click.option('--sso', help='AWS SSO profile to use')
+@click.option('--weeks', default=8, help='Number of weeks to analyze')
+@click.option('--account', help='Focus on specific account ID')
+@click.option('--service', help='Focus on specific service')
+@click.option('--no-cloudtrail', is_flag=True, help='Skip CloudTrail analysis (faster)')
+@click.option('--output', default='investigation_report.md', help='Output file path')
+def investigate(profile, sso, weeks, account, service, no_cloudtrail, output):
+    """
+    Multi-stage cost investigation:
+    1. Analyze cost trends and drill-downs
+    2. Inventory actual resources in problem accounts
+    3. Analyze CloudTrail events (optional)
+    4. Generate comprehensive report
+    """
+    from cost_calculator.executor import execute_trends, execute_drill, get_credentials_dict
+    from cost_calculator.api_client import call_lambda_api, is_api_configured
+    from cost_calculator.forensics import format_investigation_report
+    from datetime import datetime, timedelta
+
+    click.echo("=" * 80)
+    click.echo("COST INVESTIGATION")
+    click.echo("=" * 80)
+    click.echo(f"Profile: {profile}")
+    click.echo(f"Weeks: {weeks}")
+    if account:
+        click.echo(f"Account: {account}")
+    if service:
+        click.echo(f"Service: {service}")
+    click.echo("")
+
+    # Load profile
+    config = load_profile(profile)
+
+    # Override with SSO if provided
+    if sso:
+        config['aws_profile'] = sso
+
+    # Validate that we have a way to get credentials
+    if 'aws_profile' not in config and 'credentials' not in config:
+        import subprocess
+        try:
+            result = subprocess.run(
+                ['aws', 'configure', 'list-profiles'],
+                capture_output=True,
+                text=True,
+                timeout=5
+            )
+            available = result.stdout.strip().split('\n') if result.returncode == 0 else []
+            suggestion = f"\nAvailable AWS profiles: {', '.join(available[:5])}" if available else ""
+        except:
+            suggestion = ""
+
+        raise click.ClickException(
+            f"Profile '{profile}' has no AWS authentication configured.\n"
+            f"Use --sso flag to specify your AWS SSO profile:\n"
+            f"  cc investigate --profile {profile} --sso YOUR_AWS_PROFILE{suggestion}"
+        )
+
+    # Step 1: Cost Analysis
+    click.echo("Step 1/3: Analyzing cost trends...")
+    try:
+        trends_data = execute_trends(config, weeks)
+        click.echo(f"✓ Found cost data for {weeks} weeks")
+    except Exception as e:
+        click.echo(f"✗ Error analyzing trends: {str(e)}")
+        trends_data = None
+
+    # Step 2: Drill-down
+    click.echo("\nStep 2/3: Drilling down into costs...")
+    drill_data = None
+    if service or account:
+        try:
+            drill_data = execute_drill(config, weeks, service, account, None, False)
+            click.echo(f"✓ Drill-down complete")
+        except Exception as e:
+            click.echo(f"✗ Error in drill-down: {str(e)}")
+
+    # Step 3: Resource Inventory
+    click.echo("\nStep 3/3: Inventorying resources...")
+    inventories = []
+    cloudtrail_analyses = []
+
+    # Determine which accounts to investigate
+    accounts_to_investigate = []
+    if account:
+        accounts_to_investigate = [account]
+    else:
+        # Extract top cost accounts from trends/drill data
+        # For now, we'll need the user to specify
+        click.echo("⚠️ No account specified. Use --account to inventory resources.")
+
+    # For each account, do inventory and CloudTrail via backend API
+    for acc_id in accounts_to_investigate:
+        click.echo(f"\n  Investigating account {acc_id}...")
+
+        # Get credentials (SSO or static)
+        account_creds = get_credentials_dict(config)
+        if not account_creds:
+            click.echo(f"  ⚠️ No credentials available for account")
+            continue
+
+        # Inventory resources via backend API only
+        if not is_api_configured():
+            click.echo(f"  ✗ API not configured. Set COST_API_SECRET environment variable.")
+            continue
+
+        try:
+            regions = ['us-west-2', 'us-east-1', 'eu-west-1']
+            for region in regions:
+                try:
+                    inv = call_lambda_api(
+                        'forensics',
+                        account_creds,
+                        [],  # accounts not needed for forensics
+                        operation='inventory',
+                        account_id=acc_id,
+                        region=region
+                    )
+
+                    if not inv.get('error'):
+                        inventories.append(inv)
+                        click.echo(f"  ✓ Inventory complete for {region}")
+                        click.echo(f"    - EC2: {len(inv['ec2_instances'])} instances")
+                        click.echo(f"    - EFS: {len(inv['efs_file_systems'])} file systems ({inv.get('total_efs_size_gb', 0):,.0f} GB)")
+                        click.echo(f"    - ELB: {len(inv['load_balancers'])} load balancers")
+                        break
+                except Exception as e:
+                    continue
+        except Exception as e:
+            click.echo(f"  ✗ Inventory error: {str(e)}")
+
+        # CloudTrail analysis via backend API only
+        if not no_cloudtrail:
+            if not is_api_configured():
+                click.echo(f"  ✗ CloudTrail skipped: API not configured")
+            else:
+                try:
+                    start_date = (datetime.now() - timedelta(days=weeks * 7)).isoformat() + 'Z'
+                    end_date = datetime.now().isoformat() + 'Z'
+
+                    ct_analysis = call_lambda_api(
+                        'forensics',
+                        account_creds,
+                        [],
+                        operation='cloudtrail',
+                        account_id=acc_id,
+                        start_date=start_date,
+                        end_date=end_date,
+                        region='us-west-2'
+                    )
+
+                    cloudtrail_analyses.append(ct_analysis)
+
+                    if ct_analysis.get('error'):
+                        click.echo(f"  ⚠️ CloudTrail: {ct_analysis['error']}")
+                    else:
+                        click.echo(f"  ✓ CloudTrail analysis complete")
+                        click.echo(f"    - {len(ct_analysis['event_summary'])} event types")
+                        click.echo(f"    - {len(ct_analysis['write_events'])} resource changes")
+                except Exception as e:
+                    click.echo(f"  ✗ CloudTrail error: {str(e)}")
+
+    # Generate report
+    click.echo(f"\nGenerating report...")
+    report = format_investigation_report(trends_data, inventories, cloudtrail_analyses if not no_cloudtrail else None)
+
+    # Write to file
+    with open(output, 'w') as f:
+        f.write(report)
+
+    click.echo(f"\n✓ Investigation complete!")
+    click.echo(f"✓ Report saved to: {output}")
+    click.echo("")
+
+
+def find_account_profile(account_id):
+    """
+    Find the SSO profile name for a given account ID
+    Returns profile name or None
+    """
+    import subprocess
+
+    try:
+        # Get list of profiles
+        result = subprocess.run(
+            ['aws', 'configure', 'list-profiles'],
+            capture_output=True,
+            text=True
+        )
+
+        profiles = result.stdout.strip().split('\n')
+
+        # Check each profile
+        for profile in profiles:
+            try:
+                result = subprocess.run(
+                    ['aws', 'sts', 'get-caller-identity', '--profile', profile],
+                    capture_output=True,
+                    text=True,
+                    timeout=5
+                )
+
+                if account_id in result.stdout:
+                    return profile
+            except:
+                continue
+
+        return None
+    except:
+        return None
+
+
+@cli.command()
+@click.option('--profile', required=True, help='Profile name')
+@click.option('--start-date', help='Start date (YYYY-MM-DD)')
+@click.option('--end-date', help='End date (YYYY-MM-DD)')
+@click.option('--days', type=int, default=10, help='Number of days to analyze (default: 10)')
+@click.option('--service', help='Filter by service name')
+@click.option('--account', help='Filter by account ID')
+@click.option('--sso', help='AWS SSO profile name')
+@click.option('--json', 'output_json', is_flag=True, help='Output as JSON')
+def daily(profile, start_date, end_date, days, service, account, sso, output_json):
+    """
+    Get daily cost breakdown with granular detail.
+
+    Shows day-by-day costs for specific services and accounts, useful for:
+    - Identifying cost spikes on specific dates
+    - Validating daily cost patterns
+    - Calculating precise daily averages
+
+    Examples:
+        # Last 10 days of CloudWatch costs for specific account
+        cc daily --profile khoros --days 10 --service AmazonCloudWatch --account 820054669588
+
+        # Custom date range with JSON output for automation
+        cc daily --profile khoros --start-date 2025-10-28 --end-date 2025-11-06 --json
+
+        # Find high-cost days using jq
+        cc daily --profile khoros --days 30 --json | jq '.daily_costs | map(select(.cost > 1000))'
+    """
+    # Load profile
+    config = load_profile(profile)
+
+    # Apply SSO if provided
+    if sso:
+        config['aws_profile'] = sso
+
+    # Calculate date range
+    if end_date:
+        end = datetime.strptime(end_date, '%Y-%m-%d')
+    else:
+        end = datetime.now()
+
+    if start_date:
+        start = datetime.strptime(start_date, '%Y-%m-%d')
+    else:
+        start = end - timedelta(days=days)
+
+    click.echo(f"Daily breakdown: {start.strftime('%Y-%m-%d')} to {end.strftime('%Y-%m-%d')}")
+    if service:
+        click.echo(f"Service filter: {service}")
+    if account:
+        click.echo(f"Account filter: {account}")
+    click.echo("")
+
+    # Get credentials
+    try:
+        if 'aws_profile' in config:
+            session = boto3.Session(profile_name=config['aws_profile'])
+        else:
+            creds = config['credentials']
+            session = boto3.Session(
+                aws_access_key_id=creds['aws_access_key_id'],
+                aws_secret_access_key=creds['aws_secret_access_key'],
+                aws_session_token=creds.get('aws_session_token')
+            )
+
+        ce_client = session.client('ce', region_name='us-east-1')
+
+        # Build filter
+        filter_parts = []
+
+        # Account filter
+        if account:
+            filter_parts.append({
+                "Dimensions": {
+                    "Key": "LINKED_ACCOUNT",
+                    "Values": [account]
+                }
+            })
+        else:
+            filter_parts.append({
+                "Dimensions": {
+                    "Key": "LINKED_ACCOUNT",
+                    "Values": config['accounts']
+                }
+            })
+
+        # Service filter
+        if service:
+            filter_parts.append({
+                "Dimensions": {
+                    "Key": "SERVICE",
+                    "Values": [service]
+                }
+            })
+
+        # Exclude support and tax
+        filter_parts.append({
+            "Not": {
+                "Dimensions": {
+                    "Key": "RECORD_TYPE",
+                    "Values": ["Tax", "Support"]
+                }
+            }
+        })
+
+        cost_filter = {"And": filter_parts} if len(filter_parts) > 1 else filter_parts[0]
+
+        # Get daily costs
+        response = ce_client.get_cost_and_usage(
+            TimePeriod={
+                'Start': start.strftime('%Y-%m-%d'),
+                'End': (end + timedelta(days=1)).strftime('%Y-%m-%d')
+            },
+            Granularity='DAILY',
+            Metrics=['UnblendedCost'],
+            Filter=cost_filter
+        )
+
+        # Collect results
+        daily_costs = []
+        total = 0
+        for day in response['ResultsByTime']:
+            date = day['TimePeriod']['Start']
+            cost = float(day['Total']['UnblendedCost']['Amount'])
+            total += cost
+            daily_costs.append({'date': date, 'cost': cost})
+
+        num_days = len(response['ResultsByTime'])
+        daily_avg = total / num_days if num_days > 0 else 0
+        annual = daily_avg * 365
+
+        # Output results
+        if output_json:
+            import json
+            result = {
+                'period': {
+                    'start': start.strftime('%Y-%m-%d'),
+                    'end': end.strftime('%Y-%m-%d'),
+                    'days': num_days
+                },
+                'filters': {
+                    'service': service,
+                    'account': account
+                },
+                'daily_costs': daily_costs,
+                'summary': {
+                    'total': total,
+                    'daily_avg': daily_avg,
+                    'annual_projection': annual
+                }
+            }
+            click.echo(json.dumps(result, indent=2))
+        else:
+            click.echo("Date | Cost")
+            click.echo("-----------|-----------")
+            for item in daily_costs:
+                click.echo(f"{item['date']} | ${item['cost']:,.2f}")
+            click.echo("-----------|-----------")
+            click.echo(f"Total | ${total:,.2f}")
+            click.echo(f"Daily Avg | ${daily_avg:,.2f}")
+            click.echo(f"Annual | ${annual:,.0f}")
+
+    except Exception as e:
+        raise click.ClickException(f"Failed to get daily costs: {e}")
+
+
+@cli.command()
+@click.option('--profile', required=True, help='Profile name')
+@click.option('--account', help='Account ID to compare')
+@click.option('--service', help='Service to compare')
+@click.option('--before', required=True, help='Before period (YYYY-MM-DD:YYYY-MM-DD)')
+@click.option('--after', required=True, help='After period (YYYY-MM-DD:YYYY-MM-DD)')
+@click.option('--expected-reduction', type=float, help='Expected reduction percentage')
+@click.option('--sso', help='AWS SSO profile name')
+@click.option('--json', 'output_json', is_flag=True, help='Output as JSON')
+def compare(profile, account, service, before, after, expected_reduction, sso, output_json):
+    """
+    Compare costs between two periods for validation and analysis.
+
+    Perfect for:
+    - Validating cost optimization savings
+    - Before/after migration analysis
+    - Measuring impact of infrastructure changes
+    - Automated savings validation in CI/CD
+
+    Examples:
+        # Validate Datadog migration savings (expect 50% reduction)
+        cc compare --profile khoros --account 180770971501 --service AmazonCloudWatch \
+            --before "2025-10-28:2025-11-06" --after "2025-11-17:2025-11-26" --expected-reduction 50
+
+        # Compare total costs across all accounts
+        cc compare --profile khoros --before "2025-10-01:2025-10-31" --after "2025-11-01:2025-11-30"
+
+        # JSON output for automated validation
+        cc compare --profile khoros --service EC2 --before "2025-10-01:2025-10-07" \
+            --after "2025-11-08:2025-11-14" --json | jq '.comparison.met_expectation'
+    """
+    # Load profile
+    config = load_profile(profile)
+
+    # Apply SSO if provided
+    if sso:
+        config['aws_profile'] = sso
+
+    # Parse periods
+    try:
+        before_start, before_end = before.split(':')
+        after_start, after_end = after.split(':')
+    except ValueError:
+        raise click.ClickException("Period format must be 'YYYY-MM-DD:YYYY-MM-DD'")
+
+    if not output_json:
+        click.echo(f"Comparing periods:")
+        click.echo(f"  Before: {before_start} to {before_end}")
+        click.echo(f"  After: {after_start} to {after_end}")
+        if service:
+            click.echo(f"  Service: {service}")
+        if account:
+            click.echo(f"  Account: {account}")
+        click.echo("")
+
+    # Get credentials
+    try:
+        if 'aws_profile' in config:
+            session = boto3.Session(profile_name=config['aws_profile'])
+        else:
+            creds = config['credentials']
+            session = boto3.Session(
+                aws_access_key_id=creds['aws_access_key_id'],
+                aws_secret_access_key=creds['aws_secret_access_key'],
+                aws_session_token=creds.get('aws_session_token')
+            )
+
+        ce_client = session.client('ce', region_name='us-east-1')
+
+        # Build filter
+        def build_filter():
+            filter_parts = []
+
+            if account:
+                filter_parts.append({
+                    "Dimensions": {
+                        "Key": "LINKED_ACCOUNT",
+                        "Values": [account]
+                    }
+                })
+            else:
+                filter_parts.append({
+                    "Dimensions": {
+                        "Key": "LINKED_ACCOUNT",
+                        "Values": config['accounts']
+                    }
+                })
+
+            if service:
+                filter_parts.append({
+                    "Dimensions": {
+                        "Key": "SERVICE",
+                        "Values": [service]
+                    }
+                })
+
+            filter_parts.append({
+                "Not": {
+                    "Dimensions": {
+                        "Key": "RECORD_TYPE",
+                        "Values": ["Tax", "Support"]
+                    }
+                }
+            })
+
+            return {"And": filter_parts} if len(filter_parts) > 1 else filter_parts[0]
+
+        cost_filter = build_filter()
+
+        # Get before period costs
+        before_response = ce_client.get_cost_and_usage(
+            TimePeriod={
+                'Start': before_start,
+                'End': (datetime.strptime(before_end, '%Y-%m-%d') + timedelta(days=1)).strftime('%Y-%m-%d')
+            },
+            Granularity='DAILY',
+            Metrics=['UnblendedCost'],
+            Filter=cost_filter
+        )
+
+        # Get after period costs
+        after_response = ce_client.get_cost_and_usage(
+            TimePeriod={
+                'Start': after_start,
+                'End': (datetime.strptime(after_end, '%Y-%m-%d') + timedelta(days=1)).strftime('%Y-%m-%d')
+            },
+            Granularity='DAILY',
+            Metrics=['UnblendedCost'],
+            Filter=cost_filter
+        )
+
+        # Calculate totals
+        before_total = sum(float(day['Total']['UnblendedCost']['Amount']) for day in before_response['ResultsByTime'])
+        after_total = sum(float(day['Total']['UnblendedCost']['Amount']) for day in after_response['ResultsByTime'])
+
+        before_days = len(before_response['ResultsByTime'])
+        after_days = len(after_response['ResultsByTime'])
+
+        before_daily = before_total / before_days if before_days > 0 else 0
+        after_daily = after_total / after_days if after_days > 0 else 0
+
+        reduction = before_daily - after_daily
+        reduction_pct = (reduction / before_daily * 100) if before_daily > 0 else 0
+        annual_savings = reduction * 365
+
+        # Output results
+        if output_json:
+            import json
+            result = {
+                'before': {
+                    'period': {'start': before_start, 'end': before_end},
+                    'total': before_total,
+                    'daily_avg': before_daily,
+                    'days': before_days
+                },
+                'after': {
+                    'period': {'start': after_start, 'end': after_end},
+                    'total': after_total,
+                    'daily_avg': after_daily,
+                    'days': after_days
+                },
+                'comparison': {
+                    'daily_reduction': reduction,
+                    'reduction_pct': reduction_pct,
+                    'annual_savings': annual_savings
+                }
+            }
+
+            if expected_reduction is not None:
+                result['comparison']['expected_reduction_pct'] = expected_reduction
+                result['comparison']['met_expectation'] = reduction_pct >= expected_reduction
+
+            click.echo(json.dumps(result, indent=2))
+        else:
+            click.echo("Before Period:")
+            click.echo(f"  Total: ${before_total:,.2f}")
+            click.echo(f"  Daily Avg: ${before_daily:,.2f}")
+            click.echo(f"  Days: {before_days}")
+            click.echo("")
+            click.echo("After Period:")
+            click.echo(f"  Total: ${after_total:,.2f}")
+            click.echo(f"  Daily Avg: ${after_daily:,.2f}")
+            click.echo(f"  Days: {after_days}")
+            click.echo("")
+            click.echo("Comparison:")
+            click.echo(f"  Daily Reduction: ${reduction:,.2f}")
+            click.echo(f"  Reduction %: {reduction_pct:.1f}%")
+            click.echo(f"  Annual Savings: ${annual_savings:,.0f}")
+
+            if expected_reduction is not None:
+                click.echo("")
+                if reduction_pct >= expected_reduction:
+                    click.echo(f"✅ Savings achieved: {reduction_pct:.1f}% (expected {expected_reduction}%)")
+                else:
+                    click.echo(f"⚠️ Below target: {reduction_pct:.1f}% (expected {expected_reduction}%)")
+
+    except Exception as e:
+        raise click.ClickException(f"Comparison failed: {e}")
+
+
+@cli.command()
+@click.option('--profile', required=True, help='Profile name')
+@click.option('--tag-key', required=True, help='Tag key to filter by')
+@click.option('--tag-value', help='Tag value to filter by (optional)')
+@click.option('--start-date', help='Start date (YYYY-MM-DD)')
+@click.option('--end-date', help='End date (YYYY-MM-DD)')
+@click.option('--days', type=int, default=30, help='Number of days to analyze (default: 30)')
+@click.option('--sso', help='AWS SSO profile name')
+@click.option('--json', 'output_json', is_flag=True, help='Output as JSON')
+def tags(profile, tag_key, tag_value, start_date, end_date, days, sso, output_json):
+    """
+    Analyze costs grouped by resource tags for cost attribution.
+
+    Useful for:
+    - Cost allocation by team, project, or environment
+    - Identifying untagged resources (cost attribution gaps)
+    - Tracking costs by cost center or department
+    - Validating tagging compliance
+
+    Examples:
+        # See all costs by Environment tag
+        cc tags --profile khoros --tag-key "Environment" --days 30
+
+        # Filter to specific tag value
+        cc tags --profile khoros --tag-key "Team" --tag-value "Platform" --days 30
+
+        # Find top cost centers with JSON output
+        cc tags --profile khoros --tag-key "CostCenter" --days 30 --json | \
+            jq '.tag_costs | sort_by(-.cost) | .[:5]'
+
+        # Identify untagged resources (look for empty tag values)
+        cc tags --profile khoros --tag-key "Owner" --days 7
+    """
+    # Load profile
+    config = load_profile(profile)
+
+    # Apply SSO if provided
+    if sso:
+        config['aws_profile'] = sso
+
+    # Calculate date range
+    if end_date:
+        end = datetime.strptime(end_date, '%Y-%m-%d')
+    else:
+        end = datetime.now()
+
+    if start_date:
+        start = datetime.strptime(start_date, '%Y-%m-%d')
+    else:
+        start = end - timedelta(days=days)
+
+    if not output_json:
+        click.echo(f"Tag-based cost analysis: {start.strftime('%Y-%m-%d')} to {end.strftime('%Y-%m-%d')}")
+        click.echo(f"Tag key: {tag_key}")
+        if tag_value:
+            click.echo(f"Tag value: {tag_value}")
+        click.echo("")
+
+    # Get credentials
+    try:
+        if 'aws_profile' in config:
+            session = boto3.Session(profile_name=config['aws_profile'])
+        else:
+            creds = config['credentials']
+            session = boto3.Session(
+                aws_access_key_id=creds['aws_access_key_id'],
+                aws_secret_access_key=creds['aws_secret_access_key'],
+                aws_session_token=creds.get('aws_session_token')
+            )
+
+        ce_client = session.client('ce', region_name='us-east-1')
+
+        # Build filter
+        filter_parts = [
+            {
+                "Dimensions": {
+                    "Key": "LINKED_ACCOUNT",
+                    "Values": config['accounts']
+                }
+            },
+            {
+                "Not": {
+                    "Dimensions": {
+                        "Key": "RECORD_TYPE",
+                        "Values": ["Tax", "Support"]
+                    }
+                }
+            }
+        ]
+
+        # Add tag filter if value specified
+        if tag_value:
+            filter_parts.append({
+                "Tags": {
+                    "Key": tag_key,
+                    "Values": [tag_value]
+                }
+            })
+
+        cost_filter = {"And": filter_parts}
+
+        # Get costs grouped by tag values
+        response = ce_client.get_cost_and_usage(
+            TimePeriod={
+                'Start': start.strftime('%Y-%m-%d'),
+                'End': (end + timedelta(days=1)).strftime('%Y-%m-%d')
+            },
+            Granularity='MONTHLY',
+            Metrics=['UnblendedCost'],
+            GroupBy=[{
+                'Type': 'TAG',
+                'Key': tag_key
+            }],
+            Filter=cost_filter
+        )
+
+        # Collect results
+        tag_costs = {}
+        for period in response['ResultsByTime']:
+            for group in period['Groups']:
+                tag_val = group['Keys'][0].split('$')[1] if '$' in group['Keys'][0] else group['Keys'][0]
+                cost = float(group['Metrics']['UnblendedCost']['Amount'])
+                tag_costs[tag_val] = tag_costs.get(tag_val, 0) + cost
+
+        # Sort by cost
+        sorted_tags = sorted(tag_costs.items(), key=lambda x: x[1], reverse=True)
+
+        total = sum(tag_costs.values())
+        num_days = (end - start).days
+        daily_avg = total / num_days if num_days > 0 else 0
+
+        # Output results
+        if output_json:
+            import json
+            result = {
+                'period': {
+                    'start': start.strftime('%Y-%m-%d'),
+                    'end': end.strftime('%Y-%m-%d'),
+                    'days': num_days
+                },
+                'tag_key': tag_key,
+                'tag_value_filter': tag_value,
+                'tag_costs': [{'tag_value': k, 'cost': v, 'percentage': (v/total*100) if total > 0 else 0} for k, v in sorted_tags],
+                'summary': {
+                    'total': total,
+                    'daily_avg': daily_avg,
+                    'annual_projection': daily_avg * 365
+                }
+            }
+            click.echo(json.dumps(result, indent=2))
+        else:
+            click.echo(f"Tag Value{' '*(30-len('Tag Value'))} | Cost | %")
+            click.echo("-" * 60)
+            for tag_val, cost in sorted_tags:
+                pct = (cost / total * 100) if total > 0 else 0
+                tag_display = tag_val[:30].ljust(30)
+                click.echo(f"{tag_display} | ${cost:>9,.2f} | {pct:>5.1f}%")
+            click.echo("-" * 60)
+            click.echo(f"{'Total'.ljust(30)} | ${total:>9,.2f} | 100.0%")
+            click.echo("")
+            click.echo(f"Daily Avg: ${daily_avg:,.2f}")
+            click.echo(f"Annual Projection: ${daily_avg * 365:,.0f}")
+
+    except Exception as e:
+        raise click.ClickException(f"Tag analysis failed: {e}")
+
+
+@cli.command()
+@click.option('--profile', required=True, help='Profile name')
+@click.option('--query', required=True, help='SQL query to execute')
+@click.option('--database', default='athenacurcfn_cloud_intelligence_dashboard', help='Athena database name')
+@click.option('--output-bucket', help='S3 bucket for query results (default: from profile)')
+@click.option('--sso', help='AWS SSO profile name')
+def query(profile, query, database, output_bucket, sso):
+    """
+    Execute custom Athena SQL query on CUR data
+
+    Example:
+        cc query --profile khoros --query "SELECT line_item_usage_account_id, SUM(line_item_unblended_cost) as cost FROM cloud_intelligence_dashboard WHERE line_item_usage_start_date >= DATE '2025-11-01' GROUP BY 1 ORDER BY 2 DESC LIMIT 10"
+    """
+    # Load profile
+    config = load_profile(profile)
+
+    # Apply SSO if provided
+    if sso:
+        config['aws_profile'] = sso
+
+    # Get credentials
+    try:
+        if 'aws_profile' in config:
+            session = boto3.Session(profile_name=config['aws_profile'])
+        else:
+            creds = config['credentials']
+            session = boto3.Session(
+                aws_access_key_id=creds['aws_access_key_id'],
+                aws_secret_access_key=creds['aws_secret_access_key'],
+                aws_session_token=creds.get('aws_session_token')
+            )
+
+        athena_client = session.client('athena', region_name='us-east-1')
+
+        # Default output location
+        if not output_bucket:
+            output_bucket = 's3://khoros-finops-athena/athena/'
+
+        click.echo(f"Executing query on database: {database}")
+        click.echo(f"Output location: {output_bucket}")
+        click.echo("")
+
+        # Execute query
+        response = athena_client.start_query_execution(
+            QueryString=query,
+            QueryExecutionContext={'Database': database},
+            ResultConfiguration={'OutputLocation': output_bucket}
+        )
+
+        query_id = response['QueryExecutionId']
+        click.echo(f"Query ID: {query_id}")
+        click.echo("Waiting for query to complete...")
+
+        # Wait for completion
+        import time
+        max_wait = 60
+        waited = 0
+        while waited < max_wait:
+            status_response = athena_client.get_query_execution(QueryExecutionId=query_id)
+            status = status_response['QueryExecution']['Status']['State']
+
+            if status == 'SUCCEEDED':
+                click.echo("✓ Query completed successfully")
+                break
+            elif status in ['FAILED', 'CANCELLED']:
+                reason = status_response['QueryExecution']['Status'].get('StateChangeReason', 'Unknown')
+                raise click.ClickException(f"Query {status}: {reason}")
+
+            time.sleep(2)
+            waited += 2
+
+        if waited >= max_wait:
+            raise click.ClickException(f"Query timeout after {max_wait}s. Check query ID: {query_id}")
+
+        # Get results
+        results = athena_client.get_query_results(QueryExecutionId=query_id)
+
+        # Display results
+        rows = results['ResultSet']['Rows']
+        if not rows:
+            click.echo("No results returned")
+            return
+
+        # Header
+        headers = [col['VarCharValue'] for col in rows[0]['Data']]
+        click.echo(" | ".join(headers))
+        click.echo("-" * (len(" | ".join(headers))))
+
+        # Data rows
+        for row in rows[1:]:
+            values = [col.get('VarCharValue', '') for col in row['Data']]
+            click.echo(" | ".join(values))
+
+        click.echo("")
+        click.echo(f"Returned {len(rows)-1} rows")
+
+    except Exception as e:
+        raise click.ClickException(f"Query failed: {e}")
+
+
 if __name__ == '__main__':
     cli()