cloud-cost-cli 0.2.0 → 0.3.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,54 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.costsCommand = costsCommand;
+ const cost_tracker_1 = require("../utils/cost-tracker");
+ const logger_1 = require("../utils/logger");
+ const chalk_1 = __importDefault(require("chalk"));
+ async function costsCommand(options) {
+     const tracker = new cost_tracker_1.CostTracker();
+     if (options.clear) {
+         tracker.clear();
+         (0, logger_1.info)('Cost tracking data cleared');
+         return;
+     }
+     const days = parseInt(options.days || '30');
+     const summary = tracker.getSummary(days);
+     console.log(chalk_1.default.bold(`\nšŸ’° AI Cost Summary (last ${days} days):\n`));
+     if (summary.totalOperations === 0) {
+         console.log(chalk_1.default.dim('No AI operations tracked yet.'));
+         console.log(chalk_1.default.dim('Use --explain or ask commands to generate AI insights.\n'));
+         return;
+     }
+     console.log(chalk_1.default.bold('Total:'));
+     console.log(` Operations: ${summary.totalOperations}`);
+     console.log(` Cost: $${summary.totalCost.toFixed(4)}`);
+     console.log();
+     console.log(chalk_1.default.bold('By Provider:'));
+     Object.entries(summary.byProvider).forEach(([provider, stats]) => {
+         const icon = provider === 'openai' ? 'ā˜ļø ' : 'šŸ ';
+         console.log(` ${icon} ${provider}:`);
+         console.log(` Operations: ${stats.operations}`);
+         console.log(` Cost: $${stats.cost.toFixed(4)}`);
+     });
+     console.log();
+     console.log(chalk_1.default.bold('By Model:'));
+     Object.entries(summary.byModel).forEach(([model, stats]) => {
+         console.log(` ${model}:`);
+         console.log(` Operations: ${stats.operations}`);
+         console.log(` Cost: $${stats.cost.toFixed(4)}`);
+     });
+     console.log();
+     // Show average cost per operation
+     const avgCost = summary.totalCost / summary.totalOperations;
+     console.log(chalk_1.default.dim(`Average cost per operation: $${avgCost.toFixed(4)}`));
+     // Estimate monthly cost at current rate
+     const daysElapsed = days;
+     const monthlyEstimate = (summary.totalCost / daysElapsed) * 30;
+     if (daysElapsed >= 7) {
+         console.log(chalk_1.default.dim(`Estimated monthly cost: $${monthlyEstimate.toFixed(2)}`));
+     }
+     console.log();
+ }
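The summary object that `costsCommand` reads from `CostTracker.getSummary()` is defined in `../utils/cost-tracker`, which is not part of this diff. A minimal sketch of the shape implied by the fields used above (names inferred, not taken from the package source):

```ts
// Hypothetical shape inferred from how costsCommand reads the summary;
// the real type lives in ../utils/cost-tracker and is not shown in this diff.
interface CostSummaryStats {
  operations: number; // count of tracked AI calls
  cost: number;       // accumulated cost in USD
}

interface CostSummary {
  totalOperations: number;
  totalCost: number;
  byProvider: Record<string, CostSummaryStats>; // keyed by provider, e.g. 'openai' or 'ollama'
  byModel: Record<string, CostSummaryStats>;    // keyed by model name
}
```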
@@ -10,6 +10,9 @@ interface ScanCommandOptions {
      minSavings?: string;
      verbose?: boolean;
      accurate?: boolean;
+     explain?: boolean;
+     aiProvider?: string;
+     aiModel?: string;
  }
  export declare function scanCommand(options: ScanCommandOptions): Promise<void>;
  export {};
@@ -17,6 +17,9 @@ const public_ips_1 = require("../providers/azure/public-ips");
  const table_1 = require("../reporters/table");
  const json_1 = require("../reporters/json");
  const logger_1 = require("../utils/logger");
+ const ai_1 = require("../services/ai");
+ const ask_1 = require("./ask");
+ const config_1 = require("../utils/config");
  async function scanCommand(options) {
      try {
          if (options.provider === 'aws') {
@@ -110,11 +113,51 @@ async function scanAWS(options) {
      };
      // Render output
      const topN = parseInt(options.top || '5');
+     let aiService;
+     if (options.explain) {
+         // Load config file to get defaults
+         const fileConfig = config_1.ConfigLoader.load();
+         // CLI flags override config file
+         const provider = options.aiProvider || fileConfig.ai?.provider || 'openai';
+         const model = options.aiModel || fileConfig.ai?.model;
+         const maxExplanations = fileConfig.ai?.maxExplanations;
+         // Debug logging
+         if (process.env.DEBUG) {
+             console.error('options.aiProvider:', options.aiProvider, '(type:', typeof options.aiProvider, ')');
+             console.error('fileConfig.ai?.provider:', fileConfig.ai?.provider);
+             console.error('Provider detected:', provider);
+             console.error('Has API key in config:', !!fileConfig.ai?.apiKey);
+             console.error('Has env API key:', !!process.env.OPENAI_API_KEY);
+         }
+         if (provider === 'openai' && !process.env.OPENAI_API_KEY && !fileConfig.ai?.apiKey) {
+             (0, logger_1.error)('--explain with OpenAI requires OPENAI_API_KEY environment variable or config file');
+             (0, logger_1.info)('Set it with: export OPENAI_API_KEY="sk-..."');
+             (0, logger_1.info)('Or use --ai-provider ollama for local AI (requires Ollama installed)');
+             process.exit(1);
+         }
+         try {
+             aiService = new ai_1.AIService({
+                 provider,
+                 apiKey: provider === 'openai' ? (process.env.OPENAI_API_KEY || fileConfig.ai?.apiKey) : undefined,
+                 model,
+                 maxExplanations,
+             });
+             if (provider === 'ollama') {
+                 (0, logger_1.info)('Using local Ollama for AI explanations (privacy-first, no API costs)');
+             }
+         }
+         catch (error) {
+             (0, logger_1.error)(`Failed to initialize AI service: ${error.message}`);
+             process.exit(1);
+         }
+     }
+     // Save scan cache for natural language queries
+     (0, ask_1.saveScanCache)(options.provider, options.region, report);
      if (options.output === 'json') {
          (0, json_1.renderJSON)(report);
      }
      else {
-         (0, table_1.renderTable)(report, topN);
+         await (0, table_1.renderTable)(report, topN, aiService);
      }
  }
  async function scanAzure(options) {
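The precedence above is CLI flag, then config file, then the `openai` default. The config loader and the file it reads are not included in this diff; a hedged sketch of the `ai` section that `ConfigLoader.load()` appears to return, based only on the fields read above (the config file's name and location are not shown and are not assumed here):

```ts
// Assumed shape of the loaded config's `ai` block, inferred from usage in scanAWS/scanAzure.
interface LoadedConfig {
  ai?: {
    provider?: 'openai' | 'ollama'; // falls back to 'openai' when absent
    model?: string;                 // passed through to AIService
    apiKey?: string;                // only consulted when provider is 'openai'
    maxExplanations?: number;       // caps how many opportunities get AI explanations
  };
}
```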
@@ -129,6 +172,14 @@ async function scanAzure(options) {
      else {
          (0, logger_1.info)('Scanning all locations (no filter specified)');
      }
+     // Test Azure credentials before scanning
+     try {
+         await client.testConnection();
+     }
+     catch (err) {
+         (0, logger_1.error)(err.message);
+         process.exit(1);
+     }
      if (options.accurate) {
          (0, logger_1.info)('Note: --accurate flag is not yet implemented. Using estimated pricing.');
      }
@@ -189,10 +240,50 @@ async function scanAzure(options) {
      };
      // Render output
      const topN = parseInt(options.top || '5');
+     let aiService;
+     if (options.explain) {
+         // Load config file to get defaults
+         const fileConfig = config_1.ConfigLoader.load();
+         // CLI flags override config file
+         const provider = options.aiProvider || fileConfig.ai?.provider || 'openai';
+         const model = options.aiModel || fileConfig.ai?.model;
+         const maxExplanations = fileConfig.ai?.maxExplanations;
+         // Debug logging
+         if (process.env.DEBUG) {
+             console.error('options.aiProvider:', options.aiProvider, '(type:', typeof options.aiProvider, ')');
+             console.error('fileConfig.ai?.provider:', fileConfig.ai?.provider);
+             console.error('Provider detected:', provider);
+             console.error('Has API key in config:', !!fileConfig.ai?.apiKey);
+             console.error('Has env API key:', !!process.env.OPENAI_API_KEY);
+         }
+         if (provider === 'openai' && !process.env.OPENAI_API_KEY && !fileConfig.ai?.apiKey) {
+             (0, logger_1.error)('--explain with OpenAI requires OPENAI_API_KEY environment variable or config file');
+             (0, logger_1.info)('Set it with: export OPENAI_API_KEY="sk-..."');
+             (0, logger_1.info)('Or use --ai-provider ollama for local AI (requires Ollama installed)');
+             process.exit(1);
+         }
+         try {
+             aiService = new ai_1.AIService({
+                 provider,
+                 apiKey: provider === 'openai' ? (process.env.OPENAI_API_KEY || fileConfig.ai?.apiKey) : undefined,
+                 model,
+                 maxExplanations,
+             });
+             if (provider === 'ollama') {
+                 (0, logger_1.info)('Using local Ollama for AI explanations (privacy-first, no API costs)');
+             }
+         }
+         catch (error) {
+             (0, logger_1.error)(`Failed to initialize AI service: ${error.message}`);
+             process.exit(1);
+         }
+     }
+     // Save scan cache for natural language queries
+     (0, ask_1.saveScanCache)('azure', client.location, report);
      if (options.output === 'json') {
          (0, json_1.renderJSON)(report);
      }
      else {
-         (0, table_1.renderTable)(report, topN);
+         await (0, table_1.renderTable)(report, topN, aiService);
      }
  }
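Both scan paths call `saveScanCache` from `./ask`, whose implementation is not part of this diff. A hedged sketch of the signature implied by the two call sites above (the return type is an assumption):

```ts
// Assumed declaration for the cache helper exported by commands/ask,
// inferred from saveScanCache(options.provider, options.region, report)
// and saveScanCache('azure', client.location, report) above.
import type { ScanReport } from '../types';

export declare function saveScanCache(
  provider: string,           // 'aws' or 'azure'
  region: string | undefined, // scan region/location, if any
  report: ScanReport
): void;
```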
@@ -0,0 +1,8 @@
+ import { SavingsOpportunity } from '../types';
+ interface ScriptCommandOptions {
+     opportunity: string;
+     output?: string;
+ }
+ export declare function scriptCommand(options: ScriptCommandOptions): Promise<void>;
+ export declare function generateScriptForOpportunity(opportunity: SavingsOpportunity): string | null;
+ export {};
@@ -0,0 +1,27 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.scriptCommand = scriptCommand;
+ exports.generateScriptForOpportunity = generateScriptForOpportunity;
+ const script_generator_1 = require("../services/script-generator");
+ const logger_1 = require("../utils/logger");
+ async function scriptCommand(options) {
+     (0, logger_1.info)('Script generation is currently only available after running a scan.');
+     (0, logger_1.info)('Usage: cloud-cost-cli scan --provider aws --region us-east-1');
+     (0, logger_1.info)('Then use the displayed resource IDs to generate scripts.');
+     // This is a placeholder - in a real implementation, we'd:
+     // 1. Load scan results from a cache/temp file
+     // 2. Find the opportunity by index or ID
+     // 3. Generate the script
+     // 4. Output to file or stdout
+     (0, logger_1.error)('Script generation requires a recent scan. Run "scan" first.');
+     process.exit(1);
+ }
+ // Helper function to generate script for a single opportunity
+ function generateScriptForOpportunity(opportunity) {
+     const generator = new script_generator_1.ScriptGenerator();
+     const script = generator.generateRemediation(opportunity);
+     if (!script) {
+         return null;
+     }
+     return generator.renderScript(script);
+ }
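One way a caller might use the exported helper, writing the rendered script to a file. This is illustrative only; the `opportunity` object would come from a scan report, and the wrapper function below is not part of the package:

```ts
// Illustrative usage of generateScriptForOpportunity (sketch, not package code).
import { writeFileSync } from 'fs';
import { generateScriptForOpportunity } from './commands/script'; // path assumed
import type { SavingsOpportunity } from './types';                // path assumed

function writeRemediationScript(opportunity: SavingsOpportunity, path: string): boolean {
  const script = generateScriptForOpportunity(opportunity);
  if (!script) {
    return false; // no remediation available for this opportunity type
  }
  writeFileSync(path, script, { mode: 0o755 }); // mark executable, assuming a shell script
  return true;
}
```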
@@ -12,6 +12,7 @@ export declare class AzureClient {
      subscriptionId: string;
      location: string;
      constructor(config?: AzureClientConfig);
+     testConnection(): Promise<void>;
      getComputeClient(): ComputeManagementClient;
      getStorageClient(): StorageManagementClient;
      getSqlClient(): SqlManagementClient;
@@ -22,6 +22,29 @@ class AzureClient {
          // Default to East US if no location specified
          this.location = config.location || '';
      }
+     // Test Azure credentials by making a lightweight API call
+     async testConnection() {
+         try {
+             const computeClient = this.getComputeClient();
+             // Try to list VMs (we'll just get an iterator, not actually iterate)
+             const vmsIterator = computeClient.virtualMachines.listAll();
+             // Get first page to test auth
+             await vmsIterator.next();
+         }
+         catch (error) {
+             const errorMsg = error.message || '';
+             if (errorMsg.includes('No subscriptions found') ||
+                 errorMsg.includes('authentication') ||
+                 errorMsg.includes('credentials') ||
+                 errorMsg.includes('login') ||
+                 error.statusCode === 401 ||
+                 error.code === 'CredentialUnavailableError') {
+                 throw new Error('Azure authentication failed. Please run "az login" first or set up service principal credentials.\n' +
+                     'See: https://learn.microsoft.com/en-us/cli/azure/authenticate-azure-cli');
+             }
+             throw error;
+         }
+     }
      getComputeClient() {
          return new arm_compute_1.ComputeManagementClient(this.credential, this.subscriptionId);
      }
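The scan command above calls `testConnection()` before scanning; a standalone sketch of the same fail-fast pattern, using only the methods shown in the .d.ts hunk (the import path and helper name are for illustration):

```ts
// Minimal fail-fast check, mirroring how scanAzure uses testConnection above.
import { AzureClient } from './providers/azure/client'; // path assumed

async function ensureAzureAuth(client: AzureClient): Promise<void> {
  try {
    await client.testConnection();
  } catch (err) {
    // The client maps common auth failures (401, CredentialUnavailableError, etc.)
    // to a single actionable message that suggests running `az login`.
    console.error((err as Error).message);
    process.exit(1);
  }
}
```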
@@ -1,2 +1,3 @@
  import { ScanReport } from '../types';
- export declare function renderTable(report: ScanReport, topN?: number): void;
+ import { AIService } from '../services/ai';
+ export declare function renderTable(report: ScanReport, topN?: number, aiService?: AIService): Promise<void>;
@@ -7,7 +7,7 @@ exports.renderTable = renderTable;
  const cli_table3_1 = __importDefault(require("cli-table3"));
  const chalk_1 = __importDefault(require("chalk"));
  const utils_1 = require("../utils");
- function renderTable(report, topN = 5) {
+ async function renderTable(report, topN = 5, aiService) {
      console.log(chalk_1.default.bold('\nCloud Cost Optimization Report'));
      console.log(`Provider: ${report.provider} | Region: ${report.region} | Account: ${report.accountId}`);
      console.log(`Analyzed: ${report.scanPeriod.start.toISOString().split('T')[0]} to ${report.scanPeriod.end.toISOString().split('T')[0]}\n`);
@@ -21,7 +21,8 @@ function renderTable(report, topN = 5) {
      console.log(chalk_1.default.bold(`Top ${opportunities.length} Savings Opportunities (est. ${(0, utils_1.formatCurrency)(report.totalPotentialSavings)}/month):\n`));
      const table = new cli_table3_1.default({
          head: ['#', 'Type', 'Resource ID', 'Recommendation', 'Savings/mo'],
-         colWidths: [5, 10, 25, 50, 15],
+         colWidths: [5, 12, 40, 60, 15],
+         wordWrap: true,
          style: {
              head: ['cyan'],
          },
@@ -36,8 +37,71 @@ function renderTable(report, topN = 5) {
          ]);
      });
      console.log(table.toString());
+     // Show AI explanations if enabled
+     if (aiService && aiService.isEnabled()) {
+         console.log(chalk_1.default.bold('\nšŸ¤– AI-Powered Insights:\n'));
+         const maxExplanations = aiService.getMaxExplanations();
+         const opportunitiesToExplain = opportunities.slice(0, Math.min(maxExplanations, opportunities.length));
+         for (let i = 0; i < opportunitiesToExplain.length; i++) {
+             const opp = opportunitiesToExplain[i];
+             try {
+                 console.log(chalk_1.default.cyan(`Analyzing opportunity #${i + 1}...`));
+                 const explanation = await aiService.explainOpportunity(opp);
+                 const cacheIndicator = explanation.cached ? chalk_1.default.dim(' (cached)') : '';
+                 console.log(chalk_1.default.bold(`\nšŸ’” Opportunity #${i + 1}: ${opp.resourceId}${cacheIndicator}`));
+                 console.log(chalk_1.default.dim('─'.repeat(80)));
+                 console.log(chalk_1.default.white(explanation.summary));
+                 console.log();
+                 console.log(chalk_1.default.bold('Why this is wasteful:'));
+                 console.log(explanation.whyWasteful);
+                 if (explanation.actionPlan.length > 0) {
+                     console.log();
+                     console.log(chalk_1.default.bold('Action plan:'));
+                     explanation.actionPlan.forEach((step) => {
+                         console.log(chalk_1.default.green(` ${step}`));
+                     });
+                 }
+                 console.log();
+                 console.log(`Risk: ${getRiskEmoji(explanation.riskLevel)} ${explanation.riskLevel.toUpperCase()}`);
+                 console.log(`Time: ā±ļø ${explanation.estimatedTime}`);
+                 // Try to generate remediation script
+                 try {
+                     const script = await aiService.generateRemediationScript(opp);
+                     if (script) {
+                         console.log();
+                         console.log(chalk_1.default.bold('šŸ”§ Remediation Script:'));
+                         console.log(chalk_1.default.dim('─'.repeat(80)));
+                         console.log(chalk_1.default.gray(script));
+                     }
+                 }
+                 catch (error) {
+                     // Script generation failed, skip silently
+                 }
+                 console.log();
+             }
+             catch (error) {
+                 console.log(chalk_1.default.yellow(`āš ļø AI explanation failed: ${error.message}`));
+             }
+         }
+     }
+     // Show total count if there are more opportunities
+     if (report.opportunities.length > topN) {
+         console.log(chalk_1.default.dim(`\n... and ${report.opportunities.length - topN} more opportunities (use --top ${report.opportunities.length} to see all)`));
+     }
      console.log(chalk_1.default.bold(`\nTotal potential savings: ${chalk_1.default.green((0, utils_1.formatCurrency)(report.totalPotentialSavings))}/month (${chalk_1.default.green((0, utils_1.formatCurrency)(report.totalPotentialSavings * 12))}/year)`));
      console.log(`\nSummary: ${report.summary.totalResources} resources analyzed | ${report.summary.idleResources} idle | ${report.summary.oversizedResources} oversized | ${report.summary.unusedResources} unused`);
      console.log(chalk_1.default.dim(`\nšŸ’” Note: Cost estimates based on us-east-1 pricing and may vary by region.`));
      console.log(chalk_1.default.dim(` For more accurate estimates, actual costs depend on your usage and region.\n`));
  }
+ function getRiskEmoji(risk) {
+     switch (risk) {
+         case 'low':
+             return 'āœ…';
+         case 'medium':
+             return 'āš ļø';
+         case 'high':
+             return '🚨';
+         default:
+             return 'ā“';
+     }
+ }
@@ -0,0 +1,44 @@
+ import { SavingsOpportunity } from '../types';
+ export interface AIExplanation {
+     summary: string;
+     whyWasteful: string;
+     actionPlan: string[];
+     riskLevel: 'low' | 'medium' | 'high';
+     estimatedTime: string;
+     script?: string;
+     cached?: boolean;
+ }
+ export interface QueryAnswer {
+     response: string;
+     suggestions?: string[];
+     relatedOpportunities?: any[];
+ }
+ export type AIProvider = 'openai' | 'ollama';
+ export interface AIConfig {
+     provider: AIProvider;
+     apiKey?: string;
+     model?: string;
+     maxExplanations?: number;
+ }
+ export declare class AIService {
+     private openaiClient;
+     private ollamaClient;
+     private provider;
+     private model;
+     private enabled;
+     private maxExplanations;
+     private cache;
+     private useCache;
+     private costTracker;
+     constructor(config?: AIConfig);
+     isEnabled(): boolean;
+     getMaxExplanations(): number;
+     explainOpportunity(opportunity: SavingsOpportunity): Promise<AIExplanation>;
+     generateRemediationScript(opportunity: SavingsOpportunity): Promise<string | null>;
+     private buildPrompt;
+     private parseExplanation;
+     answerQuery(query: string, scanReport: any): Promise<QueryAnswer>;
+     private buildQueryContext;
+     private buildQueryPrompt;
+     private parseQueryAnswer;
+ }
+ }