megabuff 0.9.2 → 0.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -13,6 +13,7 @@ import { getApiKeyInfo, setApiKey, removeApiKey, hasApiKey, getConfig, getProvid
13
13
  import { getDefaultModel } from "./models.js";
14
14
  import { themes, getAllThemeNames, isValidTheme } from "./themes.js";
15
15
  import { getCurrentTheme, clearThemeCache } from "./theme-utils.js";
16
+ import { estimateOptimizationCost, estimateAnalysisCost, formatCost, formatTokens, getDefaultModelForProvider, calculateCost, getPricingBreakdown } from "./cost.js";
16
17
  const program = new Command();
17
18
  // Initialize theme at startup
18
19
  let theme = await getCurrentTheme();
@@ -103,21 +104,170 @@ async function getInput(inlinePrompt, options) {
103
104
  });
104
105
  }
105
106
  /**
106
- * Optimize a prompt using OpenAI
107
+ * Get style-specific optimization instructions
107
108
  */
108
- async function optimizePromptOpenAI(prompt, apiKey, model) {
109
- const openai = new OpenAI({ apiKey });
110
- const selectedModel = model ?? getDefaultModel("openai");
111
- const systemPrompt = `You are an expert prompt engineer. Your task is to analyze and optimize prompts for AI language models.
109
+ function getStyleInstructions(style) {
110
+ const styleInstructions = {
111
+ "balanced": "", // Default behavior, no special instructions
112
+ "concise": `
113
+
114
+ OPTIMIZATION STYLE: CONCISE
115
+ - Prioritize brevity and clarity
116
+ - Remove unnecessary words while maintaining precision
117
+ - Use direct, action-oriented language
118
+ - Keep the optimized prompt as short as possible without losing essential information`,
119
+ "detailed": `
120
+
121
+ OPTIMIZATION STYLE: DETAILED
122
+ - Add comprehensive context and background information
123
+ - Include specific examples and edge cases
124
+ - Provide detailed guidance on expected outputs
125
+ - Elaborate on requirements and constraints`,
126
+ "technical": `
127
+
128
+ OPTIMIZATION STYLE: TECHNICAL
129
+ - Use precise technical terminology
130
+ - Include specific implementation details and requirements
131
+ - Add technical constraints and specifications
132
+ - Structure for code generation or technical tasks`,
133
+ "creative": `
134
+
135
+ OPTIMIZATION STYLE: CREATIVE
136
+ - Encourage imaginative and original outputs
137
+ - Add inspiration and creative direction
138
+ - Allow for flexibility and exploration
139
+ - Focus on generating unique, innovative results`,
140
+ "formal": `
141
+
142
+ OPTIMIZATION STYLE: FORMAL
143
+ - Use professional, formal language
144
+ - Structure with clear sections and hierarchy
145
+ - Emphasize precision and academic/professional tone
146
+ - Suitable for business or academic contexts`,
147
+ "casual": `
148
+
149
+ OPTIMIZATION STYLE: CASUAL
150
+ - Use conversational, friendly language
151
+ - Keep instructions approachable and easy to understand
152
+ - Maintain clarity without excessive formality
153
+ - Make the prompt feel natural and accessible`
154
+ };
155
+ return styleInstructions[style];
156
+ }
157
+ /**
158
+ * Generate a system prompt optimized for the specific provider and model
159
+ */
160
+ function getSystemPrompt(provider, model, style = "balanced", customPrompt) {
161
+ // If custom prompt is provided, use it directly
162
+ if (customPrompt) {
163
+ return customPrompt;
164
+ }
165
+ const basePrompt = `You are an expert prompt engineer. Your task is to analyze and optimize prompts for AI language models.
166
+
167
+ CRITICAL: You are optimizing THE PROMPT ITSELF, not answering or executing it.
168
+
169
+ For example:
170
+ - Input: "Write a function to validate emails"
171
+ - Output: "Create a robust email validation function in JavaScript that checks for common email format rules including @ symbol, domain name, and TLD. Include edge case handling for special characters and provide clear error messages for invalid formats."
172
+
173
+ NOT: "Here's a function to validate emails: function validateEmail(email) { ... }"
112
174
 
113
175
  When given a prompt, you should:
114
176
  1. Identify ambiguities or unclear instructions
115
177
  2. Add relevant context that would improve results
116
- 3. Structure the prompt for clarity
117
- 4. Specify expected output format if not present
118
- 5. Make the prompt more specific and actionable
178
+ 3. Structure the prompt for better clarity
179
+ 4. Ensure specificity and actionable requests
180
+ 5. Maintain the original intent while enhancing effectiveness
181
+
182
+ Return ONLY the optimized prompt without explanations, meta-commentary, or answers to the prompt itself.`;
183
+ // Add style-specific instructions
184
+ const styleInstructions = getStyleInstructions(style);
185
+ // Provider-specific enhancements
186
+ const providerEnhancements = {
187
+ "openai": `
188
+
189
+ Special considerations for OpenAI models:
190
+ - Emphasize clear, structured outputs with numbered lists or step-by-step instructions
191
+ - Include explicit format specifications (JSON, markdown, etc.) when relevant
192
+ - Front-load important context and instructions for better attention`,
193
+ "anthropic": `
194
+
195
+ Special considerations for Claude (Anthropic):
196
+ - Leverage Claude's strong reasoning by including "think step-by-step" guidance when appropriate
197
+ - Use XML tags for structured sections when complex parsing is needed
198
+ - Emphasize nuanced, detailed instructions that benefit from deep analysis
199
+ - Include relevant examples when demonstrating complex tasks`,
200
+ "google": `
201
+
202
+ Special considerations for Gemini:
203
+ - Emphasize concise, clear instructions with specific goals
204
+ - Structure prompts with clear headings and logical sections
205
+ - Include concrete examples when demonstrating desired output format`,
206
+ "xai": `
207
+
208
+ Special considerations for Grok:
209
+ - Focus on direct, actionable instructions
210
+ - Emphasize clarity and specificity
211
+ - Structure complex information hierarchically`,
212
+ "deepseek": `
213
+
214
+ Special considerations for DeepSeek:
215
+ - For reasoning tasks: Include analytical thinking prompts and step-by-step breakdown requests
216
+ - For conversational tasks: Emphasize clear but precise instructions
217
+ - Structure complex tasks into logical sequential steps`,
218
+ "azure-openai": `
219
+
220
+ Special considerations for Azure OpenAI:
221
+ - Follow OpenAI best practices with enterprise-focused clarity
222
+ - Emphasize structured outputs and explicit format requirements`
223
+ };
224
+ // Model-specific enhancements
225
+ let modelEnhancement = "";
226
+ // DeepSeek reasoner gets special treatment for analytical tasks
227
+ if (model === "deepseek-reasoner") {
228
+ modelEnhancement = `
229
+
230
+ IMPORTANT: This is a reasoning-focused model. When optimizing:
231
+ - Add explicit instructions to "think through the problem step by step"
232
+ - Structure the prompt to encourage analytical breakdown and verification
233
+ - Include reasoning checkpoints or self-verification steps where appropriate`;
234
+ }
235
+ // Claude Opus models excel at complex reasoning
236
+ else if (model.includes("opus")) {
237
+ modelEnhancement = `
238
+
239
+ IMPORTANT: This is a highly capable reasoning model. When optimizing:
240
+ - Don't hesitate to add complexity and nuance for sophisticated tasks
241
+ - Include multi-step reasoning requirements when beneficial
242
+ - Add quality checks or self-verification steps for complex outputs`;
243
+ }
244
+ // Fast/efficient models benefit from conciseness
245
+ else if (model.includes("mini") || model.includes("flash") || model.includes("haiku") || model.includes("lite")) {
246
+ modelEnhancement = `
119
247
 
120
- Return ONLY the optimized prompt, without explanations or meta-commentary.`;
248
+ IMPORTANT: This is a fast, efficient model. When optimizing:
249
+ - Keep prompts concise but complete - avoid unnecessary verbosity
250
+ - Front-load the most critical instructions and requirements
251
+ - Maintain clarity while optimizing for token efficiency`;
252
+ }
253
+ // Vision models need special handling
254
+ else if (model.includes("vision")) {
255
+ modelEnhancement = `
256
+
257
+ IMPORTANT: This model supports vision capabilities. When optimizing:
258
+ - If the prompt involves images, include specific guidance about what to analyze
259
+ - Structure image analysis requests with clear focus areas
260
+ - Specify desired output format for visual information extraction`;
261
+ }
262
+ return basePrompt + styleInstructions + providerEnhancements[provider] + modelEnhancement;
263
+ }
264
+ /**
265
+ * Optimize a prompt using OpenAI
266
+ */
267
+ async function optimizePromptOpenAI(prompt, apiKey, model, style = "balanced", customPrompt) {
268
+ const openai = new OpenAI({ apiKey });
269
+ const selectedModel = model ?? getDefaultModel("openai");
270
+ const systemPrompt = getSystemPrompt("openai", selectedModel, style, customPrompt);
121
271
  try {
122
272
  debugLog("openai.request.start", { model: selectedModel, promptLength: prompt.length });
123
273
  const response = await openai.chat.completions.create({
@@ -128,8 +278,16 @@ Return ONLY the optimized prompt, without explanations or meta-commentary.`;
128
278
  ],
129
279
  temperature: 0.7,
130
280
  });
131
- debugLog("openai.request.done", { choices: response.choices?.length });
132
- return response.choices[0]?.message?.content || "Error: No response from OpenAI";
281
+ debugLog("openai.request.done", {
282
+ choices: response.choices?.length,
283
+ usage: response.usage
284
+ });
285
+ const result = response.choices[0]?.message?.content || "Error: No response from OpenAI";
286
+ const usage = {
287
+ inputTokens: response.usage?.prompt_tokens || 0,
288
+ outputTokens: response.usage?.completion_tokens || 0
289
+ };
290
+ return { result, usage };
133
291
  }
134
292
  catch (error) {
135
293
  if (error instanceof Error) {
@@ -141,19 +299,10 @@ Return ONLY the optimized prompt, without explanations or meta-commentary.`;
141
299
  /**
142
300
  * Optimize a prompt using Anthropic Claude
143
301
  */
144
- async function optimizePromptAnthropic(prompt, apiKey, model) {
302
+ async function optimizePromptAnthropic(prompt, apiKey, model, style = "balanced", customPrompt) {
145
303
  const anthropic = new Anthropic({ apiKey });
146
304
  const selectedModel = model ?? getDefaultModel("anthropic");
147
- const systemPrompt = `You are an expert prompt engineer. Your task is to analyze and optimize prompts for AI language models.
148
-
149
- When given a prompt, you should:
150
- 1. Identify ambiguities or unclear instructions
151
- 2. Add relevant context that would improve results
152
- 3. Structure the prompt for better clarity
153
- 4. Ensure the prompt follows best practices
154
- 5. Make it more specific and actionable
155
-
156
- Return ONLY the optimized prompt without explanations or meta-commentary.`;
305
+ const systemPrompt = getSystemPrompt("anthropic", selectedModel, style, customPrompt);
157
306
  try {
158
307
  debugLog("anthropic.request.start", { model: selectedModel, promptLength: prompt.length });
159
308
  const response = await anthropic.messages.create({
@@ -167,10 +316,17 @@ Return ONLY the optimized prompt without explanations or meta-commentary.`;
167
316
  }
168
317
  ]
169
318
  });
170
- debugLog("anthropic.request.done", { contentItems: response.content?.length });
319
+ debugLog("anthropic.request.done", {
320
+ contentItems: response.content?.length,
321
+ usage: response.usage
322
+ });
171
323
  const content = response.content?.[0];
172
324
  if (content?.type === "text") {
173
- return content.text;
325
+ const usage = {
326
+ inputTokens: response.usage.input_tokens,
327
+ outputTokens: response.usage.output_tokens
328
+ };
329
+ return { result: content.text, usage };
174
330
  }
175
331
  throw new Error("Unexpected response format from Anthropic API");
176
332
  }
@@ -184,19 +340,10 @@ Return ONLY the optimized prompt without explanations or meta-commentary.`;
184
340
  /**
185
341
  * Optimize a prompt using Google Gemini
186
342
  */
187
- async function optimizePromptGemini(prompt, apiKey, modelName) {
343
+ async function optimizePromptGemini(prompt, apiKey, modelName, style = "balanced", customPrompt) {
188
344
  const genAI = new GoogleGenerativeAI(apiKey);
189
345
  const selectedModel = modelName ?? getDefaultModel("google");
190
- const systemPrompt = `You are an expert prompt engineer. Your task is to analyze and optimize prompts for AI language models.
191
-
192
- When given a prompt, you should:
193
- 1. Identify ambiguities or unclear instructions
194
- 2. Add relevant context that would improve results
195
- 3. Structure the prompt for better clarity
196
- 4. Ensure the prompt follows best practices
197
- 5. Make it more specific and actionable
198
-
199
- Return ONLY the optimized prompt without explanations or meta-commentary.`;
346
+ const systemPrompt = getSystemPrompt("google", selectedModel, style, customPrompt);
200
347
  try {
201
348
  debugLog("gemini.request.start", { model: selectedModel, promptLength: prompt.length });
202
349
  const model = genAI.getGenerativeModel({
@@ -206,11 +353,20 @@ Return ONLY the optimized prompt without explanations or meta-commentary.`;
206
353
  const result = await model.generateContent(`Optimize this prompt:\n\n${prompt}`);
207
354
  const response = result.response;
208
355
  const text = response.text();
209
- debugLog("gemini.request.done", { responseLength: text.length });
356
+ // Extract token usage from response
357
+ const usageMetadata = response.usageMetadata;
358
+ const usage = {
359
+ inputTokens: usageMetadata?.promptTokenCount || 0,
360
+ outputTokens: usageMetadata?.candidatesTokenCount || 0
361
+ };
362
+ debugLog("gemini.request.done", {
363
+ responseLength: text.length,
364
+ usage
365
+ });
210
366
  if (!text) {
211
367
  throw new Error("No response from Gemini API");
212
368
  }
213
- return text;
369
+ return { result: text, usage };
214
370
  }
215
371
  catch (error) {
216
372
  if (error instanceof Error) {
@@ -222,18 +378,9 @@ Return ONLY the optimized prompt without explanations or meta-commentary.`;
222
378
  /**
223
379
  * Optimize a prompt using xAI (Grok)
224
380
  */
225
- async function optimizePromptXAI(prompt, apiKey, modelName) {
381
+ async function optimizePromptXAI(prompt, apiKey, modelName, style = "balanced", customPrompt) {
226
382
  const selectedModel = modelName ?? getDefaultModel("xai");
227
- const systemPrompt = `You are an expert prompt engineer. Your task is to analyze and optimize prompts for AI language models.
228
-
229
- When given a prompt, you should:
230
- 1. Identify ambiguities or unclear instructions
231
- 2. Add relevant context that would improve results
232
- 3. Structure the prompt for better clarity
233
- 4. Ensure the prompt follows best practices
234
- 5. Make it more specific and actionable
235
-
236
- Return ONLY the optimized prompt without explanations or meta-commentary.`;
383
+ const systemPrompt = getSystemPrompt("xai", selectedModel, style, customPrompt);
237
384
  try {
238
385
  debugLog("xai.request.start", { model: selectedModel, promptLength: prompt.length });
239
386
  // xAI SDK requires the API key to be set as an environment variable
@@ -245,11 +392,19 @@ Return ONLY the optimized prompt without explanations or meta-commentary.`;
245
392
  system: systemPrompt,
246
393
  prompt: `Optimize this prompt:\n\n${prompt}`,
247
394
  });
248
- debugLog("xai.request.done", { textLength: result.text.length });
395
+ // Extract token usage from ai SDK response
396
+ const usage = {
397
+ inputTokens: result.usage?.promptTokens || 0,
398
+ outputTokens: result.usage?.completionTokens || 0
399
+ };
400
+ debugLog("xai.request.done", {
401
+ textLength: result.text.length,
402
+ usage
403
+ });
249
404
  if (!result.text) {
250
405
  throw new Error("No response from xAI API");
251
406
  }
252
- return result.text;
407
+ return { result: result.text, usage };
253
408
  }
254
409
  finally {
255
410
  // Restore original environment variable
@@ -271,23 +426,14 @@ Return ONLY the optimized prompt without explanations or meta-commentary.`;
271
426
  /**
272
427
  * Optimize a prompt using DeepSeek
273
428
  */
274
- async function optimizePromptDeepSeek(prompt, apiKey, modelName) {
429
+ async function optimizePromptDeepSeek(prompt, apiKey, modelName, style = "balanced", customPrompt) {
275
430
  // DeepSeek uses OpenAI-compatible API
276
431
  const openai = new OpenAI({
277
432
  apiKey: apiKey,
278
433
  baseURL: "https://api.deepseek.com"
279
434
  });
280
435
  const selectedModel = modelName ?? getDefaultModel("deepseek");
281
- const systemPrompt = `You are an expert prompt engineer. Your task is to analyze and optimize prompts for AI language models.
282
-
283
- When given a prompt, you should:
284
- 1. Identify ambiguities or unclear instructions
285
- 2. Add relevant context that would improve results
286
- 3. Structure the prompt for better clarity
287
- 4. Ensure the prompt follows best practices
288
- 5. Make it more specific and actionable
289
-
290
- Return ONLY the optimized prompt without explanations or meta-commentary.`;
436
+ const systemPrompt = getSystemPrompt("deepseek", selectedModel, style, customPrompt);
291
437
  try {
292
438
  debugLog("deepseek.request.start", { model: selectedModel, promptLength: prompt.length });
293
439
  const response = await openai.chat.completions.create({
@@ -303,12 +449,239 @@ Return ONLY the optimized prompt without explanations or meta-commentary.`;
303
449
  }
304
450
  ]
305
451
  });
306
- debugLog("deepseek.request.done", { choices: response.choices?.length });
307
452
  const content = response.choices[0]?.message?.content;
453
+ const usage = {
454
+ inputTokens: response.usage?.prompt_tokens || 0,
455
+ outputTokens: response.usage?.completion_tokens || 0
456
+ };
457
+ debugLog("deepseek.request.done", {
458
+ choices: response.choices?.length,
459
+ usage
460
+ });
308
461
  if (!content) {
309
462
  throw new Error("No response from DeepSeek API");
310
463
  }
311
- return content;
464
+ return { result: content, usage };
465
+ }
466
+ catch (error) {
467
+ if (error instanceof Error) {
468
+ throw new Error(`DeepSeek API error: ${error.message}`);
469
+ }
470
+ throw error;
471
+ }
472
+ }
473
+ /**
474
+ * Generate a system prompt for prompt analysis
475
+ */
476
+ function getAnalysisSystemPrompt() {
477
+ return `You are an expert prompt engineer analyzing prompts for AI language models.
478
+
479
+ Your task is to provide a comprehensive analysis of the given prompt. Structure your analysis as follows:
480
+
481
+ ## 📊 Overall Assessment
482
+ Provide a brief 2-3 sentence summary of the prompt's quality and primary purpose.
483
+
484
+ ## ✅ Strengths
485
+ List 3-5 specific strengths of this prompt. Be concrete and explain why each strength matters.
486
+
487
+ ## ⚠️ Weaknesses & Issues
488
+ Identify 3-5 specific problems, ambiguities, or areas for improvement. Explain the impact of each issue.
489
+
490
+ ## 💡 Specific Suggestions
491
+ Provide 3-5 actionable recommendations for improvement. Be specific about what to change and why.
492
+
493
+ ## 🎯 Key Improvements
494
+ Highlight the 2-3 most important changes that would have the biggest impact.
495
+
496
+ ## 📈 Clarity Score
497
+ Rate the prompt's clarity on a scale of 1-10, with a brief justification.
498
+
499
+ Be direct, constructive, and specific in your analysis. Focus on actionable feedback that will genuinely improve the prompt's effectiveness.`;
500
+ }
501
+ /**
502
+ * Analyze a prompt using OpenAI
503
+ */
504
+ async function analyzePromptOpenAI(prompt, apiKey, model) {
505
+ const openai = new OpenAI({ apiKey });
506
+ const selectedModel = model ?? getDefaultModel("openai");
507
+ const systemPrompt = getAnalysisSystemPrompt();
508
+ try {
509
+ debugLog("openai.analyze.start", { model: selectedModel, promptLength: prompt.length });
510
+ const response = await openai.chat.completions.create({
511
+ model: selectedModel,
512
+ messages: [
513
+ { role: "system", content: systemPrompt },
514
+ { role: "user", content: `Analyze this prompt:\n\n${prompt}` }
515
+ ],
516
+ temperature: 0.7,
517
+ });
518
+ const result = response.choices[0]?.message?.content || "Error: No response from OpenAI";
519
+ const usage = {
520
+ inputTokens: response.usage?.prompt_tokens || 0,
521
+ outputTokens: response.usage?.completion_tokens || 0
522
+ };
523
+ debugLog("openai.analyze.done", {
524
+ choices: response.choices?.length,
525
+ usage
526
+ });
527
+ return { result, usage };
528
+ }
529
+ catch (error) {
530
+ if (error instanceof Error) {
531
+ throw new Error(`OpenAI API error: ${error.message}`);
532
+ }
533
+ throw error;
534
+ }
535
+ }
536
+ /**
537
+ * Analyze a prompt using Anthropic Claude
538
+ */
539
+ async function analyzePromptAnthropic(prompt, apiKey, model) {
540
+ const anthropic = new Anthropic({ apiKey });
541
+ const selectedModel = model ?? getDefaultModel("anthropic");
542
+ const systemPrompt = getAnalysisSystemPrompt();
543
+ try {
544
+ debugLog("anthropic.analyze.start", { model: selectedModel, promptLength: prompt.length });
545
+ const response = await anthropic.messages.create({
546
+ model: selectedModel,
547
+ max_tokens: 4096,
548
+ system: systemPrompt,
549
+ messages: [
550
+ {
551
+ role: "user",
552
+ content: `Analyze this prompt:\n\n${prompt}`
553
+ }
554
+ ]
555
+ });
556
+ debugLog("anthropic.analyze.done", {
557
+ contentItems: response.content?.length,
558
+ usage: response.usage
559
+ });
560
+ const content = response.content?.[0];
561
+ if (content?.type === "text") {
562
+ const usage = {
563
+ inputTokens: response.usage.input_tokens,
564
+ outputTokens: response.usage.output_tokens
565
+ };
566
+ return { result: content.text, usage };
567
+ }
568
+ throw new Error("Unexpected response format from Anthropic API");
569
+ }
570
+ catch (error) {
571
+ if (error instanceof Error) {
572
+ throw new Error(`Anthropic API error: ${error.message}`);
573
+ }
574
+ throw error;
575
+ }
576
+ }
577
+ /**
578
+ * Analyze a prompt using Google Gemini
579
+ */
580
+ async function analyzePromptGemini(prompt, apiKey, model) {
581
+ const selectedModel = model ?? getDefaultModel("google");
582
+ const systemPrompt = getAnalysisSystemPrompt();
583
+ const genAI = new GoogleGenerativeAI(apiKey);
584
+ const gemini = genAI.getGenerativeModel({ model: selectedModel });
585
+ try {
586
+ debugLog("google.analyze.start", { model: selectedModel, promptLength: prompt.length });
587
+ const result = await gemini.generateContent(`${systemPrompt}\n\nAnalyze this prompt:\n\n${prompt}`);
588
+ const response = result.response;
589
+ const text = response.text();
590
+ const usageMetadata = response.usageMetadata;
591
+ const usage = {
592
+ inputTokens: usageMetadata?.promptTokenCount || 0,
593
+ outputTokens: usageMetadata?.candidatesTokenCount || 0
594
+ };
595
+ debugLog("google.analyze.done", {
596
+ responseLength: text.length,
597
+ usage
598
+ });
599
+ return { result: text, usage };
600
+ }
601
+ catch (error) {
602
+ if (error instanceof Error) {
603
+ throw new Error(`Google Gemini API error: ${error.message}`);
604
+ }
605
+ throw error;
606
+ }
607
+ }
608
+ /**
609
+ * Analyze a prompt using xAI (Grok)
610
+ */
611
+ async function analyzePromptXAI(prompt, apiKey, model) {
612
+ const selectedModel = model ?? getDefaultModel("xai");
613
+ const systemPrompt = getAnalysisSystemPrompt();
614
+ try {
615
+ debugLog("xai.analyze.start", { model: selectedModel, promptLength: prompt.length });
616
+ // xAI SDK requires the API key to be set as an environment variable
617
+ const originalKey = process.env.XAI_API_KEY;
618
+ process.env.XAI_API_KEY = apiKey;
619
+ try {
620
+ const result = await generateText({
621
+ model: xai(selectedModel),
622
+ system: systemPrompt,
623
+ prompt: `Analyze this prompt:\n\n${prompt}`,
624
+ });
625
+ const usage = {
626
+ inputTokens: result.usage?.promptTokens || 0,
627
+ outputTokens: result.usage?.completionTokens || 0
628
+ };
629
+ debugLog("xai.analyze.done", {
630
+ textLength: result.text.length,
631
+ usage
632
+ });
633
+ if (!result.text) {
634
+ throw new Error("No response from xAI API");
635
+ }
636
+ return { result: result.text, usage };
637
+ }
638
+ finally {
639
+ // Restore original environment variable
640
+ if (originalKey !== undefined) {
641
+ process.env.XAI_API_KEY = originalKey;
642
+ }
643
+ else {
644
+ delete process.env.XAI_API_KEY;
645
+ }
646
+ }
647
+ }
648
+ catch (error) {
649
+ if (error instanceof Error) {
650
+ throw new Error(`xAI API error: ${error.message}`);
651
+ }
652
+ throw error;
653
+ }
654
+ }
655
+ /**
656
+ * Analyze a prompt using DeepSeek
657
+ */
658
+ async function analyzePromptDeepSeek(prompt, apiKey, model) {
659
+ const openai = new OpenAI({
660
+ apiKey,
661
+ baseURL: "https://api.deepseek.com"
662
+ });
663
+ const selectedModel = model ?? getDefaultModel("deepseek");
664
+ const systemPrompt = getAnalysisSystemPrompt();
665
+ try {
666
+ debugLog("deepseek.analyze.start", { model: selectedModel, promptLength: prompt.length });
667
+ const response = await openai.chat.completions.create({
668
+ model: selectedModel,
669
+ messages: [
670
+ { role: "system", content: systemPrompt },
671
+ { role: "user", content: `Analyze this prompt:\n\n${prompt}` }
672
+ ],
673
+ temperature: 0.7,
674
+ });
675
+ const result = response.choices[0]?.message?.content || "Error: No response from DeepSeek";
676
+ const usage = {
677
+ inputTokens: response.usage?.prompt_tokens || 0,
678
+ outputTokens: response.usage?.completion_tokens || 0
679
+ };
680
+ debugLog("deepseek.analyze.done", {
681
+ choices: response.choices?.length,
682
+ usage
683
+ });
684
+ return { result, usage };
312
685
  }
313
686
  catch (error) {
314
687
  if (error instanceof Error) {
@@ -339,6 +712,7 @@ function createSpinner(message) {
339
712
  let i = 0;
340
713
  let timer;
341
714
  let lastLen = 0;
715
+ let currentMessage = message;
342
716
  const render = (text) => {
343
717
  const frame = theme.colors.primary(frames[i++ % frames.length]);
344
718
  const line = `${frame} ${theme.colors.dim(text)}`;
@@ -350,8 +724,14 @@ function createSpinner(message) {
350
724
  start() {
351
725
  if (!enabled)
352
726
  return;
353
- render(message);
354
- timer = setInterval(() => render(message), 80);
727
+ render(currentMessage);
728
+ timer = setInterval(() => render(currentMessage), 80);
729
+ },
730
+ update(newMessage) {
731
+ if (!enabled)
732
+ return;
733
+ currentMessage = newMessage;
734
+ render(currentMessage);
355
735
  },
356
736
  stop(finalText) {
357
737
  if (!enabled)
@@ -359,7 +739,7 @@ function createSpinner(message) {
359
739
  if (timer)
360
740
  clearInterval(timer);
361
741
  timer = undefined;
362
- const text = finalText ?? message;
742
+ const text = finalText ?? currentMessage;
363
743
  const padded = theme.colors.success(`✓ ${text}`) + " ".repeat(Math.max(0, lastLen - text.length));
364
744
  process.stderr.write(`\r${padded}\n`);
365
745
  },
@@ -369,7 +749,7 @@ function createSpinner(message) {
369
749
  if (timer)
370
750
  clearInterval(timer);
371
751
  timer = undefined;
372
- const text = finalText ?? message;
752
+ const text = finalText ?? currentMessage;
373
753
  const padded = theme.colors.error(`✗ ${text}`) + " ".repeat(Math.max(0, lastLen - text.length));
374
754
  process.stderr.write(`\r${padded}\n`);
375
755
  }
@@ -1079,6 +1459,444 @@ themeCmd
1079
1459
  process.exit(1);
1080
1460
  }
1081
1461
  });
1462
+ // Comparison mode function
1463
+ async function runComparisonMode(original, style, customPrompt, iterations, options) {
1464
+ console.log("");
1465
+ console.log(theme.colors.primary("🔍 Comparison Mode") + theme.colors.dim(" - Testing multiple providers..."));
1466
+ console.log("");
1467
+ // Parse requested providers if specified
1468
+ let requestedProviders;
1469
+ if (options.providers) {
1470
+ const providerList = options.providers.split(",").map((p) => p.trim().toLowerCase());
1471
+ requestedProviders = [];
1472
+ for (const p of providerList) {
1473
+ if (!PROVIDERS.includes(p)) {
1474
+ console.error("");
1475
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Invalid provider '${p}'`));
1476
+ console.error("");
1477
+ console.error(theme.colors.dim(" Valid providers: ") + theme.colors.info(PROVIDERS.join(", ")));
1478
+ console.error("");
1479
+ process.exit(1);
1480
+ }
1481
+ requestedProviders.push(p);
1482
+ }
1483
+ }
1484
+ // Parse per-provider models if specified
1485
+ const providerModels = {};
1486
+ if (options.models) {
1487
+ const modelPairs = options.models.split(",").map((m) => m.trim());
1488
+ for (const pair of modelPairs) {
1489
+ const [providerStr, model] = pair.split(":").map((s) => s.trim());
1490
+ if (!providerStr || !model) {
1491
+ console.error("");
1492
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Invalid model specification '${pair}'`));
1493
+ console.error("");
1494
+ console.error(theme.colors.dim(" Format: ") + theme.colors.info("provider:model (e.g., 'openai:gpt-4o,anthropic:claude-opus-4-5')"));
1495
+ console.error("");
1496
+ process.exit(1);
1497
+ }
1498
+ const provider = providerStr.toLowerCase();
1499
+ if (!PROVIDERS.includes(provider)) {
1500
+ console.error("");
1501
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Invalid provider '${provider}' in model specification`));
1502
+ console.error("");
1503
+ console.error(theme.colors.dim(" Valid providers: ") + theme.colors.info(PROVIDERS.join(", ")));
1504
+ console.error("");
1505
+ process.exit(1);
1506
+ }
1507
+ // Verify the model belongs to the provider
1508
+ const modelProvider = getProviderForModel(model);
1509
+ if (!modelProvider || modelProvider !== provider) {
1510
+ console.error("");
1511
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Model '${model}' does not belong to provider '${provider}'`));
1512
+ console.error("");
1513
+ if (modelProvider) {
1514
+ console.error(theme.colors.dim(` '${model}' is a ${modelProvider} model`));
1515
+ }
1516
+ else {
1517
+ console.error(theme.colors.dim(` '${model}' is not a recognized model`));
1518
+ }
1519
+ console.error("");
1520
+ process.exit(1);
1521
+ }
1522
+ providerModels[provider] = model;
1523
+ }
1524
+ }
1525
+ // Get all available providers with configured API keys
1526
+ const availableProviders = [];
1527
+ const providerKeys = {};
1528
+ const providersToCheck = requestedProviders || PROVIDERS;
1529
+ for (const p of providersToCheck) {
1530
+ try {
1531
+ const { apiKey } = await getApiKeyInfo(p, options.apiKey);
1532
+ if (apiKey) {
1533
+ availableProviders.push(p);
1534
+ providerKeys[p] = apiKey;
1535
+ }
1536
+ }
1537
+ catch {
1538
+ // Skip providers without API keys
1539
+ }
1540
+ }
1541
+ if (availableProviders.length === 0) {
1542
+ const errorMsg = requestedProviders
1543
+ ? `None of the requested providers (${requestedProviders.join(", ")}) have configured API keys`
1544
+ : "No providers have configured API keys";
1545
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(errorMsg));
1546
+ console.error("");
1547
+ console.error(theme.colors.dim(" Configure API keys using:"));
1548
+ console.error(theme.colors.accent(" megabuff config set --provider <provider> <api-key>"));
1549
+ console.error("");
1550
+ process.exit(1);
1551
+ }
1552
+ if (availableProviders.length === 1) {
1553
+ const warningMsg = requestedProviders
1554
+ ? `Only one of the requested providers has a configured API key`
1555
+ : "Only one provider has a configured API key";
1556
+ console.error(theme.colors.warning("⚠️ Warning: ") + theme.colors.dim(warningMsg));
1557
+ console.error("");
1558
+ console.error(theme.colors.dim(" Comparison mode requires at least 2 providers"));
1559
+ if (requestedProviders) {
1560
+ console.error(theme.colors.dim(" Add more providers to the --providers list or configure more API keys"));
1561
+ }
1562
+ else {
1563
+ console.error(theme.colors.dim(" Configure more providers using:"));
1564
+ console.error(theme.colors.accent(" megabuff config set --provider <provider> <api-key>"));
1565
+ }
1566
+ console.error("");
1567
+ process.exit(1);
1568
+ }
1569
+ console.log(theme.colors.dim(` Testing ${availableProviders.length} providers: ${availableProviders.map(formatProviderName).join(", ")}`));
1570
+ console.log("");
1571
+ // Show cost estimate for comparison mode if requested
1572
+ let totalEstimatedCost = 0; // Store for accuracy calculation later
1573
+ if (options.showCost) {
1574
+ console.log(theme.colors.primary("💰 Cost Estimate (All Providers)"));
1575
+ console.log(theme.colors.dim("─".repeat(80)));
1576
+ for (const provider of availableProviders) {
1577
+ // Determine which model to use for cost estimation
1578
+ // Priority: 1) --models flag, 2) global config, 3) provider default
1579
+ let modelToUse;
1580
+ if (providerModels[provider]) {
1581
+ modelToUse = providerModels[provider];
1582
+ }
1583
+ else {
1584
+ const configuredModel = await getModel();
1585
+ modelToUse = configuredModel && getProviderForModel(configuredModel) === provider ? configuredModel : undefined;
1586
+ }
1587
+ const modelForCost = modelToUse || getDefaultModelForProvider(provider);
1588
+ const costEstimate = estimateOptimizationCost(original, modelForCost, iterations);
1589
+ const pricingInfo = getPricingBreakdown(modelForCost);
1590
+ console.log(theme.colors.info(` ${formatProviderName(provider)}: `) + theme.colors.secondary(formatCost(costEstimate.estimatedCost)) + theme.colors.dim(` (${modelForCost})`));
1591
+ if (pricingInfo) {
1592
+ console.log(theme.colors.dim(` Input: $${pricingInfo.inputPricePer1M.toFixed(2)}/1M tokens (${pricingInfo.inputPricePerToken}/token)`));
1593
+ console.log(theme.colors.dim(` Output: $${pricingInfo.outputPricePer1M.toFixed(2)}/1M tokens (${pricingInfo.outputPricePerToken}/token)`));
1594
+ }
1595
+ totalEstimatedCost += costEstimate.estimatedCost;
1596
+ }
1597
+ console.log(theme.colors.dim("─".repeat(80)));
1598
+ console.log(theme.colors.info(` Total estimated cost: `) + theme.colors.accent(formatCost(totalEstimatedCost)));
1599
+ console.log(theme.colors.dim("─".repeat(80)));
1600
+ console.log("");
1601
+ // Prompt user to confirm proceeding
1602
+ if (process.stdin.isTTY && process.stdout.isTTY) {
1603
+ const rl = readline.createInterface({
1604
+ input: process.stdin,
1605
+ output: process.stdout
1606
+ });
1607
+ const answer = await rl.question(theme.colors.warning("Do you want to proceed with this operation? (y/n): "));
1608
+ rl.close();
1609
+ if (answer.toLowerCase() !== 'y' && answer.toLowerCase() !== 'yes') {
1610
+ console.log("");
1611
+ console.log(theme.colors.dim("Operation cancelled."));
1612
+ console.log("");
1613
+ return;
1614
+ }
1615
+ console.log("");
1616
+ }
1617
+ }
1618
+ // Run optimization for each provider
1619
+ // Use sequential execution if we need interactive prompts for iterations
1620
+ const needsSequentialExecution = options.showCost && iterations > 1 && process.stdin.isTTY && process.stdout.isTTY;
1621
+ const results = [];
1622
+ const processProvider = async (provider) => {
1623
+ const providerEmoji = provider === "openai" ? "🤖" : provider === "anthropic" ? "🧠" : provider === "google" ? "✨" : provider === "xai" ? "🚀" : provider === "deepseek" ? "🔮" : "🔧";
1624
+ const spinner = createSpinner(`${providerEmoji} Optimizing with ${formatProviderName(provider)}...`);
1625
+ spinner.start();
1626
+ const startTime = Date.now();
1627
+ let optimized = original;
1628
+ let totalInputTokens = 0;
1629
+ let totalOutputTokens = 0;
1630
+ const iterationCosts = [];
1631
+ try {
1632
+ const apiKey = providerKeys[provider];
1633
+ // Determine which model to use for this provider
1634
+ // Priority: 1) --models flag, 2) global config, 3) provider default
1635
+ let modelToUse;
1636
+ if (providerModels[provider]) {
1637
+ // Use model specified in --models flag for this provider
1638
+ modelToUse = providerModels[provider];
1639
+ }
1640
+ else {
1641
+ // Fall back to global configured model if it matches this provider
1642
+ const configuredModel = await getModel();
1643
+ modelToUse = configuredModel && getProviderForModel(configuredModel) === provider ? configuredModel : undefined;
1644
+ }
1645
+ // Run iterations for this provider
1646
+ for (let i = 0; i < iterations; i++) {
1647
+ // Update spinner to show current iteration
1648
+ if (iterations > 1) {
1649
+ spinner.update(`${providerEmoji} ${formatProviderName(provider)} - iteration ${i + 1}/${iterations}...`);
1650
+ }
1651
+ let response;
1652
+ if (provider === "openai") {
1653
+ response = await optimizePromptOpenAI(optimized, apiKey, modelToUse, style, customPrompt);
1654
+ }
1655
+ else if (provider === "anthropic") {
1656
+ response = await optimizePromptAnthropic(optimized, apiKey, modelToUse, style, customPrompt);
1657
+ }
1658
+ else if (provider === "google") {
1659
+ response = await optimizePromptGemini(optimized, apiKey, modelToUse, style, customPrompt);
1660
+ }
1661
+ else if (provider === "xai") {
1662
+ response = await optimizePromptXAI(optimized, apiKey, modelToUse, style, customPrompt);
1663
+ }
1664
+ else if (provider === "deepseek") {
1665
+ response = await optimizePromptDeepSeek(optimized, apiKey, modelToUse, style, customPrompt);
1666
+ }
1667
+ else {
1668
+ throw new Error(`Unsupported provider: ${provider}`);
1669
+ }
1670
+ optimized = response.result;
1671
+ totalInputTokens += response.usage.inputTokens;
1672
+ totalOutputTokens += response.usage.outputTokens;
1673
+ const actualModel = modelToUse || getDefaultModelForProvider(provider);
1674
+ const iterationCost = calculateCost(response.usage.inputTokens, response.usage.outputTokens, actualModel);
1675
+ // Track cost per iteration
1676
+ iterationCosts.push({
1677
+ iteration: i + 1,
1678
+ inputTokens: response.usage.inputTokens,
1679
+ outputTokens: response.usage.outputTokens,
1680
+ cost: iterationCost
1681
+ });
1682
+ // Stop spinner temporarily for iteration results
1683
+ if (iterations > 1) {
1684
+ spinner.stop();
1685
+ }
1686
+ // Show iteration output if verbose mode is enabled
1687
+ if (options.verbose && iterations > 1) {
1688
+ console.log("");
1689
+ console.log(theme.colors.primary(`📝 ${formatProviderName(provider)} - Iteration ${i + 1}/${iterations} Output:`));
1690
+ console.log(theme.colors.dim("─".repeat(80)));
1691
+ console.log(optimized);
1692
+ console.log(theme.colors.dim("─".repeat(80)));
1693
+ console.log("");
1694
+ }
1695
+ // Show cost for this iteration if --show-cost is enabled
1696
+ if (options.showCost && iterations > 1) {
1697
+ console.log("");
1698
+ console.log(theme.colors.primary(`💰 ${formatProviderName(provider)} - Iteration ${i + 1}/${iterations} Actual Cost`));
1699
+ console.log(theme.colors.dim("─".repeat(80)));
1700
+ console.log(theme.colors.info(` Input tokens: `) + theme.colors.secondary(formatTokens(response.usage.inputTokens)));
1701
+ console.log(theme.colors.info(` Output tokens: `) + theme.colors.secondary(formatTokens(response.usage.outputTokens)));
1702
+ console.log(theme.colors.info(` Cost: `) + theme.colors.accent(formatCost(iterationCost)));
1703
+ console.log(theme.colors.dim("─".repeat(80)));
1704
+ console.log("");
1705
+ }
1706
+ // Show cost estimate for NEXT iteration and prompt for confirmation (if not the last iteration)
1707
+ if (options.showCost && iterations > 1 && i < iterations - 1) {
1708
+ const nextIterationEstimate = estimateOptimizationCost(optimized, actualModel, 1);
1709
+ console.log("");
1710
+ console.log(theme.colors.primary(`💰 ${formatProviderName(provider)} - Iteration ${i + 2}/${iterations} Cost Estimate`));
1711
+ console.log(theme.colors.dim("─".repeat(80)));
1712
+ console.log(theme.colors.info(` Model: `) + theme.colors.secondary(actualModel));
1713
+ console.log(theme.colors.info(` Estimated cost: `) + theme.colors.accent(formatCost(nextIterationEstimate.estimatedCost)));
1714
+ console.log(theme.colors.dim("─".repeat(80)));
1715
+ console.log("");
1716
+ // Prompt user to confirm proceeding with next iteration
1717
+ if (process.stdin.isTTY && process.stdout.isTTY) {
1718
+ const rl = readline.createInterface({
1719
+ input: process.stdin,
1720
+ output: process.stdout
1721
+ });
1722
+ const answer = await rl.question(theme.colors.warning(`${formatProviderName(provider)}: Continue with iteration ${i + 2}/${iterations}? (y/n): `));
1723
+ rl.close();
1724
+ if (answer.toLowerCase() !== 'y' && answer.toLowerCase() !== 'yes') {
1725
+ console.log("");
1726
+ console.log(theme.colors.dim(`${formatProviderName(provider)}: Stopped after ${i + 1} iteration(s).`));
1727
+ console.log("");
1728
+ break;
1729
+ }
1730
+ console.log("");
1731
+ }
1732
+ }
1733
+ // Restart spinner for next iteration
1734
+ if (iterations > 1 && i < iterations - 1) {
1735
+ spinner.start();
1736
+ }
1737
+ }
1738
+ const duration = Date.now() - startTime;
1739
+ const actualModel = modelToUse || getDefaultModelForProvider(provider);
1740
+ const actualCost = calculateCost(totalInputTokens, totalOutputTokens, actualModel);
1741
+ spinner.stop(`✨ ${formatProviderName(provider)} complete in ${(duration / 1000).toFixed(1)}s`);
1742
+ const resultData = {
1743
+ provider,
1744
+ result: optimized,
1745
+ duration,
1746
+ totalInputTokens,
1747
+ totalOutputTokens,
1748
+ actualCost,
1749
+ model: actualModel
1750
+ };
1751
+ if (iterations > 1) {
1752
+ resultData.iterationCosts = iterationCosts;
1753
+ }
1754
+ results.push(resultData);
1755
+ }
1756
+ catch (error) {
1757
+ const duration = Date.now() - startTime;
1758
+ const configuredModel = await getModel();
1759
+ const modelToUse = configuredModel && getProviderForModel(configuredModel) === provider ? configuredModel : undefined;
1760
+ const actualModel = modelToUse || getDefaultModelForProvider(provider);
1761
+ spinner.fail(`❌ ${formatProviderName(provider)} failed`);
1762
+ results.push({
1763
+ provider,
1764
+ result: "",
1765
+ duration,
1766
+ totalInputTokens: 0,
1767
+ totalOutputTokens: 0,
1768
+ actualCost: 0,
1769
+ model: actualModel,
1770
+ error: error instanceof Error ? error.message : String(error)
1771
+ });
1772
+ }
1773
+ };
1774
+ // Execute providers sequentially if we need interactive prompts, otherwise run in parallel
1775
+ if (needsSequentialExecution) {
1776
+ for (const provider of availableProviders) {
1777
+ await processProvider(provider);
1778
+ }
1779
+ }
1780
+ else {
1781
+ await Promise.all(availableProviders.map(processProvider));
1782
+ }
1783
+ // Sort results by provider name for consistent display
1784
+ results.sort((a, b) => a.provider.localeCompare(b.provider));
1785
+ // Summary statistics
1786
+ const successfulResults = results.filter(r => !r.error);
1787
+ if (successfulResults.length > 0) {
1788
+ const avgDuration = successfulResults.reduce((sum, r) => sum + r.duration, 0) / successfulResults.length;
1789
+ const avgLength = successfulResults.reduce((sum, r) => sum + r.result.length, 0) / successfulResults.length;
1790
+ const totalCost = successfulResults.reduce((sum, r) => sum + r.actualCost, 0);
1791
+ const totalInputTokens = successfulResults.reduce((sum, r) => sum + r.totalInputTokens, 0);
1792
+ const totalOutputTokens = successfulResults.reduce((sum, r) => sum + r.totalOutputTokens, 0);
1793
+ // Calculate estimate accuracy if cost tracking is enabled
1794
+ const estimateAccuracy = options.showCost && totalEstimatedCost > 0
1795
+ ? ((totalEstimatedCost / totalCost) * 100).toFixed(1)
1796
+ : null;
1797
+ // Pretty banner summary
1798
+ console.log("");
1799
+ console.log(theme.colors.primary("╔══════════════════════════════════════════════════════════════════════════════╗"));
1800
+ console.log(theme.colors.primary("║") + " " + theme.colors.accent("📈 COMPARISON SUMMARY") + " " + theme.colors.primary("║"));
1801
+ console.log(theme.colors.primary("╚══════════════════════════════════════════════════════════════════════════════╝"));
1802
+ console.log("");
1803
+ // Display detailed comparison results for each provider
1804
+ console.log(theme.colors.info(" 📊 Comparison Results"));
1805
+ console.log(theme.colors.dim(" ────────────────────────────────────────────────────────────────────────────"));
1806
+ console.log("");
1807
+ for (const { provider, result, duration, error, totalInputTokens, totalOutputTokens, actualCost, model } of results) {
1808
+ const providerEmoji = provider === "openai" ? "🤖" : provider === "anthropic" ? "🧠" : provider === "google" ? "✨" : provider === "xai" ? "🚀" : provider === "deepseek" ? "🔮" : "🔧";
1809
+ console.log(theme.colors.secondary(` ${providerEmoji} ${formatProviderName(provider).toUpperCase()}`) + theme.colors.dim(` (${model})`));
1810
+ console.log(theme.colors.dim(` Duration: ${(duration / 1000).toFixed(1)}s | Length: ${result.length} chars`));
1811
+ if (!error) {
1812
+ console.log(theme.colors.dim(` Tokens: ${formatTokens(totalInputTokens)} in + ${formatTokens(totalOutputTokens)} out | Cost: ${formatCost(actualCost)}`));
1813
+ }
1814
+ console.log("");
1815
+ if (error) {
1816
+ console.log(theme.colors.error(` ❌ Error: ${error}`));
1817
+ }
1818
+ else {
1819
+ console.log(theme.colors.dim(" Result:"));
1820
+ console.log(result.split("\n").map(line => ` ${line}`).join("\n"));
1821
+ }
1822
+ console.log("");
1823
+ }
1824
+ console.log(theme.colors.dim(" ────────────────────────────────────────────────────────────────────────────"));
1825
+ console.log("");
1826
+ console.log(theme.colors.info(" 📈 Statistics"));
1827
+ console.log(theme.colors.dim(" ────────────────────────────────────────────────────────────────────────────"));
1828
+ console.log(theme.colors.dim(` Successful providers: `) + theme.colors.success(`${successfulResults.length}/${results.length}`));
1829
+ console.log(theme.colors.dim(` Average duration: `) + theme.colors.secondary(`${(avgDuration / 1000).toFixed(1)}s`));
1830
+ console.log(theme.colors.dim(` Average length: `) + theme.colors.secondary(`${Math.round(avgLength)} chars`));
1831
+ console.log("");
1832
+ console.log(theme.colors.info(" 🤖 Models Used"));
1833
+ console.log(theme.colors.dim(" ────────────────────────────────────────────────────────────────────────────"));
1834
+ for (const result of successfulResults) {
1835
+ const providerEmoji = result.provider === "openai" ? "🤖" :
1836
+ result.provider === "anthropic" ? "🧠" :
1837
+ result.provider === "google" ? "✨" :
1838
+ result.provider === "xai" ? "🚀" :
1839
+ result.provider === "deepseek" ? "🔮" : "🔧";
1840
+ console.log(theme.colors.dim(` ${providerEmoji} ${formatProviderName(result.provider)}: `) + theme.colors.secondary(result.model));
1841
+ }
1842
+ console.log("");
1843
+ if (options.showCost) {
1844
+ console.log(theme.colors.info(" 💰 Cost Analysis"));
1845
+ console.log(theme.colors.dim(" ────────────────────────────────────────────────────────────────────────────"));
1846
+ console.log(theme.colors.dim(` Total tokens: `) + theme.colors.secondary(`${formatTokens(totalInputTokens)} in + ${formatTokens(totalOutputTokens)} out`));
1847
+ console.log(theme.colors.dim(` Total cost: `) + theme.colors.accent(formatCost(totalCost)));
1848
+ console.log(theme.colors.dim(` Average cost per provider: `) + theme.colors.secondary(formatCost(totalCost / successfulResults.length)));
1849
+ if (estimateAccuracy) {
1850
+ console.log(theme.colors.dim(` Estimate accuracy: `) + theme.colors.info(`${estimateAccuracy}%`));
1851
+ }
1852
+ console.log("");
1853
+ // Show pricing details for each provider
1854
+ console.log(theme.colors.info(" 📋 Model Pricing Details"));
1855
+ console.log(theme.colors.dim(" ────────────────────────────────────────────────────────────────────────────"));
1856
+ for (const result of successfulResults) {
1857
+ const pricingInfo = getPricingBreakdown(result.model);
1858
+ const providerEmoji = result.provider === "openai" ? "🤖" :
1859
+ result.provider === "anthropic" ? "🧠" :
1860
+ result.provider === "google" ? "✨" :
1861
+ result.provider === "xai" ? "🚀" :
1862
+ result.provider === "deepseek" ? "🔮" : "🔧";
1863
+ console.log(theme.colors.dim(` ${providerEmoji} ${formatProviderName(result.provider)} (${result.model}):`));
1864
+ if (pricingInfo) {
1865
+ console.log(theme.colors.dim(` Input: $${pricingInfo.inputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.inputPricePerToken}/token)`));
1866
+ console.log(theme.colors.dim(` Output: $${pricingInfo.outputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.outputPricePerToken}/token)`));
1867
+ }
1868
+ console.log(theme.colors.dim(` Cost: `) + theme.colors.accent(formatCost(result.actualCost)));
1869
+ }
1870
+ console.log("");
1871
+ // Show per-iteration cost breakdown if there are multiple iterations
1872
+ if (iterations > 1) {
1873
+ console.log(theme.colors.info(" 📊 Per-Iteration Cost Breakdown"));
1874
+ console.log(theme.colors.dim(" ────────────────────────────────────────────────────────────────────────────"));
1875
+ for (const result of successfulResults) {
1876
+ if (!result.iterationCosts || result.iterationCosts.length === 0)
1877
+ continue;
1878
+ const providerEmoji = result.provider === "openai" ? "🤖" :
1879
+ result.provider === "anthropic" ? "🧠" :
1880
+ result.provider === "google" ? "✨" :
1881
+ result.provider === "xai" ? "🚀" :
1882
+ result.provider === "deepseek" ? "🔮" : "🔧";
1883
+ console.log(theme.colors.dim(` ${providerEmoji} ${formatProviderName(result.provider)} (${result.model}):`));
1884
+ for (const iterCost of result.iterationCosts) {
1885
+ console.log(theme.colors.dim(` Iteration ${iterCost.iteration}: `) +
1886
+ theme.colors.secondary(`${formatTokens(iterCost.inputTokens)} in + ${formatTokens(iterCost.outputTokens)} out`) +
1887
+ theme.colors.dim(` = `) + theme.colors.accent(formatCost(iterCost.cost)));
1888
+ }
1889
+ console.log("");
1890
+ }
1891
+ }
1892
+ }
1893
+ console.log(theme.colors.dim(" ════════════════════════════════════════════════════════════════════════════"));
1894
+ console.log("");
1895
+ }
1896
+ // Don't copy to clipboard or write to file in comparison mode
1897
+ console.log(theme.colors.dim("💡 Tip: Choose the result that best fits your needs and run optimization again with that specific provider"));
1898
+ console.log("");
1899
+ }
1082
1900
  // Optimize command
1083
1901
  program
1084
1902
  .command("optimize")
@@ -1090,6 +1908,16 @@ program
1090
1908
  .option("--no-copy", "Don't copy optimized prompt to clipboard (copy is default)")
1091
1909
  .option("-k, --api-key <key>", "Provider API key/token (overrides saved config)")
1092
1910
  .option("-p, --provider <provider>", `Provider (${PROVIDERS.join(", ")})`)
1911
+ .option("-s, --style <style>", "Optimization style (balanced, concise, detailed, technical, creative, formal, casual)", "balanced")
1912
+ .option("--system-prompt <prompt>", "Custom system prompt (overrides all other prompts)")
1913
+ .option("--iterations <number>", "Number of optimization passes (1-5, default: 1)", "1")
1914
+ .option("-c, --compare", "Compare optimizations from multiple providers side-by-side")
1915
+ .option("--providers <providers>", "Comma-separated list of providers to compare (e.g., 'openai,anthropic,google')")
1916
+ .option("--models <models>", "Specify models per provider in comparison mode (e.g., 'openai:gpt-4o,anthropic:claude-opus-4-5')")
1917
+ .option("-v, --verbose", "Show output from each iteration (useful with --iterations)")
1918
+ .option("-a, --analyze-first", "Analyze the prompt before optimizing to see what will be improved")
1919
+ .option("--show-cost", "Display estimated cost before running and actual cost after")
1920
+ .option("--estimate-only", "Only show cost estimate without running optimization")
1093
1921
  .action(async (inlinePrompt, options) => {
1094
1922
  try {
1095
1923
  debugLog("optimize.invoked", {
@@ -1135,46 +1963,488 @@ program
1135
1963
  const configuredModel = await getModel();
1136
1964
  const modelToUse = configuredModel && getProviderForModel(configuredModel) === provider ? configuredModel : undefined;
1137
1965
  debugLog("model.selected", { configuredModel, modelToUse, provider });
1966
+ // Validate optimization style
1967
+ const validStyles = ["balanced", "concise", "detailed", "technical", "creative", "formal", "casual"];
1968
+ const style = options.style;
1969
+ if (!validStyles.includes(style)) {
1970
+ console.error("");
1971
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Invalid style '${options.style}'`));
1972
+ console.error("");
1973
+ console.error(theme.colors.dim(" Valid styles: ") + theme.colors.info(validStyles.join(", ")));
1974
+ console.error("");
1975
+ process.exit(1);
1976
+ }
1977
+ const customPrompt = options.systemPrompt;
1978
+ if (customPrompt) {
1979
+ debugLog("customPrompt.provided", { length: customPrompt.length });
1980
+ }
1981
+ // Validate and parse iterations
1982
+ const iterations = parseInt(options.iterations, 10);
1983
+ if (isNaN(iterations) || iterations < 1 || iterations > 5) {
1984
+ console.error("");
1985
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Invalid iterations '${options.iterations}'`));
1986
+ console.error("");
1987
+ console.error(theme.colors.dim(" Iterations must be between 1 and 5"));
1988
+ console.error("");
1989
+ process.exit(1);
1990
+ }
1991
+ if (iterations > 1) {
1992
+ debugLog("iterations.enabled", { count: iterations });
1993
+ }
1994
+ // Cost estimation (only for non-comparison mode)
1995
+ let costEstimate;
1996
+ if (!options.compare) {
1997
+ const modelForCost = modelToUse || getDefaultModelForProvider(provider);
1998
+ costEstimate = estimateOptimizationCost(original, modelForCost, iterations);
1999
+ if (options.showCost || options.estimateOnly) {
2000
+ const pricingInfo = getPricingBreakdown(modelForCost);
2001
+ console.log("");
2002
+ console.log(theme.colors.primary("💰 Cost Estimate"));
2003
+ console.log(theme.colors.dim("─".repeat(80)));
2004
+ console.log(theme.colors.info(` Model: `) + theme.colors.secondary(modelForCost));
2005
+ if (pricingInfo) {
2006
+ console.log(theme.colors.dim(` Pricing:`));
2007
+ console.log(theme.colors.dim(` Input: $${pricingInfo.inputPricePer1M.toFixed(2)}/1M tokens (${pricingInfo.inputPricePerToken}/token)`));
2008
+ console.log(theme.colors.dim(` Output: $${pricingInfo.outputPricePer1M.toFixed(2)}/1M tokens (${pricingInfo.outputPricePerToken}/token)`));
2009
+ console.log("");
2010
+ }
2011
+ console.log(theme.colors.info(` Input tokens: `) + theme.colors.secondary(formatTokens(costEstimate.inputTokens)));
2012
+ console.log(theme.colors.info(` Output tokens (est): `) + theme.colors.secondary(formatTokens(costEstimate.outputTokens)));
2013
+ console.log(theme.colors.info(` Estimated cost: `) + theme.colors.accent(formatCost(costEstimate.estimatedCost)));
2014
+ console.log(theme.colors.dim("─".repeat(80)));
2015
+ console.log("");
2016
+ if (options.estimateOnly) {
2017
+ console.log(theme.colors.dim("💡 Tip: Remove --estimate-only to run the actual optimization"));
2018
+ console.log("");
2019
+ return;
2020
+ }
2021
+ // Prompt user to confirm proceeding with the operation
2022
+ if (process.stdin.isTTY && process.stdout.isTTY) {
2023
+ const rl = readline.createInterface({
2024
+ input: process.stdin,
2025
+ output: process.stdout
2026
+ });
2027
+ const answer = await rl.question(theme.colors.warning("Do you want to proceed with this operation? (y/n): "));
2028
+ rl.close();
2029
+ if (answer.toLowerCase() !== 'y' && answer.toLowerCase() !== 'yes') {
2030
+ console.log("");
2031
+ console.log(theme.colors.dim("Operation cancelled."));
2032
+ console.log("");
2033
+ return;
2034
+ }
2035
+ console.log("");
2036
+ }
2037
+ }
2038
+ }
2039
+ // Analyze first mode: show analysis before optimizing
2040
+ let analyzeInputTokens = 0;
2041
+ let analyzeOutputTokens = 0;
2042
+ if (options.analyzeFirst) {
2043
+ console.log("");
2044
+ console.log(theme.colors.primary("🔍 Step 1: Analyzing your prompt..."));
2045
+ console.log("");
2046
+ const analyzeSpinner = createSpinner(`Analyzing with ${formatProviderName(provider)}...`);
2047
+ analyzeSpinner.start();
2048
+ const analyzeStart = Date.now();
2049
+ let analysisResult;
2050
+ try {
2051
+ if (provider === "openai") {
2052
+ analysisResult = await analyzePromptOpenAI(original, apiKey, modelToUse);
2053
+ }
2054
+ else if (provider === "anthropic") {
2055
+ analysisResult = await analyzePromptAnthropic(original, apiKey, modelToUse);
2056
+ }
2057
+ else if (provider === "google") {
2058
+ analysisResult = await analyzePromptGemini(original, apiKey, modelToUse);
2059
+ }
2060
+ else if (provider === "xai") {
2061
+ analysisResult = await analyzePromptXAI(original, apiKey, modelToUse);
2062
+ }
2063
+ else if (provider === "deepseek") {
2064
+ analysisResult = await analyzePromptDeepSeek(original, apiKey, modelToUse);
2065
+ }
2066
+ else {
2067
+ analyzeSpinner.fail();
2068
+ console.error("");
2069
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Provider '${provider}' is not supported for analysis`));
2070
+ console.error("");
2071
+ process.exit(1);
2072
+ }
2073
+ analyzeInputTokens = analysisResult.usage.inputTokens;
2074
+ analyzeOutputTokens = analysisResult.usage.outputTokens;
2075
+ const analyzeDuration = ((Date.now() - analyzeStart) / 1000).toFixed(1);
2076
+ analyzeSpinner.stop(`Analysis complete in ${analyzeDuration}s`);
2077
+ console.log("");
2078
+ console.log(theme.colors.dim("─".repeat(80)));
2079
+ console.log("");
2080
+ console.log(analysisResult.result);
2081
+ console.log("");
2082
+ console.log(theme.colors.dim("─".repeat(80)));
2083
+ console.log("");
2084
+ console.log(theme.colors.primary("🔧 Step 2: Proceeding with optimization..."));
2085
+ console.log("");
2086
+ }
2087
+ catch (error) {
2088
+ analyzeSpinner.fail("Analysis failed");
2089
+ console.error("");
2090
+ console.error(theme.colors.error("❌ Analysis Error: ") + theme.colors.warning(error instanceof Error ? error.message : String(error)));
2091
+ console.error("");
2092
+ console.error(theme.colors.dim(" Proceeding with optimization anyway..."));
2093
+ console.log("");
2094
+ }
2095
+ }
2096
+ // Comparison mode: run optimization across multiple providers
2097
+ if (options.compare) {
2098
+ await runComparisonMode(original, style, customPrompt, iterations, options);
2099
+ return;
2100
+ }
1138
2101
  // Route to the appropriate provider's optimization function
1139
2102
  const providerEmoji = provider === "openai" ? "🤖" : provider === "anthropic" ? "🧠" : provider === "google" ? "✨" : provider === "xai" ? "🚀" : provider === "deepseek" ? "🔮" : "🔧";
1140
- const spinner = createSpinner(`${providerEmoji} Optimizing your prompt with ${formatProviderName(provider)}${modelToUse ? ` (${modelToUse})` : ""}...`);
2103
+ let optimized = original;
2104
+ let totalInputTokens = 0;
2105
+ let totalOutputTokens = 0;
2106
+ const t0 = Date.now();
2107
+ try {
2108
+ // Iterative optimization loop
2109
+ for (let i = 1; i <= iterations; i++) {
2110
+ const iterationLabel = iterations > 1 ? ` (iteration ${i}/${iterations})` : "";
2111
+ const spinner = createSpinner(`${providerEmoji} Optimizing your prompt with ${formatProviderName(provider)}${modelToUse ? ` (${modelToUse})` : ""}${iterationLabel}...`);
2112
+ spinner.start();
2113
+ const iterationStart = Date.now();
2114
+ let response;
2115
+ if (provider === "openai") {
2116
+ response = await optimizePromptOpenAI(optimized, apiKey, modelToUse, style, customPrompt);
2117
+ }
2118
+ else if (provider === "anthropic") {
2119
+ response = await optimizePromptAnthropic(optimized, apiKey, modelToUse, style, customPrompt);
2120
+ }
2121
+ else if (provider === "google") {
2122
+ response = await optimizePromptGemini(optimized, apiKey, modelToUse, style, customPrompt);
2123
+ }
2124
+ else if (provider === "xai") {
2125
+ response = await optimizePromptXAI(optimized, apiKey, modelToUse, style, customPrompt);
2126
+ }
2127
+ else if (provider === "deepseek") {
2128
+ response = await optimizePromptDeepSeek(optimized, apiKey, modelToUse, style, customPrompt);
2129
+ }
2130
+ else {
2131
+ spinner.fail();
2132
+ console.error("");
2133
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Provider '${provider}' is not supported for optimization`));
2134
+ console.error("");
2135
+ console.error(theme.colors.dim(" Supported providers: ") + theme.colors.info("openai, anthropic, google, xai, deepseek"));
2136
+ console.error("");
2137
+ process.exit(1);
2138
+ }
2139
+ optimized = response.result;
2140
+ totalInputTokens += response.usage.inputTokens;
2141
+ totalOutputTokens += response.usage.outputTokens;
2142
+ const iterationDuration = ((Date.now() - iterationStart) / 1000).toFixed(1);
2143
+ debugLog("optimize.iteration.done", { provider, iteration: i, ms: Date.now() - iterationStart, length: optimized.length });
2144
+ if (iterations > 1) {
2145
+ spinner.stop(`✨ Iteration ${i}/${iterations} complete in ${iterationDuration}s`);
2146
+ }
2147
+ else {
2148
+ spinner.stop(`✨ Optimization complete in ${iterationDuration}s!`);
2149
+ }
2150
+ // Show cost for this iteration if --show-cost is enabled
2151
+ if (options.showCost && iterations > 1) {
2152
+ const actualModel = modelToUse || getDefaultModelForProvider(provider);
2153
+ const iterationCost = calculateCost(response.usage.inputTokens, response.usage.outputTokens, actualModel);
2154
+ console.log("");
2155
+ console.log(theme.colors.primary(`💰 Iteration ${i}/${iterations} Actual Cost`));
2156
+ console.log(theme.colors.dim("─".repeat(80)));
2157
+ console.log(theme.colors.info(` Input tokens: `) + theme.colors.secondary(formatTokens(response.usage.inputTokens)));
2158
+ console.log(theme.colors.info(` Output tokens: `) + theme.colors.secondary(formatTokens(response.usage.outputTokens)));
2159
+ console.log(theme.colors.info(` Cost: `) + theme.colors.accent(formatCost(iterationCost)));
2160
+ console.log(theme.colors.dim("─".repeat(80)));
2161
+ console.log("");
2162
+ }
2163
+ // Show iteration output if verbose mode is enabled
2164
+ if (options.verbose && iterations > 1) {
2165
+ console.log("");
2166
+ console.log(theme.colors.primary(`📝 Iteration ${i}/${iterations} Output:`));
2167
+ console.log(theme.colors.dim("─".repeat(80)));
2168
+ console.log(optimized);
2169
+ console.log(theme.colors.dim("─".repeat(80)));
2170
+ console.log("");
2171
+ }
2172
+ // Show cost estimate for NEXT iteration and prompt for confirmation (if not the last iteration)
2173
+ if (options.showCost && iterations > 1 && i < iterations) {
2174
+ const actualModel = modelToUse || getDefaultModelForProvider(provider);
2175
+ const nextIterationEstimate = estimateOptimizationCost(optimized, actualModel, 1);
2176
+ const pricingInfo = getPricingBreakdown(actualModel);
2177
+ console.log("");
2178
+ console.log(theme.colors.primary(`💰 Iteration ${i + 1}/${iterations} Cost Estimate`));
2179
+ console.log(theme.colors.dim("─".repeat(80)));
2180
+ console.log(theme.colors.info(` Model: `) + theme.colors.secondary(actualModel));
2181
+ if (pricingInfo) {
2182
+ console.log(theme.colors.dim(` Pricing: $${pricingInfo.inputPricePer1M.toFixed(2)}/1M in, $${pricingInfo.outputPricePer1M.toFixed(2)}/1M out`));
2183
+ }
2184
+ console.log(theme.colors.info(` Estimated cost: `) + theme.colors.accent(formatCost(nextIterationEstimate.estimatedCost)));
2185
+ console.log(theme.colors.dim("─".repeat(80)));
2186
+ console.log("");
2187
+ // Prompt user to confirm proceeding with next iteration
2188
+ if (process.stdin.isTTY && process.stdout.isTTY) {
2189
+ const rl = readline.createInterface({
2190
+ input: process.stdin,
2191
+ output: process.stdout
2192
+ });
2193
+ const answer = await rl.question(theme.colors.warning(`Continue with iteration ${i + 1}/${iterations}? (y/n): `));
2194
+ rl.close();
2195
+ if (answer.toLowerCase() !== 'y' && answer.toLowerCase() !== 'yes') {
2196
+ console.log("");
2197
+ console.log(theme.colors.dim(`Stopped after ${i} iteration(s).`));
2198
+ console.log("");
2199
+ break;
2200
+ }
2201
+ console.log("");
2202
+ }
2203
+ }
2204
+ }
2205
+ const totalDuration = ((Date.now() - t0) / 1000).toFixed(1);
2206
+ if (iterations > 1) {
2207
+ console.log(theme.colors.success(`🎉 All ${iterations} iterations complete in ${totalDuration}s!`));
2208
+ }
2209
+ debugLog("optimize.done", { provider, iterations, totalMs: Date.now() - t0, finalLength: optimized.length });
2210
+ // Display actual cost if requested
2211
+ if (options.showCost) {
2212
+ const actualModel = modelToUse || getDefaultModelForProvider(provider);
2213
+ const optimizationCost = calculateCost(totalInputTokens, totalOutputTokens, actualModel);
2214
+ const totalCost = optimizationCost + (analyzeInputTokens > 0 ? calculateCost(analyzeInputTokens, analyzeOutputTokens, actualModel) : 0);
2215
+ const pricingInfo = getPricingBreakdown(actualModel);
2216
+ console.log("");
2217
+ console.log(theme.colors.primary("💰 Actual Cost"));
2218
+ console.log(theme.colors.dim("─".repeat(80)));
2219
+ console.log(theme.colors.info(` Model: `) + theme.colors.secondary(actualModel));
2220
+ if (pricingInfo) {
2221
+ console.log(theme.colors.dim(` Pricing:`));
2222
+ console.log(theme.colors.dim(` Input: $${pricingInfo.inputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.inputPricePerToken}/token)`));
2223
+ console.log(theme.colors.dim(` Output: $${pricingInfo.outputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.outputPricePerToken}/token)`));
2224
+ console.log("");
2225
+ }
2226
+ if (analyzeInputTokens > 0) {
2227
+ const analyzeCost = calculateCost(analyzeInputTokens, analyzeOutputTokens, actualModel);
2228
+ console.log(theme.colors.info(` Analysis tokens: `) + theme.colors.secondary(`${formatTokens(analyzeInputTokens)} in + ${formatTokens(analyzeOutputTokens)} out`));
2229
+ console.log(theme.colors.info(` Analysis cost: `) + theme.colors.secondary(formatCost(analyzeCost)));
2230
+ }
2231
+ console.log(theme.colors.info(` Optimization tokens: `) + theme.colors.secondary(`${formatTokens(totalInputTokens)} in + ${formatTokens(totalOutputTokens)} out`));
2232
+ console.log(theme.colors.info(` Optimization cost: `) + theme.colors.secondary(formatCost(optimizationCost)));
2233
+ if (analyzeInputTokens > 0) {
2234
+ console.log(theme.colors.info(` Total cost: `) + theme.colors.accent(formatCost(totalCost)));
2235
+ }
2236
+ else {
2237
+ console.log(theme.colors.info(` Total cost: `) + theme.colors.accent(formatCost(optimizationCost)));
2238
+ }
2239
+ // Show comparison with estimate
2240
+ if (costEstimate) {
2241
+ const accuracy = ((costEstimate.estimatedCost / (analyzeInputTokens > 0 ? totalCost : optimizationCost)) * 100).toFixed(1);
2242
+ console.log(theme.colors.dim(` Estimate accuracy: ${accuracy}%`));
2243
+ }
2244
+ console.log(theme.colors.dim("─".repeat(80)));
2245
+ console.log("");
2246
+ }
2247
+ }
2248
+ catch (e) {
2249
+ debugLog("optimize.error", { provider, ms: Date.now() - t0, error: e instanceof Error ? e.message : String(e) });
2250
+ console.error("");
2251
+ throw e;
2252
+ }
2253
+ await outputResult(original, optimized, options);
2254
+ }
2255
+ catch (error) {
2256
+ console.error("");
2257
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(error instanceof Error ? error.message : String(error)));
2258
+ console.error("");
2259
+ process.exit(1);
2260
+ }
2261
+ });
2262
+ // Analyze command - get detailed feedback on a prompt
2263
+ program
2264
+ .command("analyze")
2265
+ .description("Analyze a prompt and get detailed feedback")
2266
+ .argument("[prompt]", "The prompt to analyze (or omit to use other input methods)")
2267
+ .option("-f, --file <path>", "Read prompt from file")
2268
+ .option("-o, --output <path>", "Write analysis to file")
2269
+ .option("--no-copy", "Don't copy analysis to clipboard (copy is default)")
2270
+ .option("-k, --api-key <key>", "Provider API key/token (overrides saved config)")
2271
+ .option("-p, --provider <provider>", `Provider (${PROVIDERS.join(", ")})`)
2272
+ .option("--show-cost", "Display estimated cost before running")
2273
+ .option("--estimate-only", "Only show cost estimate without running analysis")
2274
+ .action(async (inlinePrompt, options) => {
2275
+ try {
2276
+ debugLog("analyze.invoked", {
2277
+ argv: process.argv.slice(2),
2278
+ tty: { stdin: !!process.stdin.isTTY, stdout: !!process.stdout.isTTY, stderr: !!process.stderr.isTTY },
2279
+ options: { file: options.file, output: options.output, copy: options.copy !== false, provider: options.provider, hasApiKeyFlag: !!options.apiKey }
2280
+ });
2281
+ const original = await getInput(inlinePrompt, options);
2282
+ if (!original.trim()) {
2283
+ console.error("");
2284
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning("No prompt provided"));
2285
+ console.error(theme.colors.dim(" Provide a prompt inline, via --file, or through stdin"));
2286
+ console.error("");
2287
+ process.exit(1);
2288
+ }
2289
+ let provider = await getProvider(options.provider);
2290
+ debugLog("provider.selected", { provider });
2291
+ // Get API key with priority: CLI flag > env var > keychain > config file
2292
+ let { apiKey, source } = await getApiKeyInfo(provider, options.apiKey);
2293
+ debugLog("token.resolved", { provider, source, token: maskSecret(apiKey) });
2294
+ // Interactive first-run setup (TTY only)
2295
+ if (!apiKey && process.stdin.isTTY && process.stdout.isTTY) {
2296
+ debugLog("token.missing.firstRunPrompt.start");
2297
+ const firstRun = await promptFirstRunConfig();
2298
+ debugLog("token.missing.firstRunPrompt.done", { provider: firstRun.provider, useKeychain: firstRun.useKeychain, token: maskSecret(firstRun.apiKey) });
2299
+ await setApiKey(firstRun.provider, firstRun.apiKey, firstRun.useKeychain);
2300
+ provider = firstRun.provider;
2301
+ ({ apiKey, source } = await getApiKeyInfo(provider));
2302
+ debugLog("token.resolved.afterFirstRun", { provider, source, token: maskSecret(apiKey) });
2303
+ }
2304
+ if (!apiKey) {
2305
+ console.error("");
2306
+ console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`No API key configured for ${formatProviderName(provider)}`));
2307
+ console.error("");
2308
+ console.error(theme.colors.dim(" Configure your API key using:"));
2309
+ console.error(theme.colors.accent(` megabuff config set --provider ${provider} <your-api-key>`));
2310
+ console.error("");
2311
+ console.error(theme.colors.dim(" Or set an environment variable for this provider"));
2312
+ console.error("");
2313
+ process.exit(1);
2314
+ }
2315
+ // Get the configured model (if any) for this provider
2316
+ const configuredModel = await getModel();
2317
+ const modelToUse = configuredModel && getProviderForModel(configuredModel) === provider ? configuredModel : undefined;
2318
+ debugLog("model.selected", { configuredModel, modelToUse, provider });
2319
+ // Cost estimation
2320
+ const modelForCost = modelToUse || getDefaultModelForProvider(provider);
2321
+ const costEstimate = estimateAnalysisCost(original, modelForCost);
2322
+ if (options.showCost || options.estimateOnly) {
2323
+ const pricingInfo = getPricingBreakdown(modelForCost);
2324
+ console.log("");
2325
+ console.log(theme.colors.primary("💰 Cost Estimate"));
2326
+ console.log(theme.colors.dim("─".repeat(80)));
2327
+ console.log(theme.colors.info(` Model: `) + theme.colors.secondary(modelForCost));
2328
+ if (pricingInfo) {
2329
+ console.log(theme.colors.dim(` Pricing:`));
2330
+ console.log(theme.colors.dim(` Input: $${pricingInfo.inputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.inputPricePerToken}/token)`));
2331
+ console.log(theme.colors.dim(` Output: $${pricingInfo.outputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.outputPricePerToken}/token)`));
2332
+ console.log("");
2333
+ }
2334
+ console.log(theme.colors.info(` Input tokens: `) + theme.colors.secondary(formatTokens(costEstimate.inputTokens)));
2335
+ console.log(theme.colors.info(` Output tokens (est): `) + theme.colors.secondary(formatTokens(costEstimate.outputTokens)));
2336
+ console.log(theme.colors.info(` Estimated cost: `) + theme.colors.accent(formatCost(costEstimate.estimatedCost)));
2337
+ console.log(theme.colors.dim("─".repeat(80)));
2338
+ console.log("");
2339
+ if (options.estimateOnly) {
2340
+ console.log(theme.colors.dim("💡 Tip: Remove --estimate-only to run the actual analysis"));
2341
+ console.log("");
2342
+ return;
2343
+ }
2344
+ // Prompt user to confirm proceeding with the operation
2345
+ if (process.stdin.isTTY && process.stdout.isTTY) {
2346
+ const rl = readline.createInterface({
2347
+ input: process.stdin,
2348
+ output: process.stdout
2349
+ });
2350
+ const answer = await rl.question(theme.colors.warning("Do you want to proceed with this operation? (y/n): "));
2351
+ rl.close();
2352
+ if (answer.toLowerCase() !== 'y' && answer.toLowerCase() !== 'yes') {
2353
+ console.log("");
2354
+ console.log(theme.colors.dim("Operation cancelled."));
2355
+ console.log("");
2356
+ return;
2357
+ }
2358
+ console.log("");
2359
+ }
2360
+ }
2361
+ console.log("");
2362
+ console.log(theme.colors.primary("🔍 Analyzing your prompt..."));
2363
+ console.log("");
2364
+ const spinner = createSpinner(`Analyzing with ${formatProviderName(provider)}...`);
1141
2365
  spinner.start();
1142
- let optimized;
1143
2366
  const t0 = Date.now();
2367
+ let analysisResult;
1144
2368
  try {
1145
2369
  if (provider === "openai") {
1146
- optimized = await optimizePromptOpenAI(original, apiKey, modelToUse);
2370
+ analysisResult = await analyzePromptOpenAI(original, apiKey, modelToUse);
1147
2371
  }
1148
2372
  else if (provider === "anthropic") {
1149
- optimized = await optimizePromptAnthropic(original, apiKey, modelToUse);
2373
+ analysisResult = await analyzePromptAnthropic(original, apiKey, modelToUse);
1150
2374
  }
1151
2375
  else if (provider === "google") {
1152
- optimized = await optimizePromptGemini(original, apiKey, modelToUse);
2376
+ analysisResult = await analyzePromptGemini(original, apiKey, modelToUse);
1153
2377
  }
1154
2378
  else if (provider === "xai") {
1155
- optimized = await optimizePromptXAI(original, apiKey, modelToUse);
2379
+ analysisResult = await analyzePromptXAI(original, apiKey, modelToUse);
1156
2380
  }
1157
2381
  else if (provider === "deepseek") {
1158
- optimized = await optimizePromptDeepSeek(original, apiKey, modelToUse);
2382
+ analysisResult = await analyzePromptDeepSeek(original, apiKey, modelToUse);
1159
2383
  }
1160
2384
  else {
1161
- console.error("");
1162
- console.error(theme.colors.error("❌ Error: ") + theme.colors.warning(`Provider '${provider}' is not supported for optimization`));
1163
- console.error("");
1164
- console.error(theme.colors.dim(" Supported providers: ") + theme.colors.info("openai, anthropic, google, xai, deepseek"));
1165
- console.error("");
1166
- process.exit(1);
2385
+ throw new Error(`Unsupported provider: ${provider}`);
1167
2386
  }
1168
2387
  const duration = ((Date.now() - t0) / 1000).toFixed(1);
1169
- debugLog("optimize.done", { provider, ms: Date.now() - t0, optimizedLength: optimized.length });
1170
- spinner.stop(`✨ Optimization complete in ${duration}s!`);
2388
+ spinner.stop(`Analysis complete in ${duration}s`);
2389
+ debugLog("analyze.done", { provider, ms: Date.now() - t0, analysisLength: analysisResult.result.length });
1171
2390
  }
1172
2391
  catch (e) {
1173
- debugLog("optimize.error", { provider, ms: Date.now() - t0, error: e instanceof Error ? e.message : String(e) });
1174
- spinner.fail(`💥 Optimization failed with ${formatProviderName(provider)}`);
2392
+ debugLog("analyze.error", { provider, ms: Date.now() - t0, error: e instanceof Error ? e.message : String(e) });
2393
+ spinner.fail("Analysis failed");
2394
+ console.error("");
1175
2395
  throw e;
1176
2396
  }
1177
- await outputResult(original, optimized, options);
2397
+ // Output the analysis
2398
+ console.log("");
2399
+ console.log(theme.colors.dim("─".repeat(80)));
2400
+ console.log("");
2401
+ console.log(analysisResult.result);
2402
+ console.log("");
2403
+ console.log(theme.colors.dim("─".repeat(80)));
2404
+ console.log("");
2405
+ // Display actual cost if requested
2406
+ if (options.showCost) {
2407
+ const actualModel = modelToUse || getDefaultModelForProvider(provider);
2408
+ const actualCost = calculateCost(analysisResult.usage.inputTokens, analysisResult.usage.outputTokens, actualModel);
2409
+ const pricingInfo = getPricingBreakdown(actualModel);
2410
+ console.log(theme.colors.primary("💰 Actual Cost"));
2411
+ console.log(theme.colors.dim("─".repeat(80)));
2412
+ console.log(theme.colors.info(` Model: `) + theme.colors.secondary(actualModel));
2413
+ if (pricingInfo) {
2414
+ console.log(theme.colors.dim(` Pricing:`));
2415
+ console.log(theme.colors.dim(` Input: $${pricingInfo.inputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.inputPricePerToken}/token)`));
2416
+ console.log(theme.colors.dim(` Output: $${pricingInfo.outputPricePer1M.toFixed(2)}/1M tokens ($${pricingInfo.outputPricePerToken}/token)`));
2417
+ console.log("");
2418
+ }
2419
+ console.log(theme.colors.info(` Input tokens: `) + theme.colors.secondary(formatTokens(analysisResult.usage.inputTokens)));
2420
+ console.log(theme.colors.info(` Output tokens: `) + theme.colors.secondary(formatTokens(analysisResult.usage.outputTokens)));
2421
+ console.log(theme.colors.info(` Actual cost: `) + theme.colors.accent(formatCost(actualCost)));
2422
+ // Show comparison with estimate
2423
+ if (costEstimate) {
2424
+ const accuracy = ((costEstimate.estimatedCost / actualCost) * 100).toFixed(1);
2425
+ console.log(theme.colors.dim(` Estimate accuracy: ${accuracy}%`));
2426
+ }
2427
+ console.log(theme.colors.dim("─".repeat(80)));
2428
+ console.log("");
2429
+ }
2430
+ const analysis = analysisResult.result;
2431
+ // Handle output options
2432
+ if (options.output) {
2433
+ await fs.writeFile(options.output, analysis, "utf-8");
2434
+ console.log(theme.colors.success(`💾 Analysis saved to ${options.output}`));
2435
+ console.log("");
2436
+ }
2437
+ // Copy to clipboard by default (unless --no-copy is specified)
2438
+ if (options.copy !== false) {
2439
+ try {
2440
+ await clipboardy.default.write(analysis);
2441
+ console.log(theme.colors.info("📋 Analysis copied to clipboard"));
2442
+ console.log("");
2443
+ }
2444
+ catch (error) {
2445
+ debugLog("clipboard.error", { error: error instanceof Error ? error.message : String(error) });
2446
+ }
2447
+ }
1178
2448
  }
1179
2449
  catch (error) {
1180
2450
  console.error("");