cost_katana-1.0.1-py3-none-any.whl → cost_katana-1.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cost_katana/cli.py CHANGED
@@ -6,30 +6,28 @@ import argparse
 import json
 import sys
 from pathlib import Path
-from typing import Optional
 from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
 from rich.prompt import Prompt, Confirm
-from rich.syntax import Syntax

 try:
-    from . import configure, GenerativeModel, CostKatanaClient
+    from . import configure, create_generative_model, CostKatanaClient
     from .config import Config
     from .exceptions import CostKatanaError
 except ImportError:
     # Handle case when running as script
-    import cost_katana as ck
     from cost_katana.config import Config
     from cost_katana.exceptions import CostKatanaError

 console = Console()

+
 def create_sample_config():
     """Create a sample configuration file"""
     sample_config = {
         "api_key": "dak_your_api_key_here",
-        "base_url": "https://api.costkatana.com",
+        "base_url": "https://cost-katana-backend.store",
         "default_model": "gemini-2.0-flash",
         "default_temperature": 0.7,
         "default_max_tokens": 2000,
@@ -40,83 +38,71 @@ def create_sample_config():
         "model_mappings": {
             "gemini": "gemini-2.0-flash-exp",
             "claude": "anthropic.claude-3-sonnet-20240229-v1:0",
-            "gpt4": "gpt-4-turbo-preview"
+            "gpt4": "gpt-4-turbo-preview",
         },
         "providers": {
-            "google": {
-                "priority": 1,
-                "models": ["gemini-2.0-flash", "gemini-pro"]
-            },
+            "google": {"priority": 1, "models": ["gemini-2.0-flash", "gemini-pro"]},
             "anthropic": {
                 "priority": 2,
-                "models": ["claude-3-sonnet", "claude-3-haiku"]
+                "models": ["claude-3-sonnet", "claude-3-haiku"],
             },
-            "openai": {
-                "priority": 3,
-                "models": ["gpt-4", "gpt-3.5-turbo"]
-            }
-        }
+            "openai": {"priority": 3, "models": ["gpt-4", "gpt-3.5-turbo"]},
+        },
     }
     return sample_config

+
 def init_config(args):
     """Initialize configuration"""
-    config_path = Path(args.config or 'cost_katana_config.json')
-
+    config_path = Path(args.config or "cost_katana_config.json")
+
     if config_path.exists() and not args.force:
         console.print(f"[yellow]Configuration file already exists: {config_path}[/yellow]")
         if not Confirm.ask("Overwrite existing configuration?"):
             return
-
+
     console.print("[bold blue]Setting up Cost Katana configuration...[/bold blue]")
-
+
     # Get API key
     api_key = Prompt.ask(
-        "Enter your Cost Katana API key",
-        default=args.api_key if args.api_key else None
+        "Enter your Cost Katana API key", default=args.api_key if args.api_key else None
     )
-
+
     # Get base URL
-    base_url = Prompt.ask(
-        "Enter base URL",
-        default="https://api.costkatana.com"
-    )
-
+    base_url = Prompt.ask("Enter base URL", default="https://cost-katana-backend.store")
+
     # Get default model
     default_model = Prompt.ask(
         "Enter default model",
         default="gemini-2.0-flash",
-        choices=["gemini-2.0-flash", "claude-3-sonnet", "gpt-4", "nova-pro"]
+        choices=["gemini-2.0-flash", "claude-3-sonnet", "gpt-4", "nova-pro"],
     )
-
+
     # Create configuration
     config_data = create_sample_config()
-    config_data.update({
-        "api_key": api_key,
-        "base_url": base_url,
-        "default_model": default_model
-    })
-
+    config_data.update({"api_key": api_key, "base_url": base_url, "default_model": default_model})
+
     # Save configuration
     try:
-        with open(config_path, 'w') as f:
+        with open(config_path, "w") as f:
             json.dump(config_data, f, indent=2)
-
+
         console.print(f"[green]Configuration saved to: {config_path}[/green]")
         console.print("\n[bold]Next steps:[/bold]")
         console.print("1. Test the configuration: [cyan]cost-katana test[/cyan]")
         console.print("2. Start a chat session: [cyan]cost-katana chat[/cyan]")
         console.print("3. See available models: [cyan]cost-katana models[/cyan]")
-
+
     except Exception as e:
         console.print(f"[red]Failed to save configuration: {e}[/red]")
         sys.exit(1)

+
 def test_connection(args):
     """Test connection to Cost Katana API"""
     try:
-        config_path = args.config or 'cost_katana_config.json'
-
+        config_path = args.config or "cost_katana_config.json"
+
         if Path(config_path).exists():
             configure(config_file=config_path)
         elif args.api_key:
@@ -124,31 +110,36 @@ def test_connection(args):
         else:
             console.print("[red]No configuration found. Run 'cost-katana init' first.[/red]")
             return
-
+
         console.print("[bold blue]Testing Cost Katana connection...[/bold blue]")
-
+
         # Test with a simple model
-        model = GenerativeModel('gemini-2.0-flash')
-        response = model.generate_content("Hello! Please respond with just 'OK' to test the connection.")
-
-        console.print(Panel(
-            f"[green]✓ Connection successful![/green]\n"
-            f"Model: {response.usage_metadata.model}\n"
-            f"Response: {response.text}\n"
-            f"Cost: ${response.usage_metadata.cost:.4f}\n"
-            f"Latency: {response.usage_metadata.latency:.2f}s",
-            title="Test Results"
-        ))
-
+        model = create_generative_model("gemini-2.0-flash")
+        response = model.generate_content(
+            "Hello! Please respond with just 'OK' to test the connection."
+        )
+
+        console.print(
+            Panel(
+                f"[green]✓ Connection successful![/green]\n"
+                f"Model: {response.usage_metadata.model}\n"
+                f"Response: {response.text}\n"
+                f"Cost: ${response.usage_metadata.cost:.4f}\n"
+                f"Latency: {response.usage_metadata.latency:.2f}s",
+                title="Test Results",
+            )
+        )
+
     except Exception as e:
         console.print(f"[red]✗ Connection failed: {e}[/red]")
         sys.exit(1)

+
 def list_models(args):
     """List available models"""
     try:
-        config_path = args.config or 'cost_katana_config.json'
-
+        config_path = args.config or "cost_katana_config.json"
+
         if Path(config_path).exists():
             configure(config_file=config_path)
         elif args.api_key:
@@ -156,35 +147,36 @@ def list_models(args):
         else:
             console.print("[red]No configuration found. Run 'cost-katana init' first.[/red]")
             return
-
+
         client = CostKatanaClient(config_file=config_path if Path(config_path).exists() else None)
         models = client.get_available_models()
-
+
         table = Table(title="Available Models")
         table.add_column("Model ID", style="cyan", no_wrap=True)
         table.add_column("Display Name", style="magenta")
         table.add_column("Provider", style="green")
         table.add_column("Type", style="yellow")
-
+
         for model in models:
-            model_id = model.get('id', model.get('modelId', 'Unknown'))
-            name = model.get('name', model.get('displayName', model_id))
-            provider = model.get('provider', 'Unknown')
-            model_type = model.get('type', 'Text')
-
+            model_id = model.get("id", model.get("modelId", "Unknown"))
+            name = model.get("name", model.get("displayName", model_id))
+            provider = model.get("provider", "Unknown")
+            model_type = model.get("type", "Text")
+
             table.add_row(model_id, name, provider, model_type)
-
+
         console.print(table)
-
+
     except Exception as e:
         console.print(f"[red]Failed to fetch models: {e}[/red]")
         sys.exit(1)

+
 def start_chat(args):
     """Start an interactive chat session"""
     try:
-        config_path = args.config or 'cost_katana_config.json'
-
+        config_path = args.config or "cost_katana_config.json"
+
         if Path(config_path).exists():
             configure(config_file=config_path)
             config = Config.from_file(config_path)
@@ -194,42 +186,44 @@ def start_chat(args):
         else:
             console.print("[red]No configuration found. Run 'cost-katana init' first.[/red]")
             return
-
+
         model_name = args.model or config.default_model
-
-        console.print(Panel(
-            f"[bold blue]Cost Katana Chat Session[/bold blue]\n"
-            f"Model: {model_name}\n"
-            f"Type 'quit' to exit, 'clear' to clear history",
-            title="Welcome"
-        ))
-
-        model = GenerativeModel(model_name)
+
+        console.print(
+            Panel(
+                f"[bold blue]Cost Katana Chat Session[/bold blue]\n"
+                f"Model: {model_name}\n"
+                f"Type 'quit' to exit, 'clear' to clear history",
+                title="Welcome",
+            )
+        )
+
+        model = create_generative_model(model_name)
         chat = model.start_chat()
-
+
         total_cost = 0.0
-
+
         while True:
             try:
                 message = Prompt.ask("[bold cyan]You[/bold cyan]")
-
-                if message.lower() in ['quit', 'exit', 'q']:
+
+                if message.lower() in ["quit", "exit", "q"]:
                     break
-                elif message.lower() == 'clear':
+                elif message.lower() == "clear":
                     chat.clear_history()
                     console.print("[yellow]Chat history cleared.[/yellow]")
                     continue
-                elif message.lower() == 'cost':
+                elif message.lower() == "cost":
                     console.print(f"[green]Total session cost: ${total_cost:.4f}[/green]")
                     continue
-
+
                 console.print("[bold green]Assistant[/bold green]: ", end="")
-
+
                 with console.status("Thinking..."):
                     response = chat.send_message(message)
-
+
                 console.print(response.text)
-
+
                 # Show cost info
                 total_cost += response.usage_metadata.cost
                 console.print(
@@ -237,67 +231,535 @@ def start_chat(args):
                     f"Total: ${total_cost:.4f} | "
                     f"Tokens: {response.usage_metadata.total_tokens}[/dim]\n"
                 )
-
+
             except KeyboardInterrupt:
                 console.print("\n[yellow]Chat session interrupted.[/yellow]")
                 break
             except Exception as e:
                 console.print(f"[red]Error: {e}[/red]")
                 continue
-
+
         console.print(f"\n[bold]Session Summary:[/bold]")
         console.print(f"Total Cost: ${total_cost:.4f}")
         console.print("Thanks for using Cost Katana!")
-
+
     except Exception as e:
         console.print(f"[red]Failed to start chat: {e}[/red]")
         sys.exit(1)

+
+def get_prompt_from_args_or_file(args):
+    """Get prompt from command line argument or file"""
+    if hasattr(args, "prompt") and args.prompt:
+        return args.prompt
+
+    if hasattr(args, "file") and args.file:
+        try:
+            with open(args.file, "r", encoding="utf-8") as f:
+                return f.read().strip()
+        except FileNotFoundError:
+            console.print(f"[red]Error: File '{args.file}' not found[/red]")
+            sys.exit(1)
+        except Exception as e:
+            console.print(f"[red]Error reading file: {e}[/red]")
+            sys.exit(1)
+
+    # Interactive input
+    return Prompt.ask("Enter prompt to process")
+
+
+def handle_sast_command(args):
+    """Handle SAST subcommands"""
+    if not args.sast_command:
+        console.print("[red]Please specify a SAST subcommand. Use 'sast --help' for options.[/red]")
+        return
+
+    try:
+        config_path = args.config or "cost_katana_config.json"
+
+        if Path(config_path).exists():
+            client = CostKatanaClient(config_file=config_path)
+        elif args.api_key:
+            client = CostKatanaClient(api_key=args.api_key)
+        else:
+            console.print(
+                "[red]Error: No configuration found. Run 'cost-katana init' first or provide --api-key[/red]"
+            )
+            return
+
+        if args.sast_command == "optimize":
+            sast_optimize_command(client, args)
+        elif args.sast_command == "compare":
+            sast_compare_command(client, args)
+        elif args.sast_command == "vocabulary":
+            sast_vocabulary_command(client, args)
+        elif args.sast_command == "telescope":
+            sast_telescope_command(client, args)
+        elif args.sast_command == "stats":
+            sast_stats_command(client, args)
+        elif args.sast_command == "showcase":
+            sast_showcase_command(client, args)
+        elif args.sast_command == "universal":
+            sast_universal_command(client, args)
+
+    except CostKatanaError as e:
+        console.print(f"[red]SAST Error: {e}[/red]")
+        sys.exit(1)
+    except Exception as e:
+        console.print(f"[red]Unexpected error: {e}[/red]")
+        sys.exit(1)
+
+
+def sast_optimize_command(client, args):
+    """Handle SAST optimize command"""
+    prompt = get_prompt_from_args_or_file(args)
+
+    console.print("[blue]🧬 Optimizing with SAST...[/blue]")
+
+    result = client.optimize_with_sast(
+        prompt=prompt,
+        language=args.language,
+        cross_lingual=args.cross_lingual,
+        preserve_ambiguity=args.preserve_ambiguity,
+    )
+
+    if result.get("success"):
+        data = result["data"]
+
+        # Display results
+        console.print("\n[green]✅ SAST Optimization Complete[/green]")
+        console.print("=" * 60)
+
+        console.print(f"\n[cyan]Original Prompt:[/cyan]")
+        console.print(Panel(data.get("originalPrompt", "N/A"), title="Original"))
+
+        console.print(f"\n[cyan]Optimized Prompt:[/cyan]")
+        console.print(Panel(data.get("optimizedPrompt", "N/A"), title="SAST Optimized"))
+
+        # Metrics
+        console.print(f"\n[cyan]📈 Optimization Metrics:[/cyan]")
+        metrics_table = Table(show_header=True)
+        metrics_table.add_column("Metric", style="cyan")
+        metrics_table.add_column("Value", style="green")
+
+        improvement = data.get("improvementPercentage", 0)
+        tokens_saved = data.get("tokensSaved", 0)
+        cost_saved = data.get("costSaved", 0)
+
+        metrics_table.add_row("Token Reduction", f"{improvement:.1f}%")
+        metrics_table.add_row("Tokens Saved", str(tokens_saved))
+        metrics_table.add_row("Cost Saved", f"${cost_saved:.4f}")
+
+        console.print(metrics_table)
+
+        # SAST specific data
+        if "metadata" in data and "sast" in data["metadata"]:
+            sast_data = data["metadata"]["sast"]
+            console.print(f"\n[magenta]🧬 SAST Analysis:[/magenta]")
+            sast_table = Table(show_header=True)
+            sast_table.add_column("Aspect", style="magenta")
+            sast_table.add_column("Value", style="blue")
+
+            sast_table.add_row(
+                "Semantic Primitives",
+                str(sast_data.get("semanticPrimitives", {}).get("totalVocabulary", 0)),
+            )
+            sast_table.add_row("Ambiguities Resolved", str(sast_data.get("ambiguitiesResolved", 0)))
+            sast_table.add_row(
+                "Universal Compatible", "✓" if sast_data.get("universalCompatibility") else "✗"
+            )
+
+            console.print(sast_table)
+
+        # Save output if requested
+        if args.output:
+            with open(args.output, "w", encoding="utf-8") as f:
+                f.write(data.get("optimizedPrompt", ""))
+            console.print(f"\n[green]✅ Saved optimized prompt to: {args.output}[/green]")
+    else:
+        console.print(
+            f"[red]❌ Optimization failed: {result.get('message', 'Unknown error')}[/red]"
+        )
+
+
+def sast_compare_command(client, args):
+    """Handle SAST compare command"""
+    prompt = get_prompt_from_args_or_file(args)
+
+    console.print("[blue]⚖️ Comparing Traditional vs SAST...[/blue]")
+
+    result = client.compare_sast_vs_traditional(prompt=prompt, language=args.language)
+
+    if result.get("success"):
+        data = result["data"]
+
+        console.print("\n[green]✅ Comparison Complete[/green]")
+        console.print("=" * 60)
+
+        console.print(f"\n[cyan]📝 Input Text:[/cyan]")
+        console.print(f'"{data.get("inputText", "")}"')
+
+        # Comparison table
+        comparison_table = Table(show_header=True, title="Traditional vs SAST Comparison")
+        comparison_table.add_column("Metric", style="cyan")
+        comparison_table.add_column("Traditional", style="yellow")
+        comparison_table.add_column("SAST", style="magenta")
+        comparison_table.add_column("Improvement", style="green")
+
+        trad = data.get("traditionalCortex", {})
+        sast = data.get("sastCortex", {})
+        improvements = data.get("improvements", {})
+
+        comparison_table.add_row(
+            "Token Count",
+            str(trad.get("tokenCount", 0)),
+            str(sast.get("primitiveCount", 0)),
+            f"{improvements.get('tokenReduction', 0):+.1f}%",
+        )
+
+        comparison_table.add_row(
+            "Semantic Explicitness",
+            f"{trad.get('semanticExplicitness', 0) * 100:.1f}%",
+            f"{sast.get('semanticExplicitness', 0) * 100:.1f}%",
+            f"{improvements.get('semanticClarityGain', 0) * 100:+.1f}%",
+        )

+        comparison_table.add_row(
+            "Ambiguities",
+            trad.get("ambiguityLevel", "N/A"),
+            str(sast.get("ambiguitiesResolved", 0)) + " resolved",
+            f"{improvements.get('ambiguityReduction', 0):+.1f}%",
+        )
+
+        console.print(comparison_table)
+
+        recommended = data.get("metadata", {}).get("recommendedApproach", "unknown")
+        console.print(f"\n[yellow]🎯 Recommended Approach: {recommended.upper()}[/yellow]")
+    else:
+        console.print(f"[red]❌ Comparison failed: {result.get('message', 'Unknown error')}[/red]")
+
+
+def sast_vocabulary_command(client, args):
+    """Handle SAST vocabulary command"""
+    if args.search or args.category or args.language:
+        console.print(f"[blue]📚 Searching SAST vocabulary...[/blue]")
+
+        result = client.search_semantic_primitives(
+            term=args.search, category=args.category, language=args.language, limit=args.limit
+        )
+
+        if result.get("success"):
+            data = result["data"]
+            console.print(
+                f"\n[green]📚 Found {len(data.get('results', []))} semantic primitives[/green]"
+            )
+
+            for i, item in enumerate(data.get("results", []), 1):
+                primitive = item["primitive"]
+                console.print(f"\n[cyan]{i}. {primitive['baseForm']} ({primitive['id']})[/cyan]")
+                console.print(f" Category: {primitive['category']}")
+                console.print(f" Definition: {primitive['definition']}")
+                console.print(f" Relevance: {item['relevanceScore'] * 100:.1f}%")
+
+                if primitive.get("synonyms"):
+                    synonyms = primitive["synonyms"][:3]
+                    console.print(
+                        f" Synonyms: {', '.join(synonyms)}{'...' if len(primitive['synonyms']) > 3 else ''}"
+                    )
+    else:
+        console.print("[blue]📊 Getting SAST vocabulary statistics...[/blue]")
+
+        result = client.get_sast_vocabulary_stats()
+
+        if result.get("success"):
+            data = result["data"]
+
+            console.print(f"\n[green]📊 SAST Vocabulary Statistics[/green]")
+            console.print(f"Total Primitives: [blue]{data.get('totalPrimitives', 0)}[/blue]")
+            console.print(
+                f"Average Translations: [cyan]{data.get('averageTranslations', 0):.1f}[/cyan]"
+            )
+
+            if "primitivesByCategory" in data:
+                console.print("\n[yellow]📂 By Category:[/yellow]")
+                for category, count in data["primitivesByCategory"].items():
+                    console.print(f" {category}: [blue]{count}[/blue]")
+
+            if "coverageByLanguage" in data:
+                console.print("\n[green]🌍 Language Coverage:[/green]")
+                for lang, count in data["coverageByLanguage"].items():
+                    console.print(f" {lang.upper()}: [blue]{count}[/blue] terms")
+
+
+def sast_telescope_command(client, args):
+    """Handle SAST telescope demo command"""
+    console.print("[blue]🔭 Running telescope ambiguity demonstration...[/blue]")
+
+    result = client.get_telescope_demo()
+
+    if result.get("success"):
+        data = result["data"]
+        explanation = data.get("explanation", {})
+        stats = data.get("sastStats", {})
+
+        console.print(f"\n[green]🔭 Telescope Ambiguity Demonstration[/green]")
+        console.print("=" * 60)
+
+        console.print(f"\n[cyan]📝 Original Sentence:[/cyan]")
+        console.print(f'"{explanation.get("sentence", "")}"')
+
+        console.print(f"\n[yellow]🤔 Ambiguity Type:[/yellow]")
+        console.print(explanation.get("ambiguityType", ""))
+
+        console.print(f"\n[cyan]💭 Possible Interpretations:[/cyan]")
+        for i, interpretation in enumerate(explanation.get("interpretations", []), 1):
+            console.print(f" {i}. {interpretation}")
+
+        console.print(f"\n[green]✅ SAST Resolution:[/green]")
+        console.print(explanation.get("resolution", ""))
+
+        console.print(f"\n[magenta]📊 SAST Performance:[/magenta]")
+        perf_table = Table(show_header=True)
+        perf_table.add_column("Metric", style="magenta")
+        perf_table.add_column("Value", style="blue")
+
+        perf_table.add_row("Ambiguities Resolved", str(stats.get("ambiguitiesResolved", 0)))
+        perf_table.add_row("Semantic Accuracy", f"{stats.get('semanticAccuracy', 0) * 100:.1f}%")
+        perf_table.add_row("Processing Time", f"{stats.get('averageProcessingTime', 0):.1f}ms")
+
+        console.print(perf_table)
+
+
+def sast_stats_command(client, args):
+    """Handle SAST stats command"""
+    console.print("[blue]📊 Fetching SAST performance statistics...[/blue]")
+
+    result = client.get_sast_stats()
+
+    if result.get("success"):
+        data = result["data"]
+        encoding = data.get("encoding", {})
+        comparison = data.get("comparison", {})
+
+        console.print(f"\n[green]📊 SAST Performance Statistics[/green]")
+        console.print("=" * 60)
+
+        console.print(f"\n[cyan]🧬 Encoding Performance:[/cyan]")
+        enc_table = Table(show_header=True)
+        enc_table.add_column("Metric", style="cyan")
+        enc_table.add_column("Value", style="blue")
+
+        total_encodings = encoding.get("totalEncodings", 0)
+        successful = encoding.get("successfulEncodings", 0)
+        success_rate = (successful / total_encodings * 100) if total_encodings > 0 else 0
+
+        enc_table.add_row("Total Encodings", f"{total_encodings:,}")
+        enc_table.add_row("Success Rate", f"{success_rate:.1f}%")
+        enc_table.add_row("Ambiguities Resolved", f"{encoding.get('ambiguitiesResolved', 0):,}")
+        enc_table.add_row("Avg Processing", f"{encoding.get('averageProcessingTime', 0):.2f}ms")
+        enc_table.add_row("Semantic Accuracy", f"{encoding.get('semanticAccuracy', 0) * 100:.1f}%")
+
+        console.print(enc_table)
+
+        console.print(f"\n[magenta]⚖️ Comparison Performance:[/magenta]")
+        comp_table = Table(show_header=True)
+        comp_table.add_column("Metric", style="magenta")
+        comp_table.add_column("Value", style="green")
+
+        comp_table.add_row("Total Comparisons", f"{comparison.get('totalComparisons', 0):,}")
+        comp_table.add_row(
+            "SAST Wins",
+            f"{comparison.get('sastWins', 0)} ({comparison.get('sastWinRate', 0):.1f}%)",
+        )
+        comp_table.add_row("Traditional Wins", str(comparison.get("traditionalWins", 0)))
+        comp_table.add_row("Avg Improvement", f"{comparison.get('averageImprovement', 0):.1f}%")
+        comp_table.add_row(
+            "Ambiguity Resolution", f"{comparison.get('ambiguityResolutionRate', 0):.1f}%"
+        )
+
+        console.print(comp_table)
+
+
+def sast_showcase_command(client, args):
+    """Handle SAST showcase command"""
+    console.print("[blue]🎯 Fetching SAST showcase...[/blue]")
+
+    result = client.get_sast_showcase()
+
+    if result.get("success"):
+        data = result["data"]
+        summary = data.get("summary", {})
+
+        console.print(f"\n[green]🎯 SAST Showcase[/green]")
+        console.print("=" * 60)
+
+        # Summary stats
+        summary_table = Table(show_header=True, title="SAST Summary Statistics")
+        summary_table.add_column("Metric", style="cyan")
+        summary_table.add_column("Value", style="green")
+
+        summary_table.add_row("Total Examples", str(summary.get("totalExamples", 0)))
+        summary_table.add_row(
+            "Avg Token Reduction", f"{summary.get('averageTokenReduction', 0):.1f}%"
+        )
+        summary_table.add_row(
+            "Universal Compatibility", f"{summary.get('universalCompatibility', 0) * 100:.0f}%"
+        )
+        summary_table.add_row(
+            "Ambiguity Resolution", f"{summary.get('ambiguityResolutionRate', 0):.1f}%"
+        )
+
+        console.print(summary_table)
+
+        # Show a few examples
+        examples = data.get("examples", [])
+        if examples:
+            console.print(f"\n[cyan]📝 Example Comparisons:[/cyan]")
+            for i, example in enumerate(examples[:3], 1):  # Show first 3 examples
+                console.print(
+                    f"\n[yellow]Example {i}: {example.get('category', 'Unknown')}[/yellow]"
+                )
+                console.print(f"Input: \"{example.get('input', '')}\"")
+
+                improvements = example.get("improvements", {})
+                console.print(
+                    f"Token Reduction: [green]{improvements.get('tokenReduction', 0):.1f}%[/green]"
+                )
+                console.print(
+                    f"Semantic Clarity: [blue]{improvements.get('semanticClarityGain', 0) * 100:.1f}%[/blue]"
+                )
+
+
+def sast_universal_command(client, args):
+    """Handle SAST universal semantics test command"""
+    languages = [lang.strip() for lang in args.languages.split(",")]
+
+    console.print(
+        f"[blue]🌍 Testing universal semantics for '{args.concept}' across {len(languages)} languages...[/blue]"
+    )
+
+    result = client.test_universal_semantics(concept=args.concept, languages=languages)
+
+    if result.get("success"):
+        data = result["data"]
+
+        console.print(f"\n[green]🌍 Universal Test Results for: '{data.get('concept', '')}[/green]")
+        console.print("=" * 60)
+
+        unification_score = data.get("unificationScore", 0)
+        is_universal = data.get("isUniversal", False)
+
+        console.print(f"Unification Score: [blue]{unification_score * 100:.1f}%[/blue]")
+        console.print(f"Universal Compatible: {'✓' if is_universal else '🔸 Partial'}")
+
+        console.print(f"\n[cyan]🗣️ Translations:[/cyan]")
+        translations = data.get("translations", {})
+        for lang, translation in translations.items():
+            console.print(f' {lang.upper()}: "{translation}"')
+
+        console.print(f"\n[magenta]🧬 SAST Representations:[/magenta]")
+        representations = data.get("sastRepresentations", {})
+        for lang, repr_data in representations.items():
+            primitives_count = len(repr_data.get("primitives", {}))
+            frame_type = repr_data.get("frameType", "unknown")
+            console.print(f" {lang.upper()}: {frame_type} frame ({primitives_count} primitives)")
+
+
 def main():
     """Main CLI entry point"""
     parser = argparse.ArgumentParser(
         description="Cost Katana - Unified AI interface with cost optimization"
     )
     parser.add_argument(
-        '--config', '-c',
-        help='Configuration file path (default: cost_katana_config.json)'
-    )
-    parser.add_argument(
-        '--api-key', '-k',
-        help='Cost Katana API key'
+        "--config",
+        "-c",
+        help="Configuration file path (default: cost_katana_config.json)",
     )
-
-    subparsers = parser.add_subparsers(dest='command', help='Available commands')
-
+    parser.add_argument("--api-key", "-k", help="Cost Katana API key")
+
+    subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
     # Init command
-    init_parser = subparsers.add_parser('init', help='Initialize configuration')
-    init_parser.add_argument('--force', action='store_true', help='Overwrite existing config')
-
+    init_parser = subparsers.add_parser("init", help="Initialize configuration")
+    init_parser.add_argument("--force", action="store_true", help="Overwrite existing config")
+
     # Test command
-    subparsers.add_parser('test', help='Test API connection')
-
+    subparsers.add_parser("test", help="Test API connection")
+
     # Models command
-    subparsers.add_parser('models', help='List available models')
-
+    subparsers.add_parser("models", help="List available models")
+
     # Chat command
-    chat_parser = subparsers.add_parser('chat', help='Start interactive chat')
-    chat_parser.add_argument('--model', '-m', help='Model to use for chat')
-
+    chat_parser = subparsers.add_parser("chat", help="Start interactive chat")
+    chat_parser.add_argument("--model", "-m", help="Model to use for chat")
+
+    # SAST commands
+    sast_parser = subparsers.add_parser(
+        "sast", help="SAST (Semantic Abstract Syntax Tree) operations"
+    )
+    sast_subparsers = sast_parser.add_subparsers(dest="sast_command", help="SAST commands")
+
+    # SAST optimize command
+    sast_optimize_parser = sast_subparsers.add_parser("optimize", help="Optimize prompt using SAST")
+    sast_optimize_parser.add_argument("prompt", nargs="?", help="Prompt to optimize")
+    sast_optimize_parser.add_argument("--file", "-f", help="File containing prompt to optimize")
+    sast_optimize_parser.add_argument(
+        "--language", "-l", default="en", help="Language for SAST processing"
+    )
+    sast_optimize_parser.add_argument("--model", "-m", default="gpt-4o-mini", help="Model to use")
+    sast_optimize_parser.add_argument(
+        "--cross-lingual", action="store_true", help="Enable cross-lingual mode"
+    )
+    sast_optimize_parser.add_argument(
+        "--preserve-ambiguity", action="store_true", help="Preserve ambiguity for analysis"
+    )
+    sast_optimize_parser.add_argument("--output", "-o", help="Output file for results")
+
+    # SAST compare command
+    sast_compare_parser = sast_subparsers.add_parser(
+        "compare", help="Compare traditional vs SAST optimization"
+    )
+    sast_compare_parser.add_argument("prompt", nargs="?", help="Prompt to compare")
+    sast_compare_parser.add_argument("--file", "-f", help="File containing prompt")
+    sast_compare_parser.add_argument("--language", "-l", default="en", help="Language for analysis")
+
+    # SAST vocabulary command
+    sast_vocab_parser = sast_subparsers.add_parser("vocabulary", help="Explore SAST vocabulary")
+    sast_vocab_parser.add_argument("--search", "-s", help="Search term for primitives")
+    sast_vocab_parser.add_argument("--category", "-c", help="Filter by category")
+    sast_vocab_parser.add_argument("--language", "-l", help="Filter by language")
+    sast_vocab_parser.add_argument("--limit", type=int, default=10, help="Limit results")
+
+    # SAST demo commands
+    sast_subparsers.add_parser("telescope", help="Telescope ambiguity demo")
+    sast_subparsers.add_parser("stats", help="SAST performance statistics")
+    sast_subparsers.add_parser("showcase", help="SAST showcase with examples")
+
+    # SAST universal test command
+    sast_universal_parser = sast_subparsers.add_parser("universal", help="Test universal semantics")
+    sast_universal_parser.add_argument("concept", help="Concept to test universally")
+    sast_universal_parser.add_argument(
+        "--languages", default="en,es,fr", help="Comma-separated language codes"
+    )
+
     args = parser.parse_args()
-
+
     if not args.command:
         parser.print_help()
         return
-
+
     # Route to appropriate function
-    if args.command == 'init':
+    if args.command == "init":
         init_config(args)
-    elif args.command == 'test':
+    elif args.command == "test":
         test_connection(args)
-    elif args.command == 'models':
+    elif args.command == "models":
         list_models(args)
-    elif args.command == 'chat':
+    elif args.command == "chat":
         start_chat(args)
+    elif args.command == "sast":
+        handle_sast_command(args)
+

-if __name__ == '__main__':
-    main()
+if __name__ == "__main__":
+    main()