cost-katana 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cost_katana/client.py CHANGED
@@ -4,7 +4,6 @@ Handles communication with the Cost Katana backend API
  """

  import json
- import os
  from typing import Dict, Any, Optional, List
  import httpx
  from .config import Config
@@ -13,63 +12,63 @@ from .exceptions import (
  AuthenticationError,
  ModelNotAvailableError,
  RateLimitError,
- CostLimitExceededError
+ CostLimitExceededError,
  )

  # Global client instance for the configure function
  _global_client = None

+
  def configure(
- api_key: str = None,
- base_url: str = None,
- config_file: str = None,
- **kwargs
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+ config_file: Optional[str] = None,
+ **kwargs,
  ):
  """
  Configure Cost Katana client globally.
-
+
  Args:
  api_key: Your Cost Katana API key (starts with 'dak_')
  base_url: Base URL for Cost Katana API (optional)
  config_file: Path to JSON configuration file (optional)
  **kwargs: Additional configuration options
-
+
  Example:
  # Using API key
  cost_katana.configure(api_key='dak_your_key_here')
-
+
  # Using config file
  cost_katana.configure(config_file='config.json')
  """
  global _global_client
  _global_client = CostKatanaClient(
- api_key=api_key,
- base_url=base_url,
- config_file=config_file,
- **kwargs
+ api_key=api_key, base_url=base_url, config_file=config_file, **kwargs
  )
  return _global_client

+
  def get_global_client():
  """Get the global client instance"""
  if _global_client is None:
  raise CostKatanaError("Cost Katana not configured. Call cost_katana.configure() first.")
  return _global_client

+
  class CostKatanaClient:
  """HTTP client for Cost Katana API"""
-
+
  def __init__(
  self,
- api_key: str = None,
- base_url: str = None,
- config_file: str = None,
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+ config_file: Optional[str] = None,
  timeout: int = 30,
- **kwargs
+ **kwargs,
  ):
  """
  Initialize Cost Katana client.
-
+
  Args:
  api_key: Your Cost Katana API key
  base_url: Base URL for the API
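
Note: the configure() changes in the hunk above are type hints and formatting only (Optional[...] defaults, trailing commas, a single-line CostKatanaClient(...) call), so existing call sites keep working. A minimal usage sketch based on the docstring in this hunk, assuming configure and get_global_client are re-exported from the package root (otherwise import them from cost_katana.client):

    import cost_katana

    # Configure once at startup; the client instance is cached module-globally.
    cost_katana.configure(api_key="dak_your_key_here")

    # Later code retrieves the same configured client.
    client = cost_katana.get_global_client()
    print(client.get_available_models())
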
@@ -77,94 +76,92 @@ class CostKatanaClient:
  timeout: Request timeout in seconds
  """
  self.config = Config.from_file(config_file) if config_file else Config()
-
+
  # Override with provided parameters
  if api_key:
  self.config.api_key = api_key
  if base_url:
  self.config.base_url = base_url
-
+
  # Apply additional config
  for key, value in kwargs.items():
  setattr(self.config, key, value)
-
+
  # Validate configuration
  if not self.config.api_key:
  raise AuthenticationError(
- "API key is required. Get one from https://costkatana.com/dashboard/api-keys"
+ "API key is required. Get one from https://costkatana.com/integrations"
  )
-
+
  # Initialize HTTP client
  self.client = httpx.Client(
  base_url=self.config.base_url,
  timeout=timeout,
  headers={
- 'Authorization': f'Bearer {self.config.api_key}',
- 'Content-Type': 'application/json',
- 'User-Agent': f'cost-katana-python/1.0.0'
- }
+ "Authorization": f"Bearer {self.config.api_key}",
+ "Content-Type": "application/json",
+ "User-Agent": f"cost-katana-python/1.0.0",
+ },
  )
-
+
  def __enter__(self):
  return self
-
+
  def __exit__(self, exc_type, exc_val, exc_tb):
  self.close()
-
+
  def close(self):
  """Close the HTTP client"""
- if hasattr(self, 'client'):
+ if hasattr(self, "client"):
  self.client.close()
-
+
  def _handle_response(self, response: httpx.Response) -> Dict[str, Any]:
  """Handle HTTP response and raise appropriate exceptions"""
  try:
  data = response.json()
  except json.JSONDecodeError:
  raise CostKatanaError(f"Invalid JSON response: {response.text}")
-
+
  if response.status_code == 401:
- raise AuthenticationError(data.get('message', 'Authentication failed'))
+ raise AuthenticationError(data.get("message", "Authentication failed"))
  elif response.status_code == 403:
- raise AuthenticationError(data.get('message', 'Access forbidden'))
+ raise AuthenticationError(data.get("message", "Access forbidden"))
  elif response.status_code == 404:
- raise ModelNotAvailableError(data.get('message', 'Model not found'))
+ raise ModelNotAvailableError(data.get("message", "Model not found"))
  elif response.status_code == 429:
- raise RateLimitError(data.get('message', 'Rate limit exceeded'))
- elif response.status_code == 400 and 'cost' in data.get('message', '').lower():
- raise CostLimitExceededError(data.get('message', 'Cost limit exceeded'))
+ raise RateLimitError(data.get("message", "Rate limit exceeded"))
+ elif response.status_code == 400 and "cost" in data.get("message", "").lower():
+ raise CostLimitExceededError(data.get("message", "Cost limit exceeded"))
  elif not response.is_success:
- raise CostKatanaError(
- data.get('message', f'API error: {response.status_code}')
- )
-
+ raise CostKatanaError(data.get("message", f"API error: {response.status_code}"))
+
  return data
-
+
  def get_available_models(self) -> List[Dict[str, Any]]:
  """Get list of available models"""
  try:
- response = self.client.get('/api/chat/models')
+ response = self.client.get("/api/chat/models")
  data = self._handle_response(response)
- return data.get('data', [])
+ return data.get("data", [])
  except Exception as e:
  if isinstance(e, CostKatanaError):
  raise
  raise CostKatanaError(f"Failed to get models: {str(e)}")
-
+
  def send_message(
  self,
  message: str,
  model_id: str,
- conversation_id: str = None,
+ conversation_id: Optional[str] = None,
  temperature: float = 0.7,
  max_tokens: int = 2000,
- chat_mode: str = 'balanced',
+ chat_mode: str = "balanced",
  use_multi_agent: bool = False,
- **kwargs
+ **kwargs,
  ) -> Dict[str, Any]:
  """
  Send a message to the AI model via Cost Katana.
-
+
  Args:
  message: The message to send
  model_id: ID of the model to use
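
Note: __enter__/__exit__ in the hunk above make the client usable as a context manager (the underlying httpx.Client is closed on exit), and _handle_response maps status codes onto the exception classes imported at the top of the file (401/403 → AuthenticationError, 404 → ModelNotAvailableError, 429 → RateLimitError, 400 with a cost-related message → CostLimitExceededError). A hedged sketch of that flow; the model id and message text are illustrative only:

    from cost_katana.client import CostKatanaClient
    from cost_katana.exceptions import (
        AuthenticationError,
        CostKatanaError,
        RateLimitError,
    )

    try:
        with CostKatanaClient(api_key="dak_your_key_here", timeout=60) as client:
            reply = client.send_message(
                message="Summarize last month's API spend.",
                model_id="gpt-4o-mini",  # illustrative model id
                chat_mode="cheapest",
            )
            print(reply)
    except AuthenticationError:
        print("401/403: check the API key")
    except RateLimitError:
        print("429: back off and retry")
    except CostKatanaError as exc:
        print(f"Other API error: {exc}")
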
@@ -173,63 +170,289 @@ class CostKatanaClient:
  max_tokens: Maximum tokens to generate
  chat_mode: Chat optimization mode ('fastest', 'cheapest', 'balanced')
  use_multi_agent: Whether to use multi-agent processing
-
+
  Returns:
  Response data from the API
  """
  payload = {
- 'message': message,
- 'modelId': model_id,
- 'temperature': temperature,
- 'maxTokens': max_tokens,
- 'chatMode': chat_mode,
- 'useMultiAgent': use_multi_agent,
- **kwargs
+ "message": message,
+ "modelId": model_id,
+ "temperature": temperature,
+ "maxTokens": max_tokens,
+ "chatMode": chat_mode,
+ "useMultiAgent": use_multi_agent,
+ **kwargs,
  }
-
+
  if conversation_id:
- payload['conversationId'] = conversation_id
-
+ payload["conversationId"] = conversation_id
+
  try:
- response = self.client.post('/api/chat/message', json=payload)
+ response = self.client.post("/api/chat/message", json=payload)
  return self._handle_response(response)
  except Exception as e:
  if isinstance(e, CostKatanaError):
  raise
  raise CostKatanaError(f"Failed to send message: {str(e)}")
-
- def create_conversation(self, title: str = None, model_id: str = None) -> Dict[str, Any]:
+
+ def create_conversation(
+ self, title: Optional[str] = None, model_id: Optional[str] = None
+ ) -> Dict[str, Any]:
  """Create a new conversation"""
  payload = {}
  if title:
- payload['title'] = title
+ payload["title"] = title
  if model_id:
- payload['modelId'] = model_id
-
+ payload["modelId"] = model_id
+
  try:
- response = self.client.post('/api/chat/conversations', json=payload)
+ response = self.client.post("/api/chat/conversations", json=payload)
  return self._handle_response(response)
  except Exception as e:
  if isinstance(e, CostKatanaError):
  raise
  raise CostKatanaError(f"Failed to create conversation: {str(e)}")
-
+
  def get_conversation_history(self, conversation_id: str) -> Dict[str, Any]:
  """Get conversation history"""
  try:
- response = self.client.get(f'/api/chat/conversations/{conversation_id}/history')
+ response = self.client.get(f"/api/chat/conversations/{conversation_id}/history")
  return self._handle_response(response)
  except Exception as e:
  if isinstance(e, CostKatanaError):
  raise
  raise CostKatanaError(f"Failed to get conversation history: {str(e)}")
-
+
  def delete_conversation(self, conversation_id: str) -> Dict[str, Any]:
  """Delete a conversation"""
  try:
- response = self.client.delete(f'/api/chat/conversations/{conversation_id}')
+ response = self.client.delete(f"/api/chat/conversations/{conversation_id}")
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to delete conversation: {str(e)}")
+
+ # SAST (Semantic Abstract Syntax Tree) Methods
+
+ def optimize_with_sast(
+ self,
+ prompt: str,
+ service: str = "openai",
+ model: str = "gpt-4o-mini",
+ language: str = "en",
+ ambiguity_resolution: bool = True,
+ cross_lingual: bool = False,
+ disambiguation_strategy: str = "hybrid",
+ preserve_ambiguity: bool = False,
+ max_primitives: int = 100,
+ semantic_threshold: float = 0.7,
+ **kwargs,
+ ) -> Dict[str, Any]:
+ """
+ Optimize a prompt using SAST (Semantic Abstract Syntax Tree) processing.
+
+ Args:
+ prompt: The text prompt to optimize
+ service: AI service to use ('openai', 'anthropic', etc.)
+ model: Model to use for optimization
+ language: Language for SAST processing
+ ambiguity_resolution: Enable ambiguity resolution
+ cross_lingual: Enable cross-lingual semantic mapping
+ disambiguation_strategy: Strategy for disambiguation ('strict', 'permissive', 'hybrid')
+ preserve_ambiguity: Keep ambiguous structures for analysis
+ max_primitives: Maximum semantic primitives to use
+ semantic_threshold: Semantic confidence threshold
+ **kwargs: Additional parameters
+
+ Returns:
+ Dict containing optimization results with SAST metadata
+ """
+ payload = {
+ "prompt": prompt,
+ "service": service,
+ "model": model,
+ "enableCortex": True,
+ "cortexOperation": "sast",
+ "cortexStyle": "conversational",
+ "cortexFormat": "plain",
+ "cortexSemanticCache": True,
+ "cortexPreserveSemantics": True,
+ "cortexIntelligentRouting": True,
+ "cortexSastProcessing": True,
+ "cortexAmbiguityResolution": ambiguity_resolution,
+ "cortexCrossLingualMode": cross_lingual,
+ **kwargs,
+ }
+
+ headers = {
+ "CostKatana-Cortex-Operation": "sast",
+ "CostKatana-Cortex-Sast-Language": language,
+ "CostKatana-Cortex-Disambiguation-Strategy": disambiguation_strategy,
+ "CostKatana-Cortex-Preserve-Ambiguity": str(preserve_ambiguity).lower(),
+ "CostKatana-Cortex-Max-Primitives": str(max_primitives),
+ "CostKatana-Cortex-Semantic-Threshold": str(semantic_threshold),
+ }
+
+ try:
+ response = self.client.post("/api/optimizations", json=payload, headers=headers)
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to optimize with SAST: {str(e)}")
+
+ def compare_sast_vs_traditional(
+ self,
+ prompt: str,
+ service: str = "openai",
+ model: str = "gpt-4o-mini",
+ language: str = "en",
+ **kwargs,
+ ) -> Dict[str, Any]:
+ """
+ Compare traditional Cortex optimization vs SAST optimization.
+
+ Args:
+ prompt: The text prompt to compare
+ service: AI service to use
+ model: Model to use
+ language: Language for analysis
+ **kwargs: Additional parameters
+
+ Returns:
+ Dict containing comparison results
+ """
+ payload = {"text": prompt, "language": language, **kwargs}
+
+ try:
+ response = self.client.post("/api/optimizations/sast/compare", json=payload)
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to compare SAST vs traditional: {str(e)}")
+
+ def get_sast_vocabulary_stats(self) -> Dict[str, Any]:
+ """
+ Get SAST semantic primitives vocabulary statistics.
+
+ Returns:
+ Dict containing vocabulary statistics
+ """
+ try:
+ response = self.client.get("/api/optimizations/sast/vocabulary")
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to get SAST vocabulary stats: {str(e)}")
+
+ def search_semantic_primitives(
+ self,
+ term: Optional[str] = None,
+ category: Optional[str] = None,
+ language: Optional[str] = None,
+ limit: int = 10,
+ ) -> Dict[str, Any]:
+ """
+ Search semantic primitives by term, category, or language.
+
+ Args:
+ term: Search term for primitives
+ category: Filter by category ('concept', 'action', 'property', etc.)
+ language: Filter by language support
+ limit: Maximum number of results
+
+ Returns:
+ Dict containing search results
+ """
+ payload = {}
+ if term:
+ payload["term"] = term
+ if category:
+ payload["category"] = category
+ if language:
+ payload["language"] = language
+ payload["limit"] = limit
+
+ try:
+ response = self.client.post("/api/optimizations/sast/search", json=payload)
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to search semantic primitives: {str(e)}")
+
+ def get_telescope_demo(self) -> Dict[str, Any]:
+ """
+ Get the telescope ambiguity resolution demonstration.
+
+ Returns:
+ Dict containing telescope demo results
+ """
+ try:
+ response = self.client.get("/api/optimizations/sast/telescope-demo")
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to get telescope demo: {str(e)}")
+
+ def test_universal_semantics(
+ self,
+ concept: str,
+ languages: Optional[List[str]] = None,
+ ) -> Dict[str, Any]:
+ """
+ Test universal semantic representation across languages.
+
+ Args:
+ concept: Concept to test universally
+ languages: List of language codes to test (default: ['en', 'es', 'fr'])
+
+ Returns:
+ Dict containing universal semantics test results
+ """
+ if languages is None:
+ languages = ["en", "es", "fr"]
+
+ payload = {"concept": concept, "languages": languages}
+
+ try:
+ response = self.client.post("/api/optimizations/sast/universal-test", json=payload)
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to test universal semantics: {str(e)}")
+
+ def get_sast_stats(self) -> Dict[str, Any]:
+ """
+ Get SAST performance and usage statistics.
+
+ Returns:
+ Dict containing SAST statistics
+ """
+ try:
+ response = self.client.get("/api/optimizations/sast/stats")
+ return self._handle_response(response)
+ except Exception as e:
+ if isinstance(e, CostKatanaError):
+ raise
+ raise CostKatanaError(f"Failed to get SAST stats: {str(e)}")
+
+ def get_sast_showcase(self) -> Dict[str, Any]:
+ """
+ Get SAST showcase with examples and performance analytics.
+
+ Returns:
+ Dict containing SAST showcase data
+ """
+ try:
+ response = self.client.get("/api/optimizations/sast/showcase")
  return self._handle_response(response)
  except Exception as e:
  if isinstance(e, CostKatanaError):
  raise
- raise CostKatanaError(f"Failed to delete conversation: {str(e)}")
+ raise CostKatanaError(f"Failed to get SAST showcase: {str(e)}")
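
Note: every SAST method added in 1.0.2 follows the same shape as the existing chat methods: build a JSON payload (plus CostKatana-Cortex-* headers in optimize_with_sast), call an endpoint under /api/optimizations, and funnel the response through _handle_response. A hedged usage sketch of the new surface, reusing the client from the earlier example; all argument values are illustrative only:

    from cost_katana.client import CostKatanaClient

    with CostKatanaClient(api_key="dak_your_key_here") as client:
        # Optimize a prompt with SAST processing (defaults mirror the new signature).
        result = client.optimize_with_sast(
            prompt="Explain how usage events are aggregated into a monthly bill.",
            disambiguation_strategy="hybrid",
            semantic_threshold=0.7,
        )

        # Compare SAST against the traditional Cortex optimization path.
        comparison = client.compare_sast_vs_traditional(
            prompt="Explain how usage events are aggregated into a monthly bill."
        )

        # Inspect the primitive vocabulary and overall SAST statistics.
        vocabulary = client.get_sast_vocabulary_stats()
        stats = client.get_sast_stats()
        print(result, comparison, vocabulary, stats)
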