lucidicai 1.2.19__py3-none-any.whl → 1.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucidicai/__init__.py CHANGED
@@ -16,7 +16,8 @@ from .telemetry.otel_handlers import (
     OTelAnthropicHandler,
     OTelLangChainHandler,
     OTelPydanticAIHandler,
-    OTelOpenAIAgentsHandler
+    OTelOpenAIAgentsHandler,
+    OTelLiteLLMHandler
 )
 
 # Import telemetry manager
@@ -25,7 +26,7 @@ from .telemetry.otel_init import LucidicTelemetry
 # Import decorators
 from .decorators import step, event
 
-ProviderType = Literal["openai", "anthropic", "langchain", "pydantic_ai", "openai_agents"]
+ProviderType = Literal["openai", "anthropic", "langchain", "pydantic_ai", "openai_agents", "litellm"]
 
 # Configure logging
 logger = logging.getLogger("Lucidic")
@@ -77,6 +78,9 @@ def _setup_providers(client: Client, providers: List[ProviderType]) -> None:
            except Exception as e:
                logger.error(f"Failed to set up OpenAI Agents provider: {e}")
                raise
+        elif provider == "litellm":
+            client.set_provider(OTelLiteLLMHandler())
+            setup_providers.add("litellm")
 
 __all__ = [
     'Client',
@@ -267,6 +271,13 @@ def end_session(
     client = Client()
     if not client.session:
         return
+
+    # Wait for any pending LiteLLM callbacks before ending session
+    for provider in client.providers:
+        if hasattr(provider, '_callback') and hasattr(provider._callback, 'wait_for_pending_callbacks'):
+            logger.info("Waiting for LiteLLM callbacks to complete before ending session...")
+            provider._callback.wait_for_pending_callbacks(timeout=5.0)
+
     client.session.update_session(is_finished=True, **locals())
     client.clear()
 
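The __init__.py changes above add "litellm" as a provider name and make end_session() drain pending LiteLLM callbacks. A minimal usage sketch; this is hedged: the diff only shows _setup_providers() and end_session(), so the lai.init(...) call and its session_name/providers parameters are assumed, not shown here.

    import lucidicai as lai
    import litellm

    # Hypothetical entry point; "litellm" as a provider name comes from the ProviderType change above.
    lai.init(session_name="litellm-demo", providers=["litellm"])

    # Calls routed through LiteLLM should now be captured as Lucidic events by the callback bridge.
    litellm.completion(
        model="openai/gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello!"}],
    )

    # end_session() now waits (up to 5 s) for pending LiteLLM callbacks before closing.
    lai.end_session()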
lucidicai/event.py CHANGED
@@ -33,7 +33,7 @@ class Event:
         from .client import Client
         if 'screenshots' in kwargs and kwargs['screenshots'] is not None:
             for i in range(len(kwargs['screenshots'])):
-                presigned_url, bucket_name, object_key = get_presigned_url(Client().agent_id, session_id=self.session_id, event_id=self.event_id, nthscreenshot=i + len(self.screenshots))
+                presigned_url, bucket_name, object_key = get_presigned_url(Client().agent_id, session_id=self.session_id, event_id=self.event_id, nthscreenshot=len(self.screenshots))
                 upload_image_to_s3(presigned_url, kwargs['screenshots'][i], "JPEG")
                 self.screenshots.append(kwargs['screenshots'][i])
         if 'is_finished' in kwargs:
lucidicai/model_pricing.py CHANGED
@@ -29,9 +29,9 @@ MODEL_PRICING = {
     # OpenAI o-Series (Reasoning Models) - Verified 2025
     "o1": {"input": 15.0, "output": 60.0},
     "o1-preview": {"input": 15.0, "output": 60.0},
-    "o1-mini": {"input": 3.0, "output": 15.0}, # Corrected: was 12.0
-    "o3": {"input": 15.0, "output": 60.0}, # Estimated based on o1
-    "o3-mini": {"input": 1.1, "output": 4.4}, # Verified
+    "o1-mini": {"input": 3.0, "output": 15.0},
+    "o3": {"input": 15.0, "output": 60.0},
+    "o3-mini": {"input": 1.1, "output": 4.4},
     "o4-mini": {"input": 4.00, "output": 16.0},
 
     # OpenAI Legacy Models
@@ -48,7 +48,7 @@ MODEL_PRICING = {
     # Claude 3.5 Models - Verified 2025
     "claude-3-5-sonnet": {"input": 3.0, "output": 15.0},
     "claude-3-5-sonnet-latest": {"input": 3.0, "output": 15.0},
-    "claude-3-5-haiku": {"input": 1.0, "output": 5.0}, # Corrected: was 0.8/4.0
+    "claude-3-5-haiku": {"input": 1.0, "output": 5.0},
     "claude-3-5-haiku-latest": {"input": 1.0, "output": 5.0},
     "claude-3-7-sonnet": {"input": 3.0, "output": 15.0}, # Same as 3.5 sonnet
     "claude-3-7-sonnet-latest": {"input": 3.0, "output": 15.0},
@@ -106,17 +106,17 @@ MODEL_PRICING = {
     "meta-llama/llama-guard-4-12b-128k": {"input": 0.20, "output": 0.20},
 
     # Meta Llama 3.x Series - Verified 2025 (Together AI pricing)
-    "llama-3.3-70b": {"input": 0.54, "output": 0.88}, # Corrected
-    "llama-3.1-405b": {"input": 6.0, "output": 12.0}, # Corrected (Cerebras pricing)
-    "llama-3.1-70b": {"input": 0.54, "output": 0.88}, # Corrected
-    "llama-3.1-8b": {"input": 0.10, "output": 0.18}, # Corrected
-    "llama-3-70b": {"input": 0.54, "output": 0.88}, # Using consistent pricing
-    "llama-3-8b": {"input": 0.10, "output": 0.18}, # Using consistent pricing
+    "llama-3.3-70b": {"input": 0.54, "output": 0.88},
+    "llama-3.1-405b": {"input": 6.0, "output": 12.0},
+    "llama-3.1-70b": {"input": 0.54, "output": 0.88},
+    "llama-3.1-8b": {"input": 0.10, "output": 0.18},
+    "llama-3-70b": {"input": 0.54, "output": 0.88},
+    "llama-3-8b": {"input": 0.10, "output": 0.18},
     "llama-guard-3-8b": {"input": 0.20, "output": 0.20},
-    "meta-llama/llama-3.3-70b-versatile-128k": {"input": 0.54, "output": 0.88}, # Updated
-    "meta-llama/llama-3.1-8b-instant-128k": {"input": 0.10, "output": 0.18}, # Updated
-    "meta-llama/llama-3-70b-8k": {"input": 0.54, "output": 0.88}, # Updated
-    "meta-llama/llama-3-8b-8k": {"input": 0.10, "output": 0.18}, # Updated
+    "meta-llama/llama-3.3-70b-versatile-128k": {"input": 0.54, "output": 0.88},
+    "meta-llama/llama-3.1-8b-instant-128k": {"input": 0.10, "output": 0.18},
+    "meta-llama/llama-3-70b-8k": {"input": 0.54, "output": 0.88},
+    "meta-llama/llama-3-8b-8k": {"input": 0.10, "output": 0.18},
     "meta-llama/llama-guard-3-8b-8k": {"input": 0.20, "output": 0.20},
 
     # Mistral Models
@@ -149,7 +149,7 @@ MODEL_PRICING = {
     "qwen-turbo": {"input": 0.3, "output": 0.6},
     "qwen-plus": {"input": 0.5, "output": 2.0},
     "qwen-max": {"input": 2.0, "output": 6.0},
-    "qwen2.5-32b-instruct": {"input": 0.7, "output": "2.8"},
+    "qwen2.5-32b-instruct": {"input": 0.7, "output": 2.8},
     "qwen2.5-max": {"input": 1.6, "output": 6.4},
 
     # Google Gemma Models
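The pricing entries above feed calculate_cost(model, usage), which the new LiteLLM bridge below calls after each completion. A hedged sketch of the arithmetic; it assumes the table values are USD per million tokens (consistent with the o1 entries), and estimate_cost here is only an illustration, not the SDK's API.

    from lucidicai.model_pricing import MODEL_PRICING

    def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
        # Illustrative helper only; the SDK exposes calculate_cost(model, usage) instead.
        pricing = MODEL_PRICING[model]  # e.g. {"input": 3.0, "output": 15.0}, assumed $ per 1M tokens
        return (prompt_tokens * pricing["input"] + completion_tokens * pricing["output"]) / 1_000_000

    # 1,000 prompt + 500 completion tokens on "o1-mini": (1000*3.0 + 500*15.0) / 1e6 = $0.0105
    print(estimate_cost("o1-mini", 1000, 500))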
lucidicai/telemetry/litellm_bridge.py ADDED
@@ -0,0 +1,369 @@
+"""Bridge between LiteLLM's CustomLogger and Lucidic's telemetry system"""
+import logging
+import os
+import time
+import threading
+from typing import Dict, Any, Optional, List
+from datetime import datetime
+
+try:
+    from litellm import CustomLogger
+except ImportError:
+    # Create a dummy CustomLogger if litellm is not installed
+    class CustomLogger:
+        def __init__(self, **kwargs):
+            pass
+
+from lucidicai.client import Client
+from lucidicai.model_pricing import calculate_cost
+
+logger = logging.getLogger("Lucidic")
+DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"
+
+
+class LucidicLiteLLMCallback(CustomLogger):
+    """
+    Custom callback for LiteLLM that bridges to Lucidic's event system.
+
+    This callback integrates LiteLLM's logging with Lucidic's session/step/event hierarchy,
+    enabling automatic tracking of all LiteLLM-supported providers.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._active_events = {}  # Track active events for streaming
+        self._pending_callbacks = set()  # Track pending callback executions
+        self._callback_lock = threading.Lock()  # Thread-safe callback tracking
+
+    def _register_callback(self, callback_id: str):
+        """Register a callback as pending"""
+        with self._callback_lock:
+            self._pending_callbacks.add(callback_id)
+            if DEBUG:
+                logger.info(f"LiteLLM Bridge: Registered callback {callback_id}, pending: {len(self._pending_callbacks)}")
+
+    def _complete_callback(self, callback_id: str):
+        """Mark a callback as completed"""
+        with self._callback_lock:
+            self._pending_callbacks.discard(callback_id)
+            if DEBUG:
+                logger.info(f"LiteLLM Bridge: Completed callback {callback_id}, pending: {len(self._pending_callbacks)}")
+
+    def wait_for_pending_callbacks(self, timeout: float = 5.0):
+        """Wait for all pending callbacks to complete"""
+        start_time = time.time()
+
+        while time.time() - start_time < timeout:
+            with self._callback_lock:
+                if not self._pending_callbacks:
+                    if DEBUG:
+                        logger.info("LiteLLM Bridge: All callbacks completed")
+                    return True
+
+            time.sleep(0.1)  # Check every 100ms
+
+        # Timeout reached
+        with self._callback_lock:
+            pending_count = len(self._pending_callbacks)
+            if pending_count > 0:
+                logger.warning(f"LiteLLM Bridge: Timeout waiting for {pending_count} callbacks")
+                # Clear pending callbacks to avoid memory leak
+                self._pending_callbacks.clear()
+
+        return False
+
+    def log_pre_api_call(self, model, messages, kwargs):
+        """Called before the LLM API call"""
+        try:
+            client = Client()
+            if not client.session:
+                return
+
+            # Extract description from messages
+            description = self._format_messages(messages)
+
+            # Apply masking if configured
+            if hasattr(client, 'mask') and callable(client.mask):
+                description = client.mask(description)
+
+            # Store pre-call info for later use
+            call_id = kwargs.get("litellm_call_id", str(time.time())) if kwargs else str(time.time())
+            self._active_events[call_id] = {
+                "model": model,
+                "messages": messages,
+                "description": description,
+                "start_time": time.time()
+            }
+
+        except Exception as e:
+            logger.error(f"LiteLLM Bridge error in pre_api_call: {e}")
+            if DEBUG:
+                import traceback
+                traceback.print_exc()
+
+    def log_success_event(self, kwargs, response_obj, start_time, end_time):
+        """Called on successful LLM completion"""
+        # Generate unique callback ID
+        callback_id = f"success_{id(kwargs)}_{start_time}"
+        self._register_callback(callback_id)
+
+        try:
+            client = Client()
+            if not client.session:
+                self._complete_callback(callback_id)
+                return
+
+            # Get call info
+            call_id = kwargs.get("litellm_call_id", str(start_time))
+            pre_call_info = self._active_events.pop(call_id, {})
+
+            # Extract model and provider info
+            model = kwargs.get("model", pre_call_info.get("model", "unknown"))
+            provider = self._extract_provider(model)
+
+            # Get messages for description
+            messages = kwargs.get("messages", pre_call_info.get("messages", []))
+            description = pre_call_info.get("description") or self._format_messages(messages)
+
+            # Extract response content
+            result = self._extract_response_content(response_obj)
+
+            # Apply masking to result if configured
+            if hasattr(client, 'mask') and callable(client.mask):
+                result = client.mask(result)
+
+            # Calculate cost if usage info is available
+            usage = self._extract_usage(response_obj)
+            cost = None
+            if usage:
+                cost = self._calculate_litellm_cost(model, usage)
+
+            # Extract any images from multimodal requests
+            images = self._extract_images_from_messages(messages)
+
+            # Create event
+            event_kwargs = {
+                "description": description,
+                "result": result,
+                "model": f"{provider}/{model}" if "/" not in model else model,
+                "is_finished": True
+            }
+
+            if cost is not None:
+                event_kwargs["cost_added"] = cost
+
+            if images:
+                event_kwargs["screenshots"] = images
+
+            # Add metadata
+            metadata = {
+                "provider": provider,
+                "duration_ms": (end_time - start_time) * 1000,
+                "litellm": True
+            }
+            if usage:
+                metadata["usage"] = usage
+
+            event_kwargs["metadata"] = metadata
+
+            # Create the event
+            client.session.create_event(**event_kwargs)
+
+            if DEBUG:
+                logger.info(f"LiteLLM Bridge: Created event for {model} completion")
+
+        except Exception as e:
+            logger.error(f"LiteLLM Bridge error in log_success_event: {e}")
+            if DEBUG:
+                import traceback
+                traceback.print_exc()
+        finally:
+            self._complete_callback(callback_id)
+
+    def log_failure_event(self, kwargs, response_obj, start_time, end_time):
+        """Called on failed LLM completion"""
+        # Generate unique callback ID
+        callback_id = f"failure_{id(kwargs)}_{start_time}"
+        self._register_callback(callback_id)
+
+        try:
+            client = Client()
+            if not client.session:
+                self._complete_callback(callback_id)
+                return
+
+            # Get call info
+            call_id = kwargs.get("litellm_call_id", str(start_time))
+            pre_call_info = self._active_events.pop(call_id, {})
+
+            # Extract model info
+            model = kwargs.get("model", pre_call_info.get("model", "unknown"))
+            provider = self._extract_provider(model)
+
+            # Get messages for description
+            messages = kwargs.get("messages", pre_call_info.get("messages", []))
+            description = pre_call_info.get("description") or self._format_messages(messages)
+
+            # Format error
+            error_msg = str(response_obj) if response_obj else "Unknown error"
+
+            # Create failed event
+            event_kwargs = {
+                "description": description,
+                "result": f"Error: {error_msg}",
+                "model": f"{provider}/{model}" if "/" not in model else model,
+                "is_finished": True,
+                "metadata": {
+                    "provider": provider,
+                    "duration_ms": (end_time - start_time) * 1000,
+                    "litellm": True,
+                    "error": True
+                }
+            }
+
+            client.session.create_event(**event_kwargs)
+
+            if DEBUG:
+                logger.info(f"LiteLLM Bridge: Created error event for {model}")
+
+        except Exception as e:
+            logger.error(f"LiteLLM Bridge error in log_failure_event: {e}")
+            if DEBUG:
+                import traceback
+                traceback.print_exc()
+        finally:
+            self._complete_callback(callback_id)
+
+    def log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        """Called for streaming responses"""
+        # For now, we'll handle the complete response in log_success_event
+        # This could be enhanced to show real-time streaming updates
+        pass
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        """Async version of log_success_event"""
+        # Delegate to sync version - Lucidic client handles both sync/async internally
+        self.log_success_event(kwargs, response_obj, start_time, end_time)
+
+    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
+        """Async version of log_failure_event"""
+        # Delegate to sync version
+        self.log_failure_event(kwargs, response_obj, start_time, end_time)
+
+    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        """Async version of log_stream_event"""
+        self.log_stream_event(kwargs, response_obj, start_time, end_time)
+
+    def _extract_provider(self, model: str) -> str:
+        """Extract provider from model string"""
+        if "/" in model:
+            return model.split("/")[0]
+
+        # Try to infer provider from model name patterns
+        model_lower = model.lower()
+        if "gpt" in model_lower:
+            return "openai"
+        elif "claude" in model_lower:
+            return "anthropic"
+        elif "gemini" in model_lower:
+            return "vertex_ai"
+        elif "llama" in model_lower:
+            return "meta"
+        elif "mistral" in model_lower:
+            return "mistral"
+
+        return "unknown"
+
+    def _format_messages(self, messages: List[Dict[str, Any]]) -> str:
+        """Format messages into a description string"""
+        if not messages:
+            return "LiteLLM Request"
+
+        formatted = []
+        for msg in messages:
+            if isinstance(msg, dict):
+                role = msg.get("role", "unknown")
+                content = msg.get("content", "")
+
+                if isinstance(content, str):
+                    formatted.append(f"{role}: {content}")
+                elif isinstance(content, list):
+                    # Handle multimodal content
+                    texts = []
+                    for item in content:
+                        if isinstance(item, dict) and item.get("type") == "text":
+                            texts.append(item.get("text", ""))
+                    if texts:
+                        formatted.append(f"{role}: {' '.join(texts)}")
+
+        return "\n".join(formatted) if formatted else "LiteLLM Request"
+
+    def _extract_response_content(self, response_obj) -> str:
+        """Extract response content from LiteLLM response object"""
+        try:
+            # Handle different response types
+            if hasattr(response_obj, "choices") and response_obj.choices:
+                # Standard completion response
+                choice = response_obj.choices[0]
+                if hasattr(choice, "message") and hasattr(choice.message, "content"):
+                    return choice.message.content or "No content"
+                elif hasattr(choice, "text"):
+                    return choice.text or "No content"
+
+            # Fallback to string representation
+            return str(response_obj)
+
+        except Exception as e:
+            logger.error(f"Error extracting response content: {e}")
+            return "Response received"
+
+    def _extract_usage(self, response_obj) -> Optional[Dict[str, int]]:
+        """Extract usage information from response"""
+        try:
+            if hasattr(response_obj, "usage"):
+                usage = response_obj.usage
+                if hasattr(usage, "prompt_tokens") and hasattr(usage, "completion_tokens"):
+                    return {
+                        "prompt_tokens": usage.prompt_tokens,
+                        "completion_tokens": usage.completion_tokens,
+                        "total_tokens": usage.total_tokens if hasattr(usage, "total_tokens") else (usage.prompt_tokens + usage.completion_tokens)
+                    }
+        except Exception as e:
+            logger.debug(f"Could not extract usage: {e}")
+
+        return None
+
+    def _calculate_litellm_cost(self, model: str, usage: Dict[str, int]) -> Optional[float]:
+        """Calculate cost using Lucidic's pricing model"""
+        try:
+            # LiteLLM model names might need normalization for pricing lookup
+            normalized_model = model
+            if "/" in model:
+                # Extract the model name after the provider prefix
+                # e.g., "openai/gpt-4o" -> "gpt-4o"
+                parts = model.split("/", 1)
+                if len(parts) == 2:
+                    normalized_model = parts[1]
+
+            return calculate_cost(normalized_model, usage)
+        except Exception as e:
+            logger.debug(f"Could not calculate cost for {model}: {e}")
+            return None
+
+    def _extract_images_from_messages(self, messages: List[Dict[str, Any]]) -> List[str]:
+        """Extract base64 images from multimodal messages"""
+        images = []
+
+        for msg in messages:
+            if isinstance(msg, dict):
+                content = msg.get("content", "")
+                if isinstance(content, list):
+                    for item in content:
+                        if isinstance(item, dict) and item.get("type") == "image_url":
+                            image_url = item.get("image_url", {})
+                            if isinstance(image_url, dict):
+                                url = image_url.get("url", "")
+                                if url.startswith("data:image"):
+                                    images.append(url)
+
+        return images
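The callback above is normally installed by the new OTelLiteLLMHandler (next section), but it can also be registered with LiteLLM directly via litellm.callbacks. A hedged sketch; it assumes a Lucidic session is already active, since the callback returns early when client.session is unset.

    import litellm
    from lucidicai.telemetry.litellm_bridge import LucidicLiteLLMCallback

    # Manual wiring; OTelLiteLLMHandler.override() does this (and more) for you.
    callback = LucidicLiteLLMCallback()
    litellm.callbacks = [callback]  # LiteLLM invokes log_pre_api_call / log_success_event / log_failure_event

    litellm.completion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Summarize this changelog."}],
    )

    # Flush anything still in flight before shutdown (mirrors what end_session() now does).
    callback.wait_for_pending_callbacks(timeout=5.0)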
lucidicai/telemetry/otel_handlers.py CHANGED
@@ -263,4 +263,97 @@ class OTelOpenAIAgentsHandler(BaseProvider):
     def undo_override(self):
         """Disable instrumentation"""
         self._is_instrumented = False
-        logger.info("[OTel OpenAI Agents Handler] Instrumentation will be disabled on shutdown")
+        logger.info("[OTel OpenAI Agents Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelLiteLLMHandler(BaseProvider):
+    """LiteLLM handler using CustomLogger callback system"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "LiteLLM"
+        self.telemetry = LucidicTelemetry()
+        self._callback = None
+        self._original_callbacks = None
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        """Not needed with callback approach"""
+        return response
+
+    def override(self):
+        """Enable LiteLLM instrumentation via callbacks"""
+        try:
+            import litellm
+            from lucidicai.client import Client
+            from .litellm_bridge import LucidicLiteLLMCallback
+
+            client = Client()
+
+            # Initialize telemetry if needed
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+
+            # Create our callback instance
+            self._callback = LucidicLiteLLMCallback()
+
+            # Store original callbacks
+            self._original_callbacks = litellm.callbacks if hasattr(litellm, 'callbacks') else None
+
+            # Add our callback to LiteLLM
+            if litellm.callbacks is None:
+                litellm.callbacks = []
+
+            # Add our callback if not already present
+            if self._callback not in litellm.callbacks:
+                litellm.callbacks.append(self._callback)
+
+            # Also set success/failure callbacks
+            if not hasattr(litellm, 'success_callback') or litellm.success_callback is None:
+                litellm.success_callback = []
+            if not hasattr(litellm, 'failure_callback') or litellm.failure_callback is None:
+                litellm.failure_callback = []
+
+            # Add to callback lists if not present
+            if self._callback not in litellm.success_callback:
+                litellm.success_callback.append(self._callback)
+            if self._callback not in litellm.failure_callback:
+                litellm.failure_callback.append(self._callback)
+
+            logger.info("[OTel LiteLLM Handler] Callback instrumentation enabled")
+
+        except ImportError:
+            logger.error("LiteLLM not installed. Please install with: pip install litellm")
+            raise
+        except Exception as e:
+            logger.error(f"Failed to enable LiteLLM instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        """Disable LiteLLM instrumentation"""
+        try:
+            import litellm
+
+            # Wait for pending callbacks to complete before cleanup
+            if self._callback and hasattr(self._callback, 'wait_for_pending_callbacks'):
+                logger.info("[OTel LiteLLM Handler] Waiting for pending callbacks to complete...")
+                self._callback.wait_for_pending_callbacks(timeout=5.0)
+
+            # Remove our callback from all callback lists
+            if self._callback:
+                if hasattr(litellm, 'callbacks') and litellm.callbacks and self._callback in litellm.callbacks:
+                    litellm.callbacks.remove(self._callback)
+
+                if hasattr(litellm, 'success_callback') and litellm.success_callback and self._callback in litellm.success_callback:
+                    litellm.success_callback.remove(self._callback)
+
+                if hasattr(litellm, 'failure_callback') and litellm.failure_callback and self._callback in litellm.failure_callback:
+                    litellm.failure_callback.remove(self._callback)
+
+            # Restore original callbacks if we stored them
+            if self._original_callbacks is not None:
+                litellm.callbacks = self._original_callbacks
+
+            logger.info("[OTel LiteLLM Handler] Instrumentation disabled")
+
+        except Exception as e:
+            logger.error(f"Error disabling LiteLLM instrumentation: {e}")
lucidicai/telemetry/otel_init.py CHANGED
@@ -81,6 +81,9 @@ class LucidicTelemetry:
                elif provider == "openai_agents":
                    # OpenAI Agents uses the same OpenAI instrumentation
                    self._instrument_openai_agents()
+                elif provider == "litellm":
+                    # LiteLLM uses callbacks, not OpenTelemetry instrumentation
+                    logger.info(f"[LucidicTelemetry] LiteLLM will use callback-based instrumentation")
            except Exception as e:
                logger.error(f"Failed to instrument {provider}: {e}")
 
lucidicai-1.2.19.dist-info/METADATA → lucidicai-1.2.20.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lucidicai
-Version: 1.2.19
+Version: 1.2.20
 Summary: Lucidic AI Python SDK
 Author: Andy Liang
 Author-email: andy@lucidic.ai
lucidicai-1.2.19.dist-info/RECORD → lucidicai-1.2.20.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
-lucidicai/__init__.py,sha256=qu1NTg-te86yT2j8BVz2NSi_Zs2G842K_YpxlMs9m6Q,20036
+lucidicai/__init__.py,sha256=sUfrleiPQPuXq5e0UnkXPjuTShiVoHnPtl_O0nzPcd8,20589
 lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
 lucidicai/client.py,sha256=3yBVppvwBwesca_pZSKgTUDihzZe5JhVgh1AALCpJ_Q,6620
 lucidicai/constants.py,sha256=_u0z3M4geZgS1g-CrOZUVjtcew8l70dKQnpVQvlXh9w,2172
 lucidicai/decorators.py,sha256=oqXyfHk9f9UmeaIquuU8mtzed1qZtO_-svwadpoat6g,13950
 lucidicai/errors.py,sha256=gTg0bdzjuTcUnakRbZnxjngO4gZnRLVwRHRglpZZJsM,970
-lucidicai/event.py,sha256=2j5G4drwVhmdQkk9-armYQj1ctmLm2H_rNfPsfV_8wA,2004
+lucidicai/event.py,sha256=iPbNBRb2ZFohBsrRQZHzfa9GbxCsgFIw3M8w4gFrSi4,2000
 lucidicai/image_upload.py,sha256=6SRudg-BpInM2gzMx1Yf1Rz_Zyh8inwoJ7U4pBw7ruY,3807
-lucidicai/model_pricing.py,sha256=Q_Kd8OtKPqtvZ4pIh__ztKb3RXe-cX-8xgrM-AIUj9E,12189
+lucidicai/model_pricing.py,sha256=o1yWCaF5Qxj4tloXxVG3SZXcTMKtk56J_Nfdo8M4uR0,11947
 lucidicai/session.py,sha256=T_4z3CLKMp5DOKSVpdWwMZkZG9ndXU6oiI5Al6DHSTY,5501
 lucidicai/singleton.py,sha256=gfT3XdWLXSIWMqDXbY6-pnesMZ8RGRitaEPhIsgrRPw,1272
 lucidicai/state.py,sha256=4Tb1X6l2or6w_e62FYSuEeghAv3xXm5gquKwzCpvdok,235
@@ -31,19 +31,20 @@ lucidicai/providers/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6c
 lucidicai/providers/universal_image_interceptor.py,sha256=7d-hw4xihRwvvA1AP8-vqYNChtmVXKmn09MN4pDS7KQ,12126
 lucidicai/telemetry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lucidicai/telemetry/base_provider.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
+lucidicai/telemetry/litellm_bridge.py,sha256=mOdjEfvP--ToDv8snoOMU4pRQx_Yg4s2o3BMTMzeRK8,14979
 lucidicai/telemetry/lucidic_exporter.py,sha256=h2GOnEk22Fpeke4Zc7SSk391yr0joUApVwolV5Q4hz4,10818
 lucidicai/telemetry/lucidic_span_processor.py,sha256=LhkyJGBnTKpTWdBi2wyW9th4CWZ9EtZ8ulaE0rwYj0Y,27432
 lucidicai/telemetry/openai_agents_instrumentor.py,sha256=__wIbeglMnEEf4AGTQ--FXeWCKmz2yy8SBupwprEdZA,12694
 lucidicai/telemetry/opentelemetry_converter.py,sha256=xOHCqoTyO4hUkL6k7fxy84PbljPpYep6ET9ZqbkJehc,17665
-lucidicai/telemetry/otel_handlers.py,sha256=XgeDCL7ZjtaQ7HX8HKmYGTDqO6yne7gXZaScpdKvq98,10527
-lucidicai/telemetry/otel_init.py,sha256=RMsWRG3WZWvZN6WeLWSehsfUB_vWSuclVMCcSO31GEU,7770
+lucidicai/telemetry/otel_handlers.py,sha256=HqGYIWJI_Vp8So2-HMpPjnrgTBSgBHHLDu01z_sq-Qk,14646
+lucidicai/telemetry/otel_init.py,sha256=d0JcdDD23U_0xyBCAPOcZkFuOwv_7kI9Mih4msin6fw,7997
 lucidicai/telemetry/otel_provider.py,sha256=GZPhSEsEPVKrr5A0u5-WNn7OmxNgRWBcam9dyqjf91o,7042
 lucidicai/telemetry/pydantic_ai_handler.py,sha256=WPa3tFcVgVnPPO3AxcNOTbNkmODLgNOrU2_3GVtWqUw,28261
 lucidicai/telemetry/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lucidicai/telemetry/utils/image_storage.py,sha256=4Z59ZpVexr7-lcExfr8GsqXe0y2VZmr8Yjwa-3DeOxU,1457
 lucidicai/telemetry/utils/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6cFwx2DE,1503
 lucidicai/telemetry/utils/universal_image_interceptor.py,sha256=zPfVsMjtKxJP2n2OOjKbtPiQJTZ0sf5_28GWprOnJP4,12185
-lucidicai-1.2.19.dist-info/METADATA,sha256=WxcYafsohbT_xHO3Nw9b1XhcvhnERuzuHL6b8QrKvvQ,903
-lucidicai-1.2.19.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
-lucidicai-1.2.19.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
-lucidicai-1.2.19.dist-info/RECORD,,
+lucidicai-1.2.20.dist-info/METADATA,sha256=vn20bB7YfIoEnDP-o_rATqNh2vDhvBORz5snd_0Sjxw,903
+lucidicai-1.2.20.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
+lucidicai-1.2.20.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
+lucidicai-1.2.20.dist-info/RECORD,,