lucidicai 1.3.5__py3-none-any.whl → 2.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,192 @@
1
+ """Extraction utilities matching TypeScript SDK for span attribute processing."""
2
+ import json
3
+ from typing import List, Dict, Any, Optional
4
+
5
+
6
def detect_is_llm_span(span) -> bool:
    """Check whether a span is LLM-related - matches TypeScript SDK logic.

    A span counts as LLM-related when its lowercased name contains a known
    provider/operation keyword, or when any attribute key uses the
    ``gen_ai.`` or ``llm.`` semantic-convention namespaces.

    Args:
        span: An OpenTelemetry-like span with a ``name`` and an optional
            ``attributes`` mapping.

    Returns:
        True if the span looks like an LLM call, False otherwise.
    """
    # getattr guards against span objects that lack a name attribute entirely
    # (the original `span.name` would raise AttributeError).
    name = (getattr(span, 'name', "") or "").lower()
    patterns = ('openai', 'anthropic', 'chat', 'completion', 'embedding', 'llm',
                'gemini', 'claude', 'bedrock', 'vertex', 'cohere', 'groq')

    if any(p in name for p in patterns):
        return True

    attributes = getattr(span, 'attributes', None)
    if attributes:
        for key in attributes:
            # startswith accepts a tuple: one call covers both namespaces.
            if isinstance(key, str) and key.startswith(('gen_ai.', 'llm.')):
                return True

    return False
21
+
22
+
23
def _image_from_content_item(item) -> Optional[str]:
    """Return the data-URL for a single message content item, or None.

    Handles both the OpenAI ``image_url`` shape and the Anthropic
    ``image``/``source`` base64 shape.
    """
    if not isinstance(item, dict):
        return None
    item_type = item.get("type")
    if item_type == "image_url":
        image_url = item.get("image_url", {})
        if isinstance(image_url, dict):
            url = image_url.get("url", "")
            # isinstance guard: a non-string url would crash .startswith().
            if isinstance(url, str) and url.startswith("data:image"):
                return url
    elif item_type == "image":
        # Anthropic format: base64 payload plus media type.
        source = item.get("source", {})
        if isinstance(source, dict):
            data = source.get("data", "")
            media_type = source.get("media_type", "image/jpeg")
            if data:
                return f"data:{media_type};base64,{data}"
    return None


def extract_images(attrs: Dict[str, Any]) -> List[str]:
    """Extract base64 image data-URLs from span attributes - matches TypeScript logic.

    Looks for images in:
    - gen_ai.prompt.{i}.content arrays with image items
    - the direct gen_ai.prompt message list

    Args:
        attrs: Span attribute mapping.

    Returns:
        A list of ``data:image/...`` URLs (possibly empty).
    """
    images: List[str] = []

    # Check indexed prompt content for images (gen_ai.prompt.{i}.content).
    # 50 is the same upper bound used elsewhere for indexed attributes.
    for i in range(50):
        key = f"gen_ai.prompt.{i}.content"
        if key not in attrs:
            continue
        content = attrs[key]

        # Attribute values may be JSON-encoded strings; decode before scanning.
        if isinstance(content, str):
            try:
                content = json.loads(content)
            except (ValueError, TypeError):
                # Narrowed from bare except: only decode failures are expected.
                continue

        if isinstance(content, list):
            for item in content:
                url = _image_from_content_item(item)
                if url:
                    images.append(url)

    # Also check the direct gen_ai.prompt message list.
    prompt_list = attrs.get("gen_ai.prompt")
    if isinstance(prompt_list, list):
        for msg in prompt_list:
            if isinstance(msg, dict) and isinstance(msg.get("content"), list):
                for item in msg["content"]:
                    url = _image_from_content_item(item)
                    if url:
                        images.append(url)

    return images
80
+
81
+
82
def extract_prompts(attrs: Dict[str, Any]) -> Optional[List[Dict]]:
    """Extract prompts as a message list from span attributes.

    Checks, in priority order:
    1. Indexed ``gen_ai.prompt.{i}.role`` / ``.content`` attributes.
    2. A direct ``gen_ai.prompt`` or ``gen_ai.messages`` list.
    3. The AI SDK's JSON-encoded ``ai.prompt.messages`` attribute.

    Args:
        attrs: Span attribute mapping.

    Returns:
        Messages in format ``[{"role": "user", "content": "..."}]``, or
        None when no prompt data is present.
    """
    messages: List[Dict] = []

    # Indexed format (gen_ai.prompt.{i}.role/content); stop at the first gap.
    for i in range(50):
        role_key = f"gen_ai.prompt.{i}.role"
        content_key = f"gen_ai.prompt.{i}.content"

        if role_key not in attrs and content_key not in attrs:
            break

        role = attrs.get(role_key, "user")
        content = attrs.get(content_key, "")

        # Content may be JSON-encoded; decode when possible, otherwise
        # keep the raw string (narrowed from bare except).
        if isinstance(content, str):
            try:
                content = json.loads(content)
            except (ValueError, TypeError):
                pass

        # Collapse content arrays (text/image items) to their text parts.
        if isinstance(content, list):
            text_parts = [item.get("text", "")
                          for item in content
                          if isinstance(item, dict) and item.get("type") == "text"]
            if text_parts:
                content = " ".join(text_parts)

        messages.append({"role": role, "content": content})

    if messages:
        return messages

    # Check for a direct message list.
    prompt_list = attrs.get("gen_ai.prompt") or attrs.get("gen_ai.messages")
    if isinstance(prompt_list, list):
        return prompt_list

    # Check AI SDK format: a JSON-encoded message array.
    ai_prompt = attrs.get("ai.prompt.messages")
    if isinstance(ai_prompt, str):
        try:
            parsed = json.loads(ai_prompt)
        except (ValueError, TypeError):
            pass
        else:
            if isinstance(parsed, list):
                return parsed

    return None
139
+
140
+
141
def extract_completions(span, attrs: Dict[str, Any]) -> Optional[str]:
    """Extract completion/response text from span attributes.

    Checks, in priority order:
    1. Indexed ``gen_ai.completion.{i}.content`` attributes, joined by newline.
    2. Direct ``gen_ai.completion`` / ``llm.completions`` attributes.
    3. The AI SDK ``ai.response.text`` attribute.
    4. The span's error status, rendered as ``"Error: ..."``.

    Args:
        span: The span (used only for its error ``status``, if any).
        attrs: Span attribute mapping.

    Returns:
        The completion text, an error description, or None.
    """
    completions: List[str] = []

    # Indexed format; stop at the first missing index.
    for i in range(50):
        key = f"gen_ai.completion.{i}.content"
        if key not in attrs:
            break
        content = attrs[key]
        if isinstance(content, str):
            completions.append(content)
        else:
            # Serialize non-string payloads; fall back to str() for values
            # json cannot encode (narrowed from bare except).
            try:
                completions.append(json.dumps(content))
            except (TypeError, ValueError):
                completions.append(str(content))

    if completions:
        return "\n".join(completions)

    # Check direct completion attribute.
    completion = attrs.get("gen_ai.completion") or attrs.get("llm.completions")
    if isinstance(completion, str):
        return completion
    elif isinstance(completion, list) and completion:
        return "\n".join(str(c) for c in completion)

    # Check AI SDK format.
    ai_completion = attrs.get("ai.response.text")
    if isinstance(ai_completion, str):
        return ai_completion

    # Surface a span-level error when no completion was recorded.
    if hasattr(span, 'status'):
        # Lazy import: opentelemetry is an optional dependency and is only
        # needed when the span carries a status.
        from opentelemetry.trace import StatusCode
        if span.status.status_code == StatusCode.ERROR:
            return f"Error: {span.status.description or 'Unknown error'}"

    return None
181
+
182
+
183
def extract_model(attrs: Dict[str, Any]) -> Optional[str]:
    """Return the model name recorded on a span, if any.

    Probes the known model attribute keys in priority order and returns
    the first truthy value; the final key's value (even if falsy) is the
    fallback, mirroring a chained ``or`` expression.
    """
    priority_keys = (
        "gen_ai.response.model",
        "gen_ai.request.model",
        "llm.response.model",
        "llm.request.model",
        "ai.model.id",
    )
    for attr_key in priority_keys:
        candidate = attrs.get(attr_key)
        if candidate:
            return candidate
    return attrs.get("ai.model.name")
@@ -16,6 +16,7 @@ except ImportError:
16
16
 
17
17
  from lucidicai.client import Client
18
18
  from lucidicai.model_pricing import calculate_cost
19
+ from lucidicai.context import current_parent_event_id
19
20
 
20
21
  logger = logging.getLogger("Lucidic")
21
22
  DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"
@@ -25,7 +26,7 @@ class LucidicLiteLLMCallback(CustomLogger):
25
26
  """
26
27
  Custom callback for LiteLLM that bridges to Lucidic's event system.
27
28
 
28
- This callback integrates LiteLLM's logging with Lucidic's session/step/event hierarchy,
29
+ This callback integrates LiteLLM's logging with Lucidic's session/event hierarchy,
29
30
  enabling automatic tracking of all LiteLLM-supported providers.
30
31
  """
31
32
 
@@ -102,7 +103,7 @@ class LucidicLiteLLMCallback(CustomLogger):
102
103
  traceback.print_exc()
103
104
 
104
105
  def log_success_event(self, kwargs, response_obj, start_time, end_time):
105
- """Called on successful LLM completion"""
106
+ """Called on successful LLM completion -> create typed LLM_GENERATION event"""
106
107
  # Generate unique callback ID
107
108
  callback_id = f"success_{id(kwargs)}_{start_time}"
108
109
  self._register_callback(callback_id)
@@ -141,33 +142,30 @@ class LucidicLiteLLMCallback(CustomLogger):
141
142
  # Extract any images from multimodal requests
142
143
  images = self._extract_images_from_messages(messages)
143
144
 
144
- # Create event
145
- event_kwargs = {
146
- "description": description,
147
- "result": result,
148
- "model": f"{provider}/{model}" if "/" not in model else model,
149
- "is_finished": True
150
- }
151
-
152
- if cost is not None:
153
- event_kwargs["cost_added"] = cost
154
-
155
- if images:
156
- event_kwargs["screenshots"] = images
157
-
158
- # Add metadata
159
- metadata = {
160
- "provider": provider,
161
- "duration_ms": (end_time - start_time) * 1000,
162
- "litellm": True
163
- }
164
- if usage:
165
- metadata["usage"] = usage
166
-
167
- event_kwargs["metadata"] = metadata
168
-
169
- # Create the event
170
- client.session.create_event(**event_kwargs)
145
+ # Create LLM_GENERATION typed event
146
+ parent_id = None
147
+ try:
148
+ parent_id = current_parent_event_id.get(None)
149
+ except Exception:
150
+ parent_id = None
151
+
152
+ # occurred_at/duration from datetimes
153
+ occ_dt = start_time if isinstance(start_time, datetime) else None
154
+ duration_secs = (end_time - start_time).total_seconds() if isinstance(start_time, datetime) and isinstance(end_time, datetime) else None
155
+
156
+ client.create_event(
157
+ type="llm_generation",
158
+ provider=provider,
159
+ model=model,
160
+ messages=messages,
161
+ output=result,
162
+ input_tokens=(usage or {}).get("prompt_tokens", 0),
163
+ output_tokens=(usage or {}).get("completion_tokens", 0),
164
+ cost=cost,
165
+ parent_event_id=parent_id,
166
+ occurred_at=occ_dt,
167
+ duration=duration_secs,
168
+ )
171
169
 
172
170
  if DEBUG:
173
171
  logger.info(f"LiteLLM Bridge: Created event for {model} completion")
@@ -207,21 +205,24 @@ class LucidicLiteLLMCallback(CustomLogger):
207
205
  # Format error
208
206
  error_msg = str(response_obj) if response_obj else "Unknown error"
209
207
 
210
- # Create failed event
211
- event_kwargs = {
212
- "description": description,
213
- "result": f"Error: {error_msg}",
214
- "model": f"{provider}/{model}" if "/" not in model else model,
215
- "is_finished": True,
216
- "metadata": {
217
- "provider": provider,
218
- "duration_ms": (end_time - start_time) * 1000,
219
- "litellm": True,
220
- "error": True
221
- }
222
- }
223
-
224
- client.session.create_event(**event_kwargs)
208
+ # Create error typed event under current parent if any
209
+ parent_id = None
210
+ try:
211
+ parent_id = current_parent_event_id.get(None)
212
+ except Exception:
213
+ parent_id = None
214
+ occ_dt = start_time if isinstance(start_time, datetime) else None
215
+ duration_secs = (end_time - start_time).total_seconds() if isinstance(start_time, datetime) and isinstance(end_time, datetime) else None
216
+
217
+ client.create_event(
218
+ type="error_traceback",
219
+ error=error_msg,
220
+ traceback="",
221
+ parent_event_id=parent_id,
222
+ occurred_at=occ_dt,
223
+ duration=duration_secs,
224
+ metadata={"provider": provider, "litellm": True}
225
+ )
225
226
 
226
227
  if DEBUG:
227
228
  logger.info(f"LiteLLM Bridge: Created error event for {model}")
@@ -366,4 +367,38 @@ class LucidicLiteLLMCallback(CustomLogger):
366
367
  if url.startswith("data:image"):
367
368
  images.append(url)
368
369
 
369
- return images
370
+ return images
371
+
372
+
373
def setup_litellm_callback():
    """Register the LucidicLiteLLMCallback with LiteLLM when available.

    Ensures at most one callback instance is registered, so repeated SDK
    initializations never produce duplicate event tracking.
    """
    try:
        import litellm
    except ImportError:
        logger.info("[LiteLLM] litellm not installed, skipping callback setup")
        return

    # Make sure litellm exposes a usable callbacks list (covers both a
    # missing attribute and an explicit None).
    if getattr(litellm, 'callbacks', None) is None:
        litellm.callbacks = []

    # Bail out if one of our callbacks is already present.
    if any(isinstance(existing, LucidicLiteLLMCallback) for existing in litellm.callbacks):
        if DEBUG:
            logger.debug("[LiteLLM] Callback already registered")
        return

    # Register a fresh callback instance.
    try:
        litellm.callbacks.append(LucidicLiteLLMCallback())
        logger.info("[LiteLLM] Registered Lucidic callback for event tracking")
    except Exception as e:
        logger.error(f"[LiteLLM] Failed to register callback: {e}")