lucidicai 2.1.2__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. lucidicai/__init__.py +32 -390
  2. lucidicai/api/client.py +260 -92
  3. lucidicai/api/resources/__init__.py +16 -1
  4. lucidicai/api/resources/dataset.py +422 -82
  5. lucidicai/api/resources/event.py +399 -27
  6. lucidicai/api/resources/experiment.py +108 -0
  7. lucidicai/api/resources/feature_flag.py +78 -0
  8. lucidicai/api/resources/prompt.py +84 -0
  9. lucidicai/api/resources/session.py +545 -38
  10. lucidicai/client.py +395 -480
  11. lucidicai/core/config.py +73 -48
  12. lucidicai/core/errors.py +3 -3
  13. lucidicai/sdk/bound_decorators.py +321 -0
  14. lucidicai/sdk/context.py +20 -2
  15. lucidicai/sdk/decorators.py +283 -74
  16. lucidicai/sdk/event.py +538 -36
  17. lucidicai/sdk/event_builder.py +2 -4
  18. lucidicai/sdk/features/dataset.py +408 -232
  19. lucidicai/sdk/features/feature_flag.py +344 -3
  20. lucidicai/sdk/init.py +50 -279
  21. lucidicai/sdk/session.py +502 -0
  22. lucidicai/sdk/shutdown_manager.py +103 -46
  23. lucidicai/session_obj.py +321 -0
  24. lucidicai/telemetry/context_capture_processor.py +13 -6
  25. lucidicai/telemetry/extract.py +60 -63
  26. lucidicai/telemetry/litellm_bridge.py +3 -44
  27. lucidicai/telemetry/lucidic_exporter.py +143 -131
  28. lucidicai/telemetry/openai_agents_instrumentor.py +2 -2
  29. lucidicai/telemetry/openai_patch.py +7 -6
  30. lucidicai/telemetry/telemetry_manager.py +183 -0
  31. lucidicai/telemetry/utils/model_pricing.py +21 -30
  32. lucidicai/telemetry/utils/provider.py +77 -0
  33. lucidicai/utils/images.py +30 -14
  34. lucidicai/utils/queue.py +2 -2
  35. lucidicai/utils/serialization.py +27 -0
  36. {lucidicai-2.1.2.dist-info → lucidicai-3.0.0.dist-info}/METADATA +1 -1
  37. {lucidicai-2.1.2.dist-info → lucidicai-3.0.0.dist-info}/RECORD +39 -30
  38. {lucidicai-2.1.2.dist-info → lucidicai-3.0.0.dist-info}/WHEEL +0 -0
  39. {lucidicai-2.1.2.dist-info → lucidicai-3.0.0.dist-info}/top_level.txt +0 -0
lucidicai/telemetry/extract.py

@@ -1,6 +1,7 @@
  """Extraction utilities matching TypeScript SDK for span attribute processing."""
  import json
  from typing import List, Dict, Any, Optional
+ from ..utils.logger import debug, info, warning, error, verbose, truncate_id


  def detect_is_llm_span(span) -> bool:
@@ -20,65 +21,6 @@ def detect_is_llm_span(span) -> bool:
      return False


- def extract_images(attrs: Dict[str, Any]) -> List[str]:
-     """Extract images from span attributes - matches TypeScript logic.
-
-     Looks for images in:
-     - gen_ai.prompt.{i}.content arrays with image_url items
-     - Direct image attributes
-     """
-     images = []
-
-     # Check indexed prompt content for images (gen_ai.prompt.{i}.content)
-     for i in range(50):
-         key = f"gen_ai.prompt.{i}.content"
-         if key in attrs:
-             content = attrs[key]
-
-             # Parse if JSON string
-             if isinstance(content, str):
-                 try:
-                     content = json.loads(content)
-                 except:
-                     continue
-
-             # Extract images from content array
-             if isinstance(content, list):
-                 for item in content:
-                     if isinstance(item, dict):
-                         if item.get("type") == "image_url":
-                             image_url = item.get("image_url", {})
-                             if isinstance(image_url, dict):
-                                 url = image_url.get("url", "")
-                                 if url.startswith("data:image"):
-                                     images.append(url)
-                         elif item.get("type") == "image":
-                             # Anthropic format
-                             source = item.get("source", {})
-                             if isinstance(source, dict):
-                                 data = source.get("data", "")
-                                 media_type = source.get("media_type", "image/jpeg")
-                                 if data:
-                                     images.append(f"data:{media_type};base64,{data}")
-
-     # Also check direct gen_ai.prompt list
-     prompt_list = attrs.get("gen_ai.prompt")
-     if isinstance(prompt_list, list):
-         for msg in prompt_list:
-             if isinstance(msg, dict):
-                 content = msg.get("content")
-                 if isinstance(content, list):
-                     for item in content:
-                         if isinstance(item, dict) and item.get("type") == "image_url":
-                             image_url = item.get("image_url", {})
-                             if isinstance(image_url, dict):
-                                 url = image_url.get("url", "")
-                                 if url.startswith("data:image"):
-                                     images.append(url)
-
-     return images
-
-
  def extract_prompts(attrs: Dict[str, Any]) -> Optional[List[Dict]]:
      """Extract prompts as message list from span attributes.

@@ -102,7 +44,7 @@ def extract_prompts(attrs: Dict[str, Any]) -> Optional[List[Dict]]:
              try:
                  parsed = json.loads(content)
                  content = parsed
-             except:
+             except (ValueError, TypeError):
                  pass

          # Format content
@@ -114,6 +56,29 @@ def extract_prompts(attrs: Dict[str, Any]) -> Optional[List[Dict]]:
                  text_parts.append(item.get("text", ""))
              if text_parts:
                  content = " ".join(text_parts)
+
+         # if we have no content here, that means this history entry is a tool call
+         # NOTE: for now, I am assuming that tool call history only shows up if there is no content,
+         # based on my testing of otel spans in different cases. Should be revisited if this is not the case.
+         if not content:
+             # look for tool calls in the attributes
+             j = 0
+             tool_calls = []
+             while True:
+                 tool_key_name = f"gen_ai.prompt.{i}.tool_calls.{j}.name"
+                 tool_key_arguments = f"gen_ai.prompt.{i}.tool_calls.{j}.arguments"
+                 if tool_key_name not in attrs:
+                     break
+                 name = attrs[tool_key_name]
+                 arguments = attrs[tool_key_arguments]
+                 tool_calls.append({"name": name, "arguments": arguments})
+                 j += 1
+
+             # for now, just render content as "Tool Calls:\n 1) <tool call 1> \n 2) <tool call 2> \n ..."
+             if tool_calls:
+                 content = 'Tool Calls:' if len(tool_calls) > 1 else 'Tool Call:'
+                 for k, tool_call in enumerate(tool_calls):
+                     content += f'\n{k + 1}) {json.dumps(tool_call, indent=4)}'

          messages.append({"role": role, "content": content})

@@ -132,7 +97,7 @@ def extract_prompts(attrs: Dict[str, Any]) -> Optional[List[Dict]]:
              parsed = json.loads(ai_prompt)
              if isinstance(parsed, list):
                  return parsed
-         except:
+         except (ValueError, TypeError):
              pass

      return None
@@ -143,7 +108,8 @@ def extract_completions(span, attrs: Dict[str, Any]) -> Optional[str]:
      completions = []

      # Check indexed format (gen_ai.completion.{i}.content)
-     for i in range(50):
+     i = 0
+     while True:
          key = f"gen_ai.completion.{i}.content"
          if key not in attrs:
              break
@@ -153,8 +119,9 @@ def extract_completions(span, attrs: Dict[str, Any]) -> Optional[str]:
          else:
              try:
                  completions.append(json.dumps(content))
-             except:
+             except (ValueError, TypeError):
                  completions.append(str(content))
+         i += 1

      if completions:
          return "\n".join(completions)
@@ -180,6 +147,36 @@ def extract_completions(span, attrs: Dict[str, Any]) -> Optional[str]:
      return None


+ def extract_tool_calls(span, attrs: Dict[str, Any]) -> Optional[str]:
+     """Extract tool calls from span attributes as a pretty-printed string."""
+
+     debug("[Telemetry] Extracting tool calls from span")
+
+     # check if this is a tool call span
+     if attrs.get("gen_ai.completion.0.finish_reason") != "tool_calls":
+         debug(f"[Telemetry] Not a tool call span {span.name}")
+         return None
+
+     tool_calls = []
+     i = 0
+     while True:
+         key_name = f"gen_ai.completion.0.tool_calls.{i}.name"
+         key_arguments = f"gen_ai.completion.0.tool_calls.{i}.arguments"
+         if key_name not in attrs:
+             break
+         name = attrs[key_name]
+         arguments = attrs[key_arguments]
+         debug(f"[Telemetry] Extracted tool call {name} with arguments: {arguments}")
+         tool_calls.append({"name": name, "arguments": arguments})
+         i += 1
+
+     if tool_calls:
+         # prettify the tool calls and return as a string
+         tool_calls_str = [json.dumps(tool_call, indent=4) for tool_call in tool_calls]
+         return "\n".join(tool_calls_str)
+
+     return None
+

  def extract_model(attrs: Dict[str, Any]) -> Optional[str]:
      """Extract model name from span attributes."""
      return (
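A usage sketch for the new `extract_tool_calls` helper; the span stub and attribute values below are illustrative, and only the `gen_ai.completion.0.*` keys shown in the hunk are assumed:

    from types import SimpleNamespace

    from lucidicai.telemetry.extract import extract_tool_calls

    span = SimpleNamespace(name="openai.chat")  # stub exposing the .name used in debug logs
    attrs = {
        "gen_ai.completion.0.finish_reason": "tool_calls",
        "gen_ai.completion.0.tool_calls.0.name": "search_docs",
        "gen_ai.completion.0.tool_calls.0.arguments": '{"query": "pricing"}',
    }

    result = extract_tool_calls(span, attrs)
    # -> newline-joined, pretty-printed {"name", "arguments"} dicts,
    #    or None when finish_reason is not "tool_calls"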
lucidicai/telemetry/litellm_bridge.py

@@ -18,6 +18,7 @@ from lucidicai.sdk.event import create_event
  from lucidicai.sdk.init import get_session_id
  from lucidicai.telemetry.utils.model_pricing import calculate_cost
  from lucidicai.sdk.context import current_parent_event_id
+ from lucidicai.telemetry.utils.provider import detect_provider

  logger = logging.getLogger("Lucidic")
  DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"
@@ -117,7 +118,7 @@ class LucidicLiteLLMCallback(CustomLogger):

          # Extract model and provider info
          model = kwargs.get("model", pre_call_info.get("model", "unknown"))
-         provider = self._extract_provider(model)
+         provider = detect_provider(model=model)

          # Get messages for description
          messages = kwargs.get("messages", pre_call_info.get("messages", []))
@@ -132,9 +133,6 @@ class LucidicLiteLLMCallback(CustomLogger):
          if usage:
              cost = self._calculate_litellm_cost(model, usage)

-         # Extract any images from multimodal requests
-         images = self._extract_images_from_messages(messages)
-
          # Get parent event ID from context
          parent_id = None
          try:
@@ -191,7 +189,7 @@ class LucidicLiteLLMCallback(CustomLogger):

          # Extract model info
          model = kwargs.get("model", pre_call_info.get("model", "unknown"))
-         provider = self._extract_provider(model)
+         provider = detect_provider(model=model)

          # Get messages for description
          messages = kwargs.get("messages", pre_call_info.get("messages", []))
@@ -251,26 +249,6 @@ class LucidicLiteLLMCallback(CustomLogger):
          """Async version of log_stream_event"""
          self.log_stream_event(kwargs, response_obj, start_time, end_time)

-     def _extract_provider(self, model: str) -> str:
-         """Extract provider from model string"""
-         if "/" in model:
-             return model.split("/")[0]
-
-         # Try to infer provider from model name patterns
-         model_lower = model.lower()
-         if "gpt" in model_lower:
-             return "openai"
-         elif "claude" in model_lower:
-             return "anthropic"
-         elif "gemini" in model_lower:
-             return "vertex_ai"
-         elif "llama" in model_lower:
-             return "meta"
-         elif "mistral" in model_lower:
-             return "mistral"
-
-         return "unknown"
-
      def _format_messages(self, messages: List[Dict[str, Any]]) -> str:
          """Format messages into a description string"""
          if not messages:
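The removed `_extract_provider` heuristics now live behind `detect_provider` in `telemetry/utils/provider.py`, a new file whose body is not shown in this section. A plausible sketch of such a helper, combining the heuristics above with the exporter's old attribute check; everything beyond the two call-site signatures is an assumption:

    from typing import Any, Dict, Optional

    def detect_provider(model: str = "", attributes: Optional[Dict[str, Any]] = None) -> str:
        """Best-effort provider detection (sketch, not the shipped implementation)."""
        # Prefer explicit telemetry attributes when available.
        if attributes:
            name = attributes.get("gen_ai.system") or attributes.get("service.name")
            if name:
                return str(name)
        # LiteLLM-style "provider/model" prefix.
        if "/" in model:
            return model.split("/")[0]
        # Model-name patterns, as in the removed _extract_provider.
        model_lower = model.lower()
        for marker, provider in [("gpt", "openai"), ("claude", "anthropic"),
                                 ("gemini", "vertex_ai"), ("llama", "meta"),
                                 ("mistral", "mistral")]:
            if marker in model_lower:
                return provider
        return "unknown"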
@@ -347,25 +325,6 @@ class LucidicLiteLLMCallback(CustomLogger):
              logger.debug(f"Could not calculate cost for {model}: {e}")
          return None

-     def _extract_images_from_messages(self, messages: List[Dict[str, Any]]) -> List[str]:
-         """Extract base64 images from multimodal messages"""
-         images = []
-
-         for msg in messages:
-             if isinstance(msg, dict):
-                 content = msg.get("content", "")
-                 if isinstance(content, list):
-                     for item in content:
-                         if isinstance(item, dict) and item.get("type") == "image_url":
-                             image_url = item.get("image_url", {})
-                             if isinstance(image_url, dict):
-                                 url = image_url.get("url", "")
-                                 if url.startswith("data:image"):
-                                     images.append(url)
-
-         return images
-
-
  def setup_litellm_callback():
      """Registers the LucidicLiteLLMCallback with LiteLLM if available.

lucidicai/telemetry/lucidic_exporter.py

@@ -1,26 +1,60 @@
  """Custom OpenTelemetry exporter for Lucidic (Exporter-only mode).

- Converts completed spans into immutable typed LLM events via Client.create_event(),
- which enqueues non-blocking delivery through the EventQueue.
+ Converts completed spans into immutable typed LLM events via emit_event(),
+ which fires events in the background without blocking the exporter.
  """
- import json
- from typing import Sequence, Optional, Dict, Any, List
+ from typing import Sequence, Optional, Dict, Any, List, TYPE_CHECKING
  from datetime import datetime, timezone
  from opentelemetry.sdk.trace import ReadableSpan
  from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
- from opentelemetry.trace import StatusCode
  from opentelemetry.semconv_ai import SpanAttributes
+ import threading

- from ..sdk.event import create_event
+ from ..sdk.event import emit_event
  from ..sdk.init import get_session_id
  from ..sdk.context import current_session_id, current_parent_event_id
  from ..telemetry.utils.model_pricing import calculate_cost
- from .extract import detect_is_llm_span, extract_images, extract_prompts, extract_completions, extract_model
+ from .extract import detect_is_llm_span, extract_prompts, extract_completions, extract_model, extract_tool_calls
+ from .utils.provider import detect_provider
  from ..utils.logger import debug, info, warning, error, verbose, truncate_id

+ if TYPE_CHECKING:
+     from ..client import LucidicAI
+

  class LucidicSpanExporter(SpanExporter):
-     """Exporter that creates immutable LLM events for completed spans."""
+     """Exporter that creates immutable LLM events for completed spans.
+
+     Uses emit_event() for fire-and-forget event creation without blocking.
+     Supports multi-client routing via client registry.
+     """
+
+     def __init__(self):
+         """Initialize the exporter."""
+         self._shutdown = False
+         # Client registry for multi-client support
+         self._client_registry: Dict[str, "LucidicAI"] = {}
+         self._registry_lock = threading.Lock()
+
+     def register_client(self, client: "LucidicAI") -> None:
+         """Register a client for span routing.
+
+         Args:
+             client: The LucidicAI client to register
+         """
+         with self._registry_lock:
+             self._client_registry[client._client_id] = client
+         debug(f"[Exporter] Registered client {client._client_id[:8]}...")
+
+     def unregister_client(self, client_id: str) -> None:
+         """Unregister a client.
+
+         Args:
+             client_id: The client ID to unregister
+         """
+         with self._registry_lock:
+             self._client_registry.pop(client_id, None)
+         debug(f"[Exporter] Unregistered client {client_id[:8]}...")

      def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
          try:
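The registry lets one process run several `LucidicAI` clients and still route spans to the right backend. A hedged lifecycle sketch (the client construction is illustrative; `_client_id` is whatever the client assigns itself):

    exporter = LucidicSpanExporter()

    client = LucidicAI(api_key="...")  # hypothetical constructor arguments
    exporter.register_client(client)   # spans tagged with lucidic.client_id == client._client_id route here
    try:
        ...                            # run instrumented LLM calls
    finally:
        exporter.unregister_client(client._client_id)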
@@ -43,6 +77,8 @@ class LucidicSpanExporter(SpanExporter):
              return

          debug(f"[Telemetry] Processing LLM span: {span.name}")
+         verbose(f"[Telemetry] Span: {span.attributes}")
+         verbose(f"[Telemetry] Span name: {span.name}")

          attributes = dict(span.attributes or {})

@@ -66,7 +102,8 @@ class LucidicSpanExporter(SpanExporter):
          if not target_session_id:
              try:
                  target_session_id = current_session_id.get(None)
-             except Exception:
+             except Exception as e:
+                 debug(f"[Telemetry] Failed to get session_id from contextvar: {e}")
                  target_session_id = None
          if not target_session_id:
              target_session_id = get_session_id()
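Session resolution keeps its three-step fallback, now with a debug line when the contextvar read fails: the span's own `lucidic.session_id` attribute wins, then the ambient `current_session_id` contextvar, then the globally initialized session. Condensed as a sketch:

    from typing import Optional

    def resolve_session_id(attributes: dict) -> Optional[str]:
        session_id = attributes.get("lucidic.session_id")   # 1) explicit span tag
        if not session_id:
            try:
                session_id = current_session_id.get(None)   # 2) ambient contextvar
            except Exception:
                session_id = None
        return session_id or get_session_id()               # 3) global default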
@@ -83,7 +120,8 @@ class LucidicSpanExporter(SpanExporter):
              parent_id = current_parent_event_id.get(None)
              if parent_id:
                  debug(f"[Telemetry] Got parent_id from context for span {span.name}: {truncate_id(parent_id)}")
-         except Exception:
+         except Exception as e:
+             debug(f"[Telemetry] Failed to get parent_event_id from contextvar: {e}")
              parent_id = None

          if not parent_id:
@@ -96,159 +134,127 @@ class LucidicSpanExporter(SpanExporter):

          # Typed fields using extract utilities
          model = extract_model(attributes) or 'unknown'
-         provider = self._detect_provider_name(attributes)
+         provider = detect_provider(model=model, attributes=attributes)
          messages = extract_prompts(attributes) or []
          params = self._extract_params(attributes)
          output_text = extract_completions(span, attributes)
+         tool_calls = extract_tool_calls(span, attributes)
+         debug(f"[Telemetry] Extracted tool calls: {tool_calls}")

          # Debug for responses.create
          if span.name == "openai.responses.create":
              debug(f"[Telemetry] Extracted messages: {messages}")
              debug(f"[Telemetry] Extracted output: {output_text}")
+             debug(f"[Telemetry] Extracted tool calls: {tool_calls}")
+
+
+         # see if tool calls need to be used instead of output_text
+         if not output_text or output_text == "Response received" or not tool_calls:
+
+             if tool_calls:
+                 debug(f"[Telemetry] Using tool calls for span {span.name}")
+                 output_text = tool_calls

-         # Skip spans with no meaningful output (likely incomplete or duplicate instrumentation)
-         if not output_text or output_text == "Response received":
              # Only use "Response received" if we have other meaningful data
-             if not messages and not attributes.get("lucidic.instrumented"):
+             if not messages and not tool_calls and not attributes.get("lucidic.instrumented"):
                  verbose(f"[Telemetry] Skipping span {span.name} with no meaningful content")
                  return
              # Use a more descriptive default if we must
              if not output_text:
+                 debug(f"[Telemetry] No output text for span {span.name}. Using default 'Response received'")
                  output_text = "Response received"

          input_tokens = self._extract_prompt_tokens(attributes)
          output_tokens = self._extract_completion_tokens(attributes)
          cost = self._calculate_cost(attributes)
-         images = extract_images(attributes)

+         # Prepare event data for async creation
+         event_data = {
+             'type': 'llm_generation',
+             'session_id': target_session_id,
+             'occurred_at': occurred_at,
+             'duration': duration_seconds,
+             'provider': provider,
+             'model': model,
+             'messages': messages,
+             'params': params,
+             'output': output_text,
+             'input_tokens': input_tokens,
+             'output_tokens': output_tokens,
+             'cost': cost,
+             'raw': None,
+             'parent_event_id': parent_id,
+         }
+
+         # Get client_id for routing
+         client_id = attributes.get("lucidic.client_id")
+
+         if not self._shutdown:
+             self._send_event_async(event_data, span.name, parent_id, client_id)
+
+         debug(
+             f"[Telemetry] Queued LLM event creation for span {span.name} "
+             f"(session: {truncate_id(target_session_id)}, client: {truncate_id(client_id)})"
+         )
+
+     except Exception as e:
+         error(f"[Telemetry] Failed to process span {span.name}: {e}")
+
+     def _send_event_async(
+         self,
+         event_data: Dict[str, Any],
+         span_name: str,
+         parent_id: Optional[str],
+         client_id: Optional[str] = None,
+     ) -> None:
+         """Send event asynchronously in a background thread.
+
+         Args:
+             event_data: Event data to send
+             span_name: Name of the span (for logging)
+             parent_id: Parent event ID (for context)
+             client_id: Client ID for routing (if available)
+         """
+         try:
              # Set context for parent if needed
              from ..sdk.context import current_parent_event_id as parent_context
+
              if parent_id:
                  token = parent_context.set(parent_id)
              else:
                  token = None
-
+
              try:
-                 # Create immutable event via non-blocking queue
-                 debug(f"[Telemetry] Creating LLM event with parent_id: {truncate_id(parent_id)}, session_id: {truncate_id(target_session_id)}")
-                 event_id = create_event(
-                     type="llm_generation",
-                     session_id=target_session_id,  # Pass the session_id explicitly
-                     occurred_at=occurred_at,
-                     duration=duration_seconds,
-                     provider=provider,
-                     model=model,
-                     messages=messages,
-                     params=params,
-                     output=output_text,
-                     input_tokens=input_tokens,
-                     output_tokens=output_tokens,
-                     cost=cost,
-                     raw={"images": images} if images else None,
-                     parent_event_id=parent_id,  # Pass the parent_id explicitly
-                 )
+                 # Try to route to specific client if client_id is available
+                 if client_id:
+                     with self._registry_lock:
+                         client = self._client_registry.get(client_id)
+                     if client:
+                         # Use client's event resource directly
+                         try:
+                             response = client._resources["events"].create(**event_data)
+                             event_id = response if response else None
+                             debug(
+                                 f"[Telemetry] Routed LLM event {truncate_id(event_id)} to client {client_id[:8]}..."
+                             )
+                             return
+                         except Exception as e:
+                             debug(f"[Telemetry] Failed to route event to client: {e}")
+                             # Fall through to emit_event
+
+                 # Fallback to emit_event (uses global state)
+                 event_id = emit_event(**event_data)
+                 debug(
+                     f"[Telemetry] Emitted LLM event {truncate_id(event_id)} from span {span_name}"
+                 )
              finally:
                  # Reset parent context
                  if token:
                      parent_context.reset(token)
-
-             debug(f"[Telemetry] Created LLM event {truncate_id(event_id)} from span {span.name} for session {truncate_id(target_session_id)}")

          except Exception as e:
-             error(f"[Telemetry] Failed to process span {span.name}: {e}")
-
-
-     def _create_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any]) -> Optional[str]:
-         """Create a Lucidic event from span start"""
-         try:
-             # Extract description from prompts/messages
-             description = self._extract_description(span, attributes)
-
-             # Extract images if present
-             images = self._extract_images(attributes)
-
-             # Get model info
-             model = attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or \
-                 attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or \
-                 attributes.get('gen_ai.request.model') or 'unknown'
-
-             # Resolve target session id for this span
-             target_session_id = attributes.get('lucidic.session_id')
-             if not target_session_id:
-                 try:
-                     target_session_id = current_session_id.get(None)
-                 except Exception:
-                     target_session_id = None
-             if not target_session_id:
-                 target_session_id = get_session_id()
-             if not target_session_id:
-                 debug(f"[Telemetry] No session ID for span {span.name}, skipping")
-                 return None
-
-             # Create event
-             event_kwargs = {
-                 'session_id': target_session_id,  # Pass session_id explicitly
-                 'description': description,
-                 'result': "Processing...",  # Will be updated when span ends
-                 'model': model
-             }
+             error(f"[Telemetry] Failed to send event for span {span_name}: {e}")

-             if images:
-                 event_kwargs['screenshots'] = images
-
-             return create_event(**event_kwargs)
-
-         except Exception as e:
-             error(f"[Telemetry] Failed to create event from span: {e}")
-             return None
-
-     def _update_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any], event_id: str) -> None:
-         """Deprecated: events are immutable; no updates performed."""
-         return
-
-     def _extract_description(self, span: ReadableSpan, attributes: Dict[str, Any]) -> str:
-         """Extract description from span attributes"""
-         # Try to get prompts/messages
-         prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
-             attributes.get('gen_ai.prompt')
-
-         verbose(f"[Telemetry] Extracting description from attributes: {attributes}, prompts: {prompts}")
-
-         if prompts:
-             if isinstance(prompts, list) and prompts:
-                 # Handle message list format
-                 return self._format_messages(prompts)
-             elif isinstance(prompts, str):
-                 return prompts
-
-         # Fallback to span name
-         return f"LLM Call: {span.name}"
-
-     def _extract_result(self, span: ReadableSpan, attributes: Dict[str, Any]) -> str:
-         """Extract result/response from span attributes"""
-         # Try to get completions
-         completions = attributes.get(SpanAttributes.LLM_COMPLETIONS) or \
-             attributes.get('gen_ai.completion')
-
-         if completions:
-             if isinstance(completions, list) and completions:
-                 # Handle multiple completions
-                 return "\n".join(str(c) for c in completions)
-             elif isinstance(completions, str):
-                 return completions
-
-         # Check for error
-         if span.status.status_code == StatusCode.ERROR:
-             return f"Error: {span.status.description or 'Unknown error'}"
-
-         return "Response received"
-
-     def _detect_provider_name(self, attributes: Dict[str, Any]) -> str:
-         name = attributes.get('gen_ai.system') or attributes.get('service.name')
-         if name:
-             return str(name)
-         return "openai" if 'openai' in (str(attributes.get('service.name', '')).lower()) else "unknown"
-

      def _extract_params(self, attributes: Dict[str, Any]) -> Dict[str, Any]:
          return {
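`emit_event()` is described as fire-and-forget; assuming delivery happens on a daemon thread inside `sdk/event.py` (not shown in this section), the pattern amounts to:

    import threading

    def fire_and_forget(send, payload) -> None:
        """Run send(payload) on a daemon thread so export() never blocks on I/O."""
        threading.Thread(target=send, args=(payload,), daemon=True).start()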
@@ -300,7 +306,13 @@ class LucidicSpanExporter(SpanExporter):
          return None

      def shutdown(self) -> None:
-         return None
+         """Shutdown the exporter and flush pending events."""
+         from ..sdk.event import flush
+         self._shutdown = True
+         # Flush any pending background events
+         flush(timeout=5.0)
+         debug("[Telemetry] LucidicSpanExporter shutdown complete")

      def force_flush(self, timeout_millis: int = 30000) -> bool:
-         return True
+         """Force flush is a no-op since events are sent immediately in background threads."""
+         return True
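For reference, an exporter like this is wired into an OpenTelemetry pipeline through a span processor; whether the SDK uses a batch or simple processor is not visible in this hunk, so the wiring below is illustrative:

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor

    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(LucidicSpanExporter()))
    trace.set_tracer_provider(provider)
    # ... run instrumented code ...
    provider.shutdown()  # cascades to exporter.shutdown(), which flushes pending events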
lucidicai/telemetry/openai_agents_instrumentor.py

@@ -104,8 +104,8 @@ class OpenAIAgentsInstrumentor:
              import openai
              openai.chat.completions.create = self._original_openai_create
              logger.debug("Restored OpenAI chat.completions.create")
-         except:
-             pass
+         except Exception as e:
+             logger.debug(f"[OpenAIAgents] Failed to restore OpenAI client: {e}")


  class OpenAIAgentsTracingProcessor: