lucidicai 2.1.1__py3-none-any.whl → 2.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucidicai/__init__.py CHANGED
@@ -100,6 +100,7 @@ def _end_session(
 ):
     """End the current session."""
     from .sdk.init import get_resources, get_session_id, get_event_queue
+    from .sdk.shutdown_manager import get_shutdown_manager
 
     # Use provided session_id or fall back to context
     if not session_id:
@@ -125,6 +126,9 @@ def _end_session(
     # Clear session context
     clear_active_session()
 
+    # unregister from shutdown manager
+    get_shutdown_manager().unregister_session(session_id)
+
 
 def _get_session():
     """Get the current session object."""
@@ -293,7 +297,7 @@ get_error_history = error_boundary.get_error_history
 clear_error_history = error_boundary.clear_error_history
 
 # Version
-__version__ = "2.1.1"
+__version__ = "2.1.2"
 
 # Apply error boundary wrapping to all SDK functions
 from .sdk.error_boundary import wrap_sdk_function
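The `_end_session` change above ties explicit teardown into the SDK's shutdown manager, so a session that is ended normally is not ended a second time at interpreter exit. The diff does not show `ShutdownManager` itself; the sketch below is a hypothetical stand-in illustrating the registry pattern the calls imply, not the SDK's actual implementation.

    import atexit

    class ShutdownManager:
        """Hypothetical registry that ends still-open sessions at exit."""

        def __init__(self):
            self._active_sessions = set()
            atexit.register(self._on_exit)

        def register_session(self, session_id: str) -> None:
            self._active_sessions.add(session_id)

        def unregister_session(self, session_id: str) -> None:
            # What _end_session() now calls: a cleanly ended session is
            # removed so it is not re-ended during shutdown.
            self._active_sessions.discard(session_id)

        def _on_exit(self) -> None:
            for session_id in list(self._active_sessions):
                print(f"ending orphaned session {session_id}")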
lucidicai/sdk/context.py CHANGED
@@ -151,6 +151,27 @@ def session(**init_params) -> Iterator[None]:
         clear_thread_session()
         current_session_id.reset(token)
         try:
+            # Force flush OpenTelemetry spans before ending session
+            from .init import get_tracer_provider
+            from ..utils.logger import debug, info
+            import time
+
+            tracer_provider = get_tracer_provider()
+            if tracer_provider:
+                debug(f"[Session] Force flushing OpenTelemetry spans for session {session_id}")
+                try:
+                    # Force flush with 5 second timeout to ensure all spans are exported
+                    flush_result = tracer_provider.force_flush(timeout_millis=5000)
+                    debug(f"[Session] Tracer provider force_flush returned: {flush_result}")
+
+                    # Give a small additional delay to ensure the exporter processes everything
+                    # This is necessary because force_flush on the provider flushes the processors,
+                    # but the exporter might still be processing the spans
+                    time.sleep(0.5)
+                    debug(f"[Session] Successfully flushed spans for session {session_id}")
+                except Exception as e:
+                    debug(f"[Session] Error flushing spans: {e}")
+
             # Pass session_id explicitly to avoid context issues
             lai.end_session(session_id=session_id)
         except Exception:
@@ -184,6 +205,27 @@ async def session_async(**init_params) -> AsyncIterator[None]:
         clear_task_session()
         current_session_id.reset(token)
         try:
+            # Force flush OpenTelemetry spans before ending session
+            from .init import get_tracer_provider
+            from ..utils.logger import debug, info
+            import asyncio
+
+            tracer_provider = get_tracer_provider()
+            if tracer_provider:
+                debug(f"[Session] Force flushing OpenTelemetry spans for async session {session_id}")
+                try:
+                    # Force flush with 5 second timeout to ensure all spans are exported
+                    flush_result = tracer_provider.force_flush(timeout_millis=5000)
+                    debug(f"[Session] Tracer provider force_flush returned: {flush_result}")
+
+                    # Give a small additional delay to ensure the exporter processes everything
+                    # This is necessary because force_flush on the provider flushes the processors,
+                    # but the exporter might still be processing the spans
+                    await asyncio.sleep(0.5)
+                    debug(f"[Session] Successfully flushed spans for async session {session_id}")
+                except Exception as e:
+                    debug(f"[Session] Error flushing spans: {e}")
+
             # Pass session_id explicitly to avoid context issues in async
             lai.end_session(session_id=session_id)
         except Exception:
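For callers the flush is transparent: it runs inside the context manager's teardown, before `lai.end_session`. A usage sketch, assuming the `session` context manager is re-exported at package level; the init parameter name is illustrative, not taken from the diff:

    import lucidicai as lai

    with lai.session(session_name="demo-run"):  # hypothetical init param
        ...  # instrumented LLM calls happen here

    # On exit the SDK now calls tracer_provider.force_flush(timeout_millis=5000),
    # sleeps 0.5 s so the exporter can drain, then ends the session.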
lucidicai/sdk/init.py CHANGED
@@ -353,6 +353,11 @@ def get_resources() -> dict:
     return _sdk_state.resources
 
 
+def get_tracer_provider() -> Optional[TracerProvider]:
+    """Get the tracer provider instance."""
+    return _sdk_state.tracer_provider
+
+
 def clear_state() -> None:
     """Clear SDK state (for testing)."""
     global _sdk_state
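Besides serving the session teardown, the new accessor lets integrators trigger the same flush manually, for example right before a hard process kill. A minimal sketch, assuming the SDK has already been initialized so the module state holds a provider:

    from lucidicai.sdk.init import get_tracer_provider

    provider = get_tracer_provider()
    if provider:
        # Same call the session teardown makes: block up to 5 s while
        # queued spans are handed off to the exporter.
        provider.force_flush(timeout_millis=5000)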
lucidicai/telemetry/lucidic_exporter.py CHANGED
@@ -46,6 +46,14 @@ class LucidicSpanExporter(SpanExporter):
 
         attributes = dict(span.attributes or {})
 
+        # Debug: Check what attributes we have for responses.create
+        if span.name == "openai.responses.create":
+            debug(f"[Telemetry] responses.create span has {len(attributes)} attributes")
+            # Check for specific attributes we're interested in
+            has_prompts = any(k.startswith('gen_ai.prompt') for k in attributes.keys())
+            has_completions = any(k.startswith('gen_ai.completion') for k in attributes.keys())
+            debug(f"[Telemetry] Has prompt attrs: {has_prompts}, Has completion attrs: {has_completions}")
+
         # Skip spans that are likely duplicates or incomplete
         # Check if this is a responses.parse span that was already handled
         if span.name == "openai.responses.create" and not attributes.get("lucidic.instrumented"):
@@ -93,6 +101,11 @@
         params = self._extract_params(attributes)
         output_text = extract_completions(span, attributes)
 
+        # Debug for responses.create
+        if span.name == "openai.responses.create":
+            debug(f"[Telemetry] Extracted messages: {messages}")
+            debug(f"[Telemetry] Extracted output: {output_text}")
+
         # Skip spans with no meaningful output (likely incomplete or duplicate instrumentation)
         if not output_text or output_text == "Response received":
             # Only use "Response received" if we have other meaningful data
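The new debug checks scan for the flat, indexed attribute convention shared between the patcher and this exporter (`gen_ai.prompt.{i}.role`/`.content` on the request side, `gen_ai.completion.0.*` on the response side). A self-contained sketch of that convention and the prefix scans the diff adds, with illustrative values:

    # Flat span attributes, as the patcher would set them
    attributes = {
        "gen_ai.prompt.0.role": "user",
        "gen_ai.prompt.0.content": "What is the capital of France?",
        "gen_ai.completion.0.role": "assistant",
        "gen_ai.completion.0.content": "Paris.",
    }

    # The exporter's new checks reduce to prefix scans over the keys
    has_prompts = any(k.startswith("gen_ai.prompt") for k in attributes)
    has_completions = any(k.startswith("gen_ai.completion") for k in attributes)
    assert has_prompts and has_completions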
lucidicai/telemetry/openai_patch.py CHANGED
@@ -1,12 +1,12 @@
-"""OpenAI responses.parse instrumentation patch.
+"""OpenAI responses API instrumentation patch.
 
-This module provides instrumentation for OpenAI's responses.parse API
-which is not covered by the standard opentelemetry-instrumentation-openai package.
+This module provides instrumentation for OpenAI's responses.parse and responses.create APIs
+which are not covered by the standard opentelemetry-instrumentation-openai package.
 """
 import functools
 import logging
 import time
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Optional, Dict
 
 from opentelemetry import trace
 from opentelemetry.trace import Status, StatusCode, SpanKind
@@ -18,7 +18,7 @@ logger = logging.getLogger("Lucidic")
 
 
 class OpenAIResponsesPatcher:
-    """Patches OpenAI client to instrument responses.parse method."""
+    """Patches OpenAI client to instrument responses API methods."""
 
     def __init__(self, tracer_provider=None):
         """Initialize the patcher.
@@ -29,13 +29,13 @@ class OpenAIResponsesPatcher:
         self._tracer_provider = tracer_provider or trace.get_tracer_provider()
         self._tracer = self._tracer_provider.get_tracer(__name__)
         self._is_patched = False
-        self._original_parse = None
+        self._original_init = None
         self._client_refs = []  # Keep track of patched clients for cleanup
 
     def patch(self):
         """Apply the patch to OpenAI client initialization."""
         if self._is_patched:
-            debug("[OpenAI Patch] responses.parse already patched")
+            debug("[OpenAI Patch] responses API already patched")
             return
 
         try:
@@ -50,102 +50,132 @@ class OpenAIResponsesPatcher:
                 # Call original initialization
                 original_init(client_self, *args, **kwargs)
 
-                # Patch the responses.parse method on this specific instance
-                if hasattr(client_self, 'resources') and hasattr(client_self.resources, 'responses'):
-                    responses = client_self.resources.responses
-                    if hasattr(responses, 'parse'):
-                        # Store original and apply wrapper
-                        original_parse = responses.parse
-                        responses.parse = self._create_parse_wrapper(original_parse)
+                # Patch responses API methods
+                self._patch_responses_api(client_self)
 
-                        # Track this client for cleanup
-                        self._client_refs.append((responses, original_parse))
-
-                        verbose("[OpenAI Patch] Patched responses.parse on client instance")
-
-                # Also patch the direct access if available
-                if hasattr(client_self, 'responses') and hasattr(client_self.responses, 'parse'):
-                    original_parse = client_self.responses.parse
-                    client_self.responses.parse = self._create_parse_wrapper(original_parse)
-                    self._client_refs.append((client_self.responses, original_parse))
-                    verbose("[OpenAI Patch] Patched client.responses.parse")
+                # Also patch beta.chat.completions.parse if it exists
+                self._patch_beta_api(client_self)
 
             # Replace the __init__ method
             OpenAI.__init__ = patched_init
             self._original_init = original_init
             self._is_patched = True
 
-            logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses.parse")
+            logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses API")
 
         except ImportError:
             logger.warning("[OpenAI Patch] OpenAI library not installed, skipping patch")
         except Exception as e:
-            logger.error(f"[OpenAI Patch] Failed to patch responses.parse: {e}")
+            logger.error(f"[OpenAI Patch] Failed to patch responses API: {e}")
+
+    def _patch_responses_api(self, client):
+        """Patch the responses API methods on the client."""
+        # Check for client.resources.responses (newer structure)
+        if hasattr(client, 'resources') and hasattr(client.resources, 'responses'):
+            responses = client.resources.responses
+            self._patch_responses_object(responses, "client.resources.responses")
+
+        # Check for client.responses (direct access)
+        if hasattr(client, 'responses'):
+            responses = client.responses
+            self._patch_responses_object(responses, "client.responses")
+
+    def _patch_responses_object(self, responses, location: str):
+        """Patch methods on a responses object.
+
+        Args:
+            responses: The responses object to patch
+            location: String describing where this object is (for logging)
+        """
+        methods_to_patch = {
+            'parse': 'openai.responses.parse',
+            'create': 'openai.responses.create'
+        }
+
+        for method_name, span_name in methods_to_patch.items():
+            if hasattr(responses, method_name):
+                original_method = getattr(responses, method_name)
+                wrapped_method = self._create_method_wrapper(original_method, span_name)
+                setattr(responses, method_name, wrapped_method)
+
+                # Track for cleanup
+                self._client_refs.append((responses, method_name, original_method))
+
+                verbose(f"[OpenAI Patch] Patched {location}.{method_name}")
+
+    def _patch_beta_api(self, client):
+        """Patch beta.chat.completions.parse if it exists."""
+        try:
+            if (hasattr(client, 'beta') and
+                hasattr(client.beta, 'chat') and
+                hasattr(client.beta.chat, 'completions') and
+                hasattr(client.beta.chat.completions, 'parse')):
+
+                completions = client.beta.chat.completions
+                original_parse = completions.parse
+
+                # Wrap with a slightly different span name for clarity
+                wrapped_parse = self._create_method_wrapper(
+                    original_parse,
+                    'openai.beta.chat.completions.parse'
+                )
+                completions.parse = wrapped_parse
+
+                # Track for cleanup
+                self._client_refs.append((completions, 'parse', original_parse))
+
+                verbose("[OpenAI Patch] Patched beta.chat.completions.parse")
+
+        except Exception as e:
+            debug(f"[OpenAI Patch] Could not patch beta API: {e}")
 
-    def _create_parse_wrapper(self, original_method: Callable) -> Callable:
-        """Create a wrapper for the responses.parse method.
+    def _create_method_wrapper(self, original_method: Callable, span_name: str) -> Callable:
+        """Create a wrapper for an OpenAI API method.
 
         Args:
-            original_method: The original parse method to wrap
+            original_method: The original method to wrap
+            span_name: Name for the OpenTelemetry span
 
         Returns:
             Wrapped method with instrumentation
         """
         @functools.wraps(original_method)
-        def wrapper(**kwargs):
+        def wrapper(*args, **kwargs):
             # Create span for tracing
             with self._tracer.start_as_current_span(
-                "openai.responses.parse",
+                span_name,
                 kind=SpanKind.CLIENT
             ) as span:
                 start_time = time.time()
 
                 try:
-                    # Extract request parameters
-                    model = kwargs.get('model', 'unknown')
-                    temperature = kwargs.get('temperature', 1.0)
-                    input_param = kwargs.get('input', [])
-                    text_format = kwargs.get('text_format')
-                    instructions = kwargs.get('instructions')
-
-                    # Convert input to messages format if needed
-                    if isinstance(input_param, str):
-                        messages = [{"role": "user", "content": input_param}]
-                    elif isinstance(input_param, list):
-                        messages = input_param
-                    else:
-                        messages = []
+                    # Debug log for responses.create to understand the parameters
+                    if 'responses.create' in span_name:
+                        debug(f"[OpenAI Patch] responses.create called with kwargs keys: {list(kwargs.keys())}")
+
+                    # Extract and process request parameters
+                    request_attrs = self._extract_request_attributes(span_name, args, kwargs)
 
                     # Set span attributes
                     span.set_attribute("gen_ai.system", "openai")
-                    span.set_attribute("gen_ai.request.model", model)
-                    span.set_attribute("gen_ai.request.temperature", temperature)
-                    span.set_attribute("gen_ai.operation.name", "responses.parse")
+                    span.set_attribute("gen_ai.operation.name", span_name)
 
-                    # Add a unique marker for our instrumentation
-                    span.set_attribute("lucidic.instrumented", "responses.parse")
-                    span.set_attribute("lucidic.patch.version", "1.0")
+                    # Add our instrumentation marker
+                    span.set_attribute("lucidic.instrumented", span_name)
+                    span.set_attribute("lucidic.patch.version", "2.0")
 
-                    if text_format and hasattr(text_format, '__name__'):
-                        span.set_attribute("gen_ai.request.response_format", text_format.__name__)
-
-                    if instructions:
-                        span.set_attribute("gen_ai.request.instructions", str(instructions))
-
-                    # Always set message attributes for proper event creation
-                    for i, msg in enumerate(messages):  # Include all messages
-                        if isinstance(msg, dict):
-                            role = msg.get('role', 'user')
-                            content = msg.get('content', '')
-                            span.set_attribute(f"gen_ai.prompt.{i}.role", role)
-                            # Always include full content - EventQueue handles large messages
-                            span.set_attribute(f"gen_ai.prompt.{i}.content", str(content))
+                    # Set request attributes on span
+                    for key, value in request_attrs.items():
+                        if value is not None:
+                            span.set_attribute(key, value)
+                            if 'responses.create' in span_name and ('prompt' in key or 'completion' in key):
+                                debug(f"[OpenAI Patch] Set attribute {key}: {str(value)[:100]}")
 
                     # Call the original method
-                    result = original_method(**kwargs)
+                    result = original_method(*args, **kwargs)
 
-                    # Process the response and set attributes on span
-                    self._set_response_attributes(span, result, model, messages, start_time, text_format)
+                    # Process the response
+                    self._set_response_attributes(span, result, span_name, start_time)
 
                     span.set_status(Status(StatusCode.OK))
                     return result
@@ -160,29 +190,154 @@ class OpenAIResponsesPatcher:
 
         return wrapper
 
-    def _set_response_attributes(self, span, result, model: str, messages: list, start_time: float, text_format):
+    def _extract_request_attributes(self, span_name: str, args: tuple, kwargs: dict) -> Dict[str, Any]:
+        """Extract request attributes based on the API method being called.
+
+        Args:
+            span_name: Name of the span/API method
+            args: Positional arguments
+            kwargs: Keyword arguments
+
+        Returns:
+            Dictionary of span attributes to set
+        """
+        attrs = {}
+
+        # Common attributes
+        model = kwargs.get('model', 'unknown')
+        attrs['gen_ai.request.model'] = model
+
+        temperature = kwargs.get('temperature')
+        if temperature is not None:
+            attrs['gen_ai.request.temperature'] = temperature
+
+        # Method-specific handling
+        if 'responses.parse' in span_name:
+            # Handle responses.parse format
+            input_param = kwargs.get('input', [])
+            text_format = kwargs.get('text_format')
+            instructions = kwargs.get('instructions')
+
+            # Convert input to messages format
+            if isinstance(input_param, str):
+                messages = [{"role": "user", "content": input_param}]
+            elif isinstance(input_param, list):
+                messages = input_param
+            else:
+                messages = []
+
+            if text_format and hasattr(text_format, '__name__'):
+                attrs['gen_ai.request.response_format'] = text_format.__name__
+
+            if instructions:
+                # Never truncate - EventQueue handles large messages automatically
+                attrs['gen_ai.request.instructions'] = str(instructions)
+
+        elif 'responses.create' in span_name:
+            # Handle responses.create format - it uses 'input' not 'messages'
+            input_param = kwargs.get('input', [])
+
+            # Convert input to messages format
+            if isinstance(input_param, str):
+                messages = [{"role": "user", "content": input_param}]
+            elif isinstance(input_param, list):
+                messages = input_param
+            else:
+                messages = []
+
+            # Handle text parameter for structured outputs
+            text_format = kwargs.get('text')
+            if text_format and hasattr(text_format, '__name__'):
+                attrs['gen_ai.request.response_format'] = text_format.__name__
+
+        elif 'completions.parse' in span_name:
+            # Handle standard chat completion format
+            messages = kwargs.get('messages', [])
+
+            # Handle response_format for structured outputs
+            response_format = kwargs.get('response_format')
+            if response_format:
+                if hasattr(response_format, '__name__'):
+                    attrs['gen_ai.request.response_format'] = response_format.__name__
+                elif isinstance(response_format, dict):
+                    attrs['gen_ai.request.response_format'] = str(response_format)
+
+        else:
+            # Fallback: try to get messages from kwargs
+            messages = kwargs.get('messages', kwargs.get('input', []))
+            if isinstance(messages, str):
+                messages = [{"role": "user", "content": messages}]
+
+        # Always set message attributes for proper event creation
+        # The EventQueue handles large messages automatically with blob storage
+        for i, msg in enumerate(messages):
+            if isinstance(msg, dict):
+                role = msg.get('role', 'user')
+                content = msg.get('content', '')
+                attrs[f'gen_ai.prompt.{i}.role'] = role
+                # Always include full content - EventQueue handles large messages
+                attrs[f'gen_ai.prompt.{i}.content'] = str(content)
+
+        return attrs
+
+    def _set_response_attributes(self, span, result, span_name: str, start_time: float):
         """Set response attributes on the span for the exporter to use.
 
         Args:
             span: OpenTelemetry span
             result: Response from OpenAI
-            model: Model name
-            messages: Input messages
+            span_name: Name of the API method
             start_time: Request start time
-            text_format: Response format (Pydantic model)
         """
         duration = time.time() - start_time
+        span.set_attribute("lucidic.duration_seconds", duration)
 
-        # Extract output
+        # Extract output based on response structure
         output_text = None
 
-        # Handle structured output response
-        if hasattr(result, 'output_parsed'):
-            output_text = str(result.output_parsed)
-
-        # Always set completion attributes so the exporter can extract them
+        # Handle different response formats
+        if 'responses.parse' in span_name:
+            # responses.parse format
+            if hasattr(result, 'output_parsed'):
+                output_text = str(result.output_parsed)
+            elif hasattr(result, 'parsed'):
+                output_text = str(result.parsed)
+
+        elif 'responses.create' in span_name:
+            # responses.create returns a Response object with output_text
+            if hasattr(result, 'output_text'):
+                output_text = result.output_text
+            elif hasattr(result, 'output'):
+                output_text = result.output
+            else:
+                # Log what we actually got for debugging
+                debug(f"[OpenAI Patch] responses.create result type: {type(result)}")
+                debug(f"[OpenAI Patch] responses.create result attributes: {[attr for attr in dir(result) if not attr.startswith('_')]}")
+
+        elif 'completions.parse' in span_name:
+            # Standard chat completion format
+            if hasattr(result, 'choices') and result.choices:
+                choice = result.choices[0]
+                if hasattr(choice, 'message'):
+                    msg = choice.message
+                    if hasattr(msg, 'parsed'):
+                        # Structured output
+                        output_text = str(msg.parsed)
+                    elif hasattr(msg, 'content'):
+                        # Regular content
+                        output_text = msg.content
+                elif hasattr(choice, 'text'):
+                    # Completion format
+                    output_text = choice.text
+
+        # Set completion attributes if we have output
+        if output_text:
+            # Never truncate - EventQueue handles large messages automatically
             span.set_attribute("gen_ai.completion.0.role", "assistant")
             span.set_attribute("gen_ai.completion.0.content", output_text)
+            debug(f"[OpenAI Patch] Set completion: {output_text[:100]}")
+        else:
+            debug(f"[OpenAI Patch] No output_text found for {span_name}")
 
         # Handle usage data
         if hasattr(result, 'usage'):
@@ -223,54 +378,29 @@ class OpenAIResponsesPatcher:
         if total_tokens is not None:
             span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
 
-        # Set additional metadata for the exporter
-        if text_format and hasattr(text_format, '__name__'):
-            span.set_attribute("lucidic.response_format", text_format.__name__)
-
-        # Set duration as attribute
-        span.set_attribute("lucidic.duration_seconds", duration)
-
-
-    def _should_capture_content(self) -> bool:
-        """Check if message content should be captured.
-
-        Returns:
-            True if content capture is enabled
-        """
-
-        return True  # always capture content for now
-
-        import os
-        # check OTEL standard env var
-        otel_capture = os.getenv('OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT', 'false')
-        # check Lucidic-specific env var
-        lucidic_capture = os.getenv('LUCIDIC_CAPTURE_CONTENT', 'false')
-
-        return otel_capture.lower() == 'true' or lucidic_capture.lower() == 'true'
-
     def unpatch(self):
         """Remove the patch and restore original behavior."""
         if not self._is_patched:
             return
 
         try:
-            # restore original __init__ if we have it
-            if hasattr(self, '_original_init'):
+            # Restore original __init__ if we have it
+            if self._original_init:
                 import openai
                 from openai import OpenAI
                 OpenAI.__init__ = self._original_init
 
-            # restore original parse methods on tracked clients
-            for responses_obj, original_parse in self._client_refs:
+            # Restore original methods on tracked clients
+            for obj, method_name, original_method in self._client_refs:
                 try:
-                    responses_obj.parse = original_parse
+                    setattr(obj, method_name, original_method)
                 except:
                     pass  # Client might have been garbage collected
 
             self._client_refs.clear()
             self._is_patched = False
 
-            logger.info("[OpenAI Patch] Successfully removed responses.parse patch")
+            logger.info("[OpenAI Patch] Successfully removed responses API patch")
 
         except Exception as e:
             logger.error(f"[OpenAI Patch] Failed to unpatch: {e}")
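The net effect of this refactor is a table-driven monkey patch: every wrapped method is recorded as an (owner, method_name, original) tuple so `unpatch()` can restore it with `setattr`, where the old code could only restore `parse`. A standalone sketch of that wrap/restore cycle, independent of the OpenAI client:

    import functools

    _patched = []  # (owner, method_name, original) tuples, as in _client_refs

    def wrap_method(owner, method_name, span_name):
        original = getattr(owner, method_name)

        @functools.wraps(original)
        def wrapper(*args, **kwargs):
            print(f"span: {span_name}")  # stands in for the OTel span
            return original(*args, **kwargs)

        setattr(owner, method_name, wrapper)
        _patched.append((owner, method_name, original))

    def unwrap_all():
        for owner, method_name, original in reversed(_patched):
            setattr(owner, method_name, original)
        _patched.clear()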
lucidicai/telemetry/telemetry_init.py CHANGED
@@ -60,7 +60,7 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
             from .openai_uninstrument import clean_openai_instrumentation
             clean_openai_instrumentation()
 
-            # Add patch for responses.parse (not covered by standard instrumentation)
+            # Add patch for responses API methods (not covered by standard instrumentation)
             import os
             if os.getenv('LUCIDIC_DISABLE_RESPONSES_PATCH', 'false').lower() != 'true':
                 from .openai_patch import get_responses_patcher
@@ -68,9 +68,9 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
                 patcher.patch()
                 _global_instrumentors["openai_responses_patch"] = patcher
             else:
-                logger.info("[Telemetry] Skipping responses.parse patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
+                logger.info("[Telemetry] Skipping responses API patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
 
-            logger.info("[Telemetry] Instrumented OpenAI (including responses.parse)")
+            logger.info("[Telemetry] Instrumented OpenAI (including responses.parse, responses.create, beta.chat.completions.parse)")
         except Exception as e:
             logger.error(f"Failed to instrument OpenAI: {e}")
 
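The opt-out path is unchanged: setting the environment variable before the SDK initializes telemetry skips the whole responses-API patch, while the standard OpenAI instrumentation still applies.

    import os

    # Must be set before lucidicai initializes telemetry
    os.environ["LUCIDIC_DISABLE_RESPONSES_PATCH"] = "true"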
lucidicai/utils/queue.py CHANGED
@@ -372,19 +372,51 @@ class EventQueue:
         t = (event_type or "generic").lower()
 
         if t == "llm_generation":
+
             req = payload.get("request", {})
+            usage = payload.get("usage", {})
+            messages = req.get("messages", [])[:5]
+            output = payload.get("response", {}).get("output", {})
+            compressed_messages = []
+            for i, m in enumerate(messages):
+                compressed_message_item = {}
+                for k, v in messages[i].items():
+                    compressed_message_item[k] = str(v)[:200] if v else None
+                compressed_messages.append(compressed_message_item)
             return {
                 "request": {
-                    "model": str(req.get("model", ""))[:200],
-                    "provider": str(req.get("provider", ""))[:200],
-                    "messages": "truncated"
+                    "model": req.get("model")[:200] if req.get("model") else None,
+                    "provider": req.get("provider")[:200] if req.get("provider") else None,
+                    "messages": compressed_messages,
+                },
+                "usage": {
+                    k: usage.get(k) for k in ("input_tokens", "output_tokens", "cost") if k in usage
                 },
-                "response": {"output": "truncated"}
+                "response": {
+                    "output": str(output)[:200] if output else None,
+                }
             }
+
         elif t == "function_call":
+            args = payload.get("arguments")
+            truncated_args = (
+                {k: (str(v)[:200] if v is not None else None) for k, v in args.items()}
+                if isinstance(args, dict)
+                else (str(args)[:200] if args is not None else None)
+            )
+            return {
+                "function_name": payload.get("function_name")[:200] if payload.get("function_name") else None,
+                "arguments": truncated_args,
+            }
+
+        elif t == "error_traceback":
+            return {
+                "error": payload.get("error")[:200] if payload.get("error") else None,
+            }
+
+        elif t == "generic":
             return {
-                "function_name": str(payload.get("function_name", ""))[:200],
-                "arguments": "truncated"
+                "details": payload.get("details")[:200] if payload.get("details") else None,
             }
         else:
             return {"details": "preview_unavailable"}
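Previews of `llm_generation` payloads now carry real, bounded data instead of the literal string "truncated": at most five messages, every field stringified and capped at 200 characters, plus selected usage keys. A standalone sketch of the same compression rule:

    def compress_messages(messages, max_messages=5, max_len=200):
        """Mirror of the diff's preview rule: cap message count and field length."""
        compressed = []
        for m in messages[:max_messages]:
            compressed.append({k: (str(v)[:max_len] if v else None) for k, v in m.items()})
        return compressed

    preview = compress_messages([{"role": "user", "content": "x" * 10_000}])
    assert len(preview[0]["content"]) == 200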
{lucidicai-2.1.1.dist-info → lucidicai-2.1.2.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lucidicai
-Version: 2.1.1
+Version: 2.1.2
 Summary: Lucidic AI Python SDK
 Author: Andy Liang
 Author-email: andy@lucidic.ai
{lucidicai-2.1.1.dist-info → lucidicai-2.1.2.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
-lucidicai/__init__.py,sha256=b5om5w8CV6UjgmeFOlzHTyC-m2PNDgQJ5AcOD7SizYk,12600
+lucidicai/__init__.py,sha256=WwZ3A73h0Ttk1xgiiCt9yM-zXD1vIvQNg6XLTbMYQLY,12757
 lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
 lucidicai/client.py,sha256=IIhlY6Mfwy47FeMxzpvIygCaqcI1FnqiXiVU6M4QEiE,22327
 lucidicai/constants.py,sha256=zN8O7TjoRHRlaGa9CZUWppS73rhzKGwaEkF9XMTV0Cg,1160
@@ -45,12 +45,12 @@ lucidicai/providers/pydantic_ai_handler.py,sha256=Yhd9VTJhq292ZzJF04O_jYGRh-1bzs
 lucidicai/providers/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6cFwx2DE,1503
 lucidicai/providers/universal_image_interceptor.py,sha256=7d-hw4xihRwvvA1AP8-vqYNChtmVXKmn09MN4pDS7KQ,12126
 lucidicai/sdk/__init__.py,sha256=UrkV9FYbZkBxaX9qwxGbCJdXp-JqMpn0_u-huO9Y-ec,32
-lucidicai/sdk/context.py,sha256=_tNem1a39CY-cFWue173eM7FgeUmOEmQ42EfkvKhehQ,7515
+lucidicai/sdk/context.py,sha256=5Z5bLXKX3hFS-LN-lYVK3ho3b3LmNFi66UOd0cIYkZ0,9873
 lucidicai/sdk/decorators.py,sha256=B5BXG9Sn5ruUkxFq10L1rrCR_wzYUPlYeu5aqyXetMM,8393
 lucidicai/sdk/error_boundary.py,sha256=IPr5wS9rS7ZQNgEaBwK53UaixAm6L2rijKKFfxcxjUI,9190
 lucidicai/sdk/event.py,sha256=NiPcnPzYCU0VlFbBk93LD88wqAYmnglV64nQb2XteOs,3747
 lucidicai/sdk/event_builder.py,sha256=oMvt39m07ZLmPllJTWwRxpinJUz9_AD17yNE6wQRoDA,10423
-lucidicai/sdk/init.py,sha256=RMTyu_LZIo9Pi0uA76jkRviX6VBuvIuVpmFUXC7zwA4,12784
+lucidicai/sdk/init.py,sha256=tfpTRZLT317xNC_GE2OKqRR02Nj3s3a12CJELc-vVAE,12923
 lucidicai/sdk/shutdown_manager.py,sha256=I5ylR96QHQ_SfP1euAiM0qQ-I7upCPMW1HUNvoj7hCw,12090
 lucidicai/sdk/features/__init__.py,sha256=23KUF2EZBzsaH9JUFDGNXZb_3PSfc35VZfD59gAfyR0,26
 lucidicai/sdk/features/dataset.py,sha256=qFGnu8Wm1yhaflBhtm-5veN-KaoxGLBL5xWEifkrsY0,19416
@@ -61,17 +61,17 @@ lucidicai/telemetry/context_bridge.py,sha256=NwyclZvPcZHZtIvLSrY3oO8WQ_J1JSuHWIr
 lucidicai/telemetry/context_capture_processor.py,sha256=kzKWpg5m0OMUP5we6g453FjckWwA_jAVjOKCfiyKVN8,3651
 lucidicai/telemetry/extract.py,sha256=30Iqvnr9I0EkD61GRCMN0Zpk3fLmRYcuVajWjRz0z9I,6814
 lucidicai/telemetry/litellm_bridge.py,sha256=GlNeTX0HCu4JsUqfCGBb62XA61fhyWKv5ohfqSkriaE,16574
-lucidicai/telemetry/lucidic_exporter.py,sha256=tD1A2UGn0vuOW_FV_GVLXuXSxYYZf6r79Pczrn6d0lc,13189
+lucidicai/telemetry/lucidic_exporter.py,sha256=tn5_Tk2QoYP6nZUmYoS19zyXdCfg6H4gy7tSlz6tCt4,14017
 lucidicai/telemetry/lucidic_span_processor.py,sha256=-jo7Muuslo3ZCSAysLsDGBqJijQSpIOvJHPbPNjP4iQ,31029
 lucidicai/telemetry/openai_agents_instrumentor.py,sha256=__wIbeglMnEEf4AGTQ--FXeWCKmz2yy8SBupwprEdZA,12694
-lucidicai/telemetry/openai_patch.py,sha256=BRSwX4JQLd1kiH43K2FnGvk6rcf5rfZg9lxg_wPb45M,11904
+lucidicai/telemetry/openai_patch.py,sha256=3VEU7-7O9DMgKn9xVvAjhEWTCDih454o7lEBHPgLLI8,17083
 lucidicai/telemetry/openai_uninstrument.py,sha256=zELpoz2BU8O-rdHrg_7NuvjdNoY6swgoqVm5NtTCJRQ,3456
 lucidicai/telemetry/opentelemetry_converter.py,sha256=xOHCqoTyO4hUkL6k7fxy84PbljPpYep6ET9ZqbkJehc,17665
 lucidicai/telemetry/otel_handlers.py,sha256=OCzXuYog6AuwjI4eXy5Sk40DUehyz48QOxuOujXnEVU,20859
 lucidicai/telemetry/otel_init.py,sha256=hjUOX8nEBLrDOuh0UTKFfG-C98yFZHTiP8ql59bmNXY,13780
 lucidicai/telemetry/otel_provider.py,sha256=e5XcpQTd_a5UrMAq-EQcJ0zUJpO7NO16T-BphVUigR4,7513
 lucidicai/telemetry/pydantic_ai_handler.py,sha256=WPa3tFcVgVnPPO3AxcNOTbNkmODLgNOrU2_3GVtWqUw,28261
-lucidicai/telemetry/telemetry_init.py,sha256=i5lxd2RzIADv2Va06APob9CmQ0KZMSbLKDrGtAGFyBo,9503
+lucidicai/telemetry/telemetry_init.py,sha256=YpjcYDcqlWpUDDz76-x2v4K0yz_ToEpuaDz_Hypbr2w,9554
 lucidicai/telemetry/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lucidicai/telemetry/utils/image_storage.py,sha256=4Z59ZpVexr7-lcExfr8GsqXe0y2VZmr8Yjwa-3DeOxU,1457
 lucidicai/telemetry/utils/model_pricing.py,sha256=Dxi6e0WjcIyCTkVX7K7f0pJ5rPu7nSt3lOmgzAUQl1o,12402
@@ -80,8 +80,8 @@ lucidicai/telemetry/utils/universal_image_interceptor.py,sha256=vARgMk1hVSF--zfi
 lucidicai/utils/__init__.py,sha256=ZiGtmJaF0ph9iIFIgQiAreVuYM_1o7qu9VySK1NblTw,22
 lucidicai/utils/images.py,sha256=YHFjeKHRxzWu0IsuNwKw303egPsd99AShaD4WND1lJk,12325
 lucidicai/utils/logger.py,sha256=R3B3gSee64F6UVHUrShihBq_O7W7bgfrBiVDXTO3Isg,4777
-lucidicai/utils/queue.py,sha256=iBhazYt9EPTpyuexfDyPjvJT-2ODaAbCBbGYvLVl8wM,15815
-lucidicai-2.1.1.dist-info/METADATA,sha256=QWcIgu6okS3ZQmbRJgq_2mAgHUnhV9tazlTSPmUMqKg,902
-lucidicai-2.1.1.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
-lucidicai-2.1.1.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
-lucidicai-2.1.1.dist-info/RECORD,,
+lucidicai/utils/queue.py,sha256=tZYPAUHRAK_uyE8Mk4PloObsBcfITurHwIlNHr3gMFU,17326
+lucidicai-2.1.2.dist-info/METADATA,sha256=ZcL5YaquJn3D5MyMf7UqZTzrP9m5E4XAhEJQJAuOXTs,902
+lucidicai-2.1.2.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
+lucidicai-2.1.2.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
+lucidicai-2.1.2.dist-info/RECORD,,