lucidicai 2.1.1__tar.gz → 2.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {lucidicai-2.1.1 → lucidicai-2.1.2}/PKG-INFO +1 -1
  2. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/__init__.py +5 -1
  3. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/context.py +42 -0
  4. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/init.py +5 -0
  5. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/lucidic_exporter.py +13 -0
  6. lucidicai-2.1.2/lucidicai/telemetry/openai_patch.py +425 -0
  7. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/telemetry_init.py +3 -3
  8. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/utils/queue.py +38 -6
  9. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai.egg-info/PKG-INFO +1 -1
  10. {lucidicai-2.1.1 → lucidicai-2.1.2}/setup.py +1 -1
  11. lucidicai-2.1.1/lucidicai/telemetry/openai_patch.py +0 -295
  12. {lucidicai-2.1.1 → lucidicai-2.1.2}/README.md +0 -0
  13. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/api/__init__.py +0 -0
  14. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/api/client.py +0 -0
  15. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/api/resources/__init__.py +0 -0
  16. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/api/resources/dataset.py +0 -0
  17. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/api/resources/event.py +0 -0
  18. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/api/resources/session.py +0 -0
  19. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/core/__init__.py +0 -0
  20. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/core/config.py +0 -0
  21. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/core/errors.py +0 -0
  22. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/core/types.py +0 -0
  23. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/__init__.py +0 -0
  24. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/decorators.py +0 -0
  25. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/error_boundary.py +0 -0
  26. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/event.py +0 -0
  27. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/event_builder.py +0 -0
  28. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/features/__init__.py +0 -0
  29. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/features/dataset.py +0 -0
  30. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/features/feature_flag.py +0 -0
  31. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/shutdown_manager.py +0 -0
  32. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/__init__.py +0 -0
  33. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/context_bridge.py +0 -0
  34. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/context_capture_processor.py +0 -0
  35. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/extract.py +0 -0
  36. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/litellm_bridge.py +0 -0
  37. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/openai_agents_instrumentor.py +0 -0
  38. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/openai_uninstrument.py +0 -0
  39. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/utils/__init__.py +0 -0
  40. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/utils/model_pricing.py +0 -0
  41. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/utils/__init__.py +0 -0
  42. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/utils/images.py +0 -0
  43. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/utils/logger.py +0 -0
  44. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai.egg-info/SOURCES.txt +0 -0
  45. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai.egg-info/dependency_links.txt +0 -0
  46. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai.egg-info/requires.txt +0 -0
  47. {lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai.egg-info/top_level.txt +0 -0
  48. {lucidicai-2.1.1 → lucidicai-2.1.2}/setup.cfg +0 -0
{lucidicai-2.1.1 → lucidicai-2.1.2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lucidicai
- Version: 2.1.1
+ Version: 2.1.2
  Summary: Lucidic AI Python SDK
  Author: Andy Liang
  Author-email: andy@lucidic.ai
{lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/__init__.py
@@ -100,6 +100,7 @@ def _end_session(
  ):
      """End the current session."""
      from .sdk.init import get_resources, get_session_id, get_event_queue
+     from .sdk.shutdown_manager import get_shutdown_manager
 
      # Use provided session_id or fall back to context
      if not session_id:
@@ -125,6 +126,9 @@ def _end_session(
      # Clear session context
      clear_active_session()
 
+     # unregister from shutdown manager
+     get_shutdown_manager().unregister_session(session_id)
+
 
  def _get_session():
      """Get the current session object."""
@@ -293,7 +297,7 @@ get_error_history = error_boundary.get_error_history
  clear_error_history = error_boundary.clear_error_history
 
  # Version
- __version__ = "2.1.1"
+ __version__ = "2.1.2"
 
  # Apply error boundary wrapping to all SDK functions
  from .sdk.error_boundary import wrap_sdk_function
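Reviewer note: the new get_shutdown_manager().unregister_session(...) call implies a registry of live sessions that is consulted at interpreter exit. The shutdown_manager module itself is unchanged in this release and not shown in this diff, so the following is only a hypothetical sketch of the register/unregister pattern the call suggests; every name other than unregister_session is assumed:

import atexit
import threading


class ShutdownManager:
    """Hypothetical sketch (not the SDK's actual implementation): track live
    sessions so an atexit hook can end any session the user forgot to close."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._sessions: set[str] = set()
        atexit.register(self._on_exit)

    def register_session(self, session_id: str) -> None:
        with self._lock:
            self._sessions.add(session_id)

    def unregister_session(self, session_id: str) -> None:
        # Called from _end_session() above so a cleanly ended session
        # is not ended a second time at interpreter exit.
        with self._lock:
            self._sessions.discard(session_id)

    def _on_exit(self) -> None:
        for session_id in list(self._sessions):
            pass  # the real manager would end_session(session_id=...) here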
{lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/context.py
@@ -151,6 +151,27 @@ def session(**init_params) -> Iterator[None]:
          clear_thread_session()
          current_session_id.reset(token)
          try:
+             # Force flush OpenTelemetry spans before ending session
+             from .init import get_tracer_provider
+             from ..utils.logger import debug, info
+             import time
+
+             tracer_provider = get_tracer_provider()
+             if tracer_provider:
+                 debug(f"[Session] Force flushing OpenTelemetry spans for session {session_id}")
+                 try:
+                     # Force flush with 5 second timeout to ensure all spans are exported
+                     flush_result = tracer_provider.force_flush(timeout_millis=5000)
+                     debug(f"[Session] Tracer provider force_flush returned: {flush_result}")
+
+                     # Give a small additional delay to ensure the exporter processes everything
+                     # This is necessary because force_flush on the provider flushes the processors,
+                     # but the exporter might still be processing the spans
+                     time.sleep(0.5)
+                     debug(f"[Session] Successfully flushed spans for session {session_id}")
+                 except Exception as e:
+                     debug(f"[Session] Error flushing spans: {e}")
+
              # Pass session_id explicitly to avoid context issues
              lai.end_session(session_id=session_id)
          except Exception:
@@ -184,6 +205,27 @@ async def session_async(**init_params) -> AsyncIterator[None]:
          clear_task_session()
          current_session_id.reset(token)
          try:
+             # Force flush OpenTelemetry spans before ending session
+             from .init import get_tracer_provider
+             from ..utils.logger import debug, info
+             import asyncio
+
+             tracer_provider = get_tracer_provider()
+             if tracer_provider:
+                 debug(f"[Session] Force flushing OpenTelemetry spans for async session {session_id}")
+                 try:
+                     # Force flush with 5 second timeout to ensure all spans are exported
+                     flush_result = tracer_provider.force_flush(timeout_millis=5000)
+                     debug(f"[Session] Tracer provider force_flush returned: {flush_result}")
+
+                     # Give a small additional delay to ensure the exporter processes everything
+                     # This is necessary because force_flush on the provider flushes the processors,
+                     # but the exporter might still be processing the spans
+                     await asyncio.sleep(0.5)
+                     debug(f"[Session] Successfully flushed spans for async session {session_id}")
+                 except Exception as e:
+                     debug(f"[Session] Error flushing spans: {e}")
+
              # Pass session_id explicitly to avoid context issues in async
              lai.end_session(session_id=session_id)
          except Exception:
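Reviewer note: both context managers now drain OpenTelemetry before calling lai.end_session(...). Below is a minimal standalone sketch of the same flush pattern using only the public opentelemetry-sdk API; the provider, processor, and exporter wiring here is illustrative, not Lucidic's actual setup:

import time

from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))

tracer = provider.get_tracer(__name__)
with tracer.start_as_current_span("example"):
    pass

# force_flush drains the registered span processors; it returns False if the
# timeout elapsed before all spans were handed to the exporter.
flushed = provider.force_flush(timeout_millis=5000)
time.sleep(0.5)  # grace period, mirroring the SDK's extra wait for the exporter
print("flushed:", flushed)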
{lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/sdk/init.py
@@ -353,6 +353,11 @@ def get_resources() -> dict:
      return _sdk_state.resources
 
 
+ def get_tracer_provider() -> Optional[TracerProvider]:
+     """Get the tracer provider instance."""
+     return _sdk_state.tracer_provider
+
+
  def clear_state() -> None:
      """Clear SDK state (for testing)."""
      global _sdk_state
{lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/lucidic_exporter.py
@@ -46,6 +46,14 @@ class LucidicSpanExporter(SpanExporter):
 
          attributes = dict(span.attributes or {})
 
+         # Debug: Check what attributes we have for responses.create
+         if span.name == "openai.responses.create":
+             debug(f"[Telemetry] responses.create span has {len(attributes)} attributes")
+             # Check for specific attributes we're interested in
+             has_prompts = any(k.startswith('gen_ai.prompt') for k in attributes.keys())
+             has_completions = any(k.startswith('gen_ai.completion') for k in attributes.keys())
+             debug(f"[Telemetry] Has prompt attrs: {has_prompts}, Has completion attrs: {has_completions}")
+
          # Skip spans that are likely duplicates or incomplete
          # Check if this is a responses.parse span that was already handled
          if span.name == "openai.responses.create" and not attributes.get("lucidic.instrumented"):
@@ -93,6 +101,11 @@ class LucidicSpanExporter(SpanExporter):
          params = self._extract_params(attributes)
          output_text = extract_completions(span, attributes)
 
+         # Debug for responses.create
+         if span.name == "openai.responses.create":
+             debug(f"[Telemetry] Extracted messages: {messages}")
+             debug(f"[Telemetry] Extracted output: {output_text}")
+
          # Skip spans with no meaningful output (likely incomplete or duplicate instrumentation)
          if not output_text or output_text == "Response received":
              # Only use "Response received" if we have other meaningful data
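Reviewer note: the exporter's new debug checks look for the flattened gen_ai.prompt.* / gen_ai.completion.* attribute convention that the patch module below emits. For reference, a small illustrative helper (not part of the SDK) showing how a message list maps back out of those attributes:

from typing import Any, Dict, List


def collect_prompts(attributes: Dict[str, Any]) -> List[Dict[str, str]]:
    # Rebuild a message list from gen_ai.prompt.{i}.role / .content attributes.
    messages: List[Dict[str, str]] = []
    i = 0
    while f"gen_ai.prompt.{i}.role" in attributes or f"gen_ai.prompt.{i}.content" in attributes:
        messages.append({
            "role": str(attributes.get(f"gen_ai.prompt.{i}.role", "user")),
            "content": str(attributes.get(f"gen_ai.prompt.{i}.content", "")),
        })
        i += 1
    return messages


attrs = {
    "gen_ai.prompt.0.role": "system",
    "gen_ai.prompt.0.content": "You are terse.",
    "gen_ai.prompt.1.role": "user",
    "gen_ai.prompt.1.content": "Hi",
}
assert collect_prompts(attrs)[1]["content"] == "Hi"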
lucidicai-2.1.2/lucidicai/telemetry/openai_patch.py
@@ -0,0 +1,425 @@
+ """OpenAI responses API instrumentation patch.
+
+ This module provides instrumentation for OpenAI's responses.parse and responses.create APIs
+ which are not covered by the standard opentelemetry-instrumentation-openai package.
+ """
+ import functools
+ import logging
+ import time
+ from typing import Any, Callable, Optional, Dict
+
+ from opentelemetry import trace
+ from opentelemetry.trace import Status, StatusCode, SpanKind
+
+ from ..sdk.context import current_session_id, current_parent_event_id
+ from ..utils.logger import debug, verbose, warning
+
+ logger = logging.getLogger("Lucidic")
+
+
+ class OpenAIResponsesPatcher:
+     """Patches OpenAI client to instrument responses API methods."""
+
+     def __init__(self, tracer_provider=None):
+         """Initialize the patcher.
+
+         Args:
+             tracer_provider: OpenTelemetry TracerProvider to use
+         """
+         self._tracer_provider = tracer_provider or trace.get_tracer_provider()
+         self._tracer = self._tracer_provider.get_tracer(__name__)
+         self._is_patched = False
+         self._original_init = None
+         self._client_refs = []  # Keep track of patched clients for cleanup
+
+     def patch(self):
+         """Apply the patch to OpenAI client initialization."""
+         if self._is_patched:
+             debug("[OpenAI Patch] responses API already patched")
+             return
+
+         try:
+             import openai
+             from openai import OpenAI
+
+             # Store the original __init__
+             original_init = OpenAI.__init__
+
+             @functools.wraps(original_init)
+             def patched_init(client_self, *args, **kwargs):
+                 # Call original initialization
+                 original_init(client_self, *args, **kwargs)
+
+                 # Patch responses API methods
+                 self._patch_responses_api(client_self)
+
+                 # Also patch beta.chat.completions.parse if it exists
+                 self._patch_beta_api(client_self)
+
+             # Replace the __init__ method
+             OpenAI.__init__ = patched_init
+             self._original_init = original_init
+             self._is_patched = True
+
+             logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses API")
+
+         except ImportError:
+             logger.warning("[OpenAI Patch] OpenAI library not installed, skipping patch")
+         except Exception as e:
+             logger.error(f"[OpenAI Patch] Failed to patch responses API: {e}")
+
+     def _patch_responses_api(self, client):
+         """Patch the responses API methods on the client."""
+         # Check for client.resources.responses (newer structure)
+         if hasattr(client, 'resources') and hasattr(client.resources, 'responses'):
+             responses = client.resources.responses
+             self._patch_responses_object(responses, "client.resources.responses")
+
+         # Check for client.responses (direct access)
+         if hasattr(client, 'responses'):
+             responses = client.responses
+             self._patch_responses_object(responses, "client.responses")
+
+     def _patch_responses_object(self, responses, location: str):
+         """Patch methods on a responses object.
+
+         Args:
+             responses: The responses object to patch
+             location: String describing where this object is (for logging)
+         """
+         methods_to_patch = {
+             'parse': 'openai.responses.parse',
+             'create': 'openai.responses.create'
+         }
+
+         for method_name, span_name in methods_to_patch.items():
+             if hasattr(responses, method_name):
+                 original_method = getattr(responses, method_name)
+                 wrapped_method = self._create_method_wrapper(original_method, span_name)
+                 setattr(responses, method_name, wrapped_method)
+
+                 # Track for cleanup
+                 self._client_refs.append((responses, method_name, original_method))
+
+                 verbose(f"[OpenAI Patch] Patched {location}.{method_name}")
+
+     def _patch_beta_api(self, client):
+         """Patch beta.chat.completions.parse if it exists."""
+         try:
+             if (hasattr(client, 'beta') and
+                     hasattr(client.beta, 'chat') and
+                     hasattr(client.beta.chat, 'completions') and
+                     hasattr(client.beta.chat.completions, 'parse')):
+
+                 completions = client.beta.chat.completions
+                 original_parse = completions.parse
+
+                 # Wrap with a slightly different span name for clarity
+                 wrapped_parse = self._create_method_wrapper(
+                     original_parse,
+                     'openai.beta.chat.completions.parse'
+                 )
+                 completions.parse = wrapped_parse
+
+                 # Track for cleanup
+                 self._client_refs.append((completions, 'parse', original_parse))
+
+                 verbose("[OpenAI Patch] Patched beta.chat.completions.parse")
+
+         except Exception as e:
+             debug(f"[OpenAI Patch] Could not patch beta API: {e}")
+
+     def _create_method_wrapper(self, original_method: Callable, span_name: str) -> Callable:
+         """Create a wrapper for an OpenAI API method.
+
+         Args:
+             original_method: The original method to wrap
+             span_name: Name for the OpenTelemetry span
+
+         Returns:
+             Wrapped method with instrumentation
+         """
+         @functools.wraps(original_method)
+         def wrapper(*args, **kwargs):
+             # Create span for tracing
+             with self._tracer.start_as_current_span(
+                 span_name,
+                 kind=SpanKind.CLIENT
+             ) as span:
+                 start_time = time.time()
+
+                 try:
+                     # Debug log for responses.create to understand the parameters
+                     if 'responses.create' in span_name:
+                         debug(f"[OpenAI Patch] responses.create called with kwargs keys: {list(kwargs.keys())}")
+
+                     # Extract and process request parameters
+                     request_attrs = self._extract_request_attributes(span_name, args, kwargs)
+
+                     # Set span attributes
+                     span.set_attribute("gen_ai.system", "openai")
+                     span.set_attribute("gen_ai.operation.name", span_name)
+
+                     # Add our instrumentation marker
+                     span.set_attribute("lucidic.instrumented", span_name)
+                     span.set_attribute("lucidic.patch.version", "2.0")
+
+                     # Set request attributes on span
+                     for key, value in request_attrs.items():
+                         if value is not None:
+                             span.set_attribute(key, value)
+                             if 'responses.create' in span_name and ('prompt' in key or 'completion' in key):
+                                 debug(f"[OpenAI Patch] Set attribute {key}: {str(value)[:100]}")
+
+                     # Call the original method
+                     result = original_method(*args, **kwargs)
+
+                     # Process the response
+                     self._set_response_attributes(span, result, span_name, start_time)
+
+                     span.set_status(Status(StatusCode.OK))
+                     return result
+
+                 except Exception as e:
+                     # Record error in span
+                     span.set_status(Status(StatusCode.ERROR, str(e)))
+                     span.record_exception(e)
+
+                     # The exporter will handle creating error events from the span
+                     raise
+
+         return wrapper
+
+     def _extract_request_attributes(self, span_name: str, args: tuple, kwargs: dict) -> Dict[str, Any]:
+         """Extract request attributes based on the API method being called.
+
+         Args:
+             span_name: Name of the span/API method
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             Dictionary of span attributes to set
+         """
+         attrs = {}
+
+         # Common attributes
+         model = kwargs.get('model', 'unknown')
+         attrs['gen_ai.request.model'] = model
+
+         temperature = kwargs.get('temperature')
+         if temperature is not None:
+             attrs['gen_ai.request.temperature'] = temperature
+
+         # Method-specific handling
+         if 'responses.parse' in span_name:
+             # Handle responses.parse format
+             input_param = kwargs.get('input', [])
+             text_format = kwargs.get('text_format')
+             instructions = kwargs.get('instructions')
+
+             # Convert input to messages format
+             if isinstance(input_param, str):
+                 messages = [{"role": "user", "content": input_param}]
+             elif isinstance(input_param, list):
+                 messages = input_param
+             else:
+                 messages = []
+
+             if text_format and hasattr(text_format, '__name__'):
+                 attrs['gen_ai.request.response_format'] = text_format.__name__
+
+             if instructions:
+                 # Never truncate - EventQueue handles large messages automatically
+                 attrs['gen_ai.request.instructions'] = str(instructions)
+
+         elif 'responses.create' in span_name:
+             # Handle responses.create format - it uses 'input' not 'messages'
+             input_param = kwargs.get('input', [])
+
+             # Convert input to messages format
+             if isinstance(input_param, str):
+                 messages = [{"role": "user", "content": input_param}]
+             elif isinstance(input_param, list):
+                 messages = input_param
+             else:
+                 messages = []
+
+             # Handle text parameter for structured outputs
+             text_format = kwargs.get('text')
+             if text_format and hasattr(text_format, '__name__'):
+                 attrs['gen_ai.request.response_format'] = text_format.__name__
+
+         elif 'completions.parse' in span_name:
+             # Handle standard chat completion format
+             messages = kwargs.get('messages', [])
+
+             # Handle response_format for structured outputs
+             response_format = kwargs.get('response_format')
+             if response_format:
+                 if hasattr(response_format, '__name__'):
+                     attrs['gen_ai.request.response_format'] = response_format.__name__
+                 elif isinstance(response_format, dict):
+                     attrs['gen_ai.request.response_format'] = str(response_format)
+
+         else:
+             # Fallback: try to get messages from kwargs
+             messages = kwargs.get('messages', kwargs.get('input', []))
+             if isinstance(messages, str):
+                 messages = [{"role": "user", "content": messages}]
+
+         # Always set message attributes for proper event creation
+         # The EventQueue handles large messages automatically with blob storage
+         for i, msg in enumerate(messages):
+             if isinstance(msg, dict):
+                 role = msg.get('role', 'user')
+                 content = msg.get('content', '')
+                 attrs[f'gen_ai.prompt.{i}.role'] = role
+                 # Always include full content - EventQueue handles large messages
+                 attrs[f'gen_ai.prompt.{i}.content'] = str(content)
+
+         return attrs
+
+     def _set_response_attributes(self, span, result, span_name: str, start_time: float):
+         """Set response attributes on the span for the exporter to use.
+
+         Args:
+             span: OpenTelemetry span
+             result: Response from OpenAI
+             span_name: Name of the API method
+             start_time: Request start time
+         """
+         duration = time.time() - start_time
+         span.set_attribute("lucidic.duration_seconds", duration)
+
+         # Extract output based on response structure
+         output_text = None
+
+         # Handle different response formats
+         if 'responses.parse' in span_name:
+             # responses.parse format
+             if hasattr(result, 'output_parsed'):
+                 output_text = str(result.output_parsed)
+             elif hasattr(result, 'parsed'):
+                 output_text = str(result.parsed)
+
+         elif 'responses.create' in span_name:
+             # responses.create returns a Response object with output_text
+             if hasattr(result, 'output_text'):
+                 output_text = result.output_text
+             elif hasattr(result, 'output'):
+                 output_text = result.output
+             else:
+                 # Log what we actually got for debugging
+                 debug(f"[OpenAI Patch] responses.create result type: {type(result)}")
+                 debug(f"[OpenAI Patch] responses.create result attributes: {[attr for attr in dir(result) if not attr.startswith('_')]}")
+
+         elif 'completions.parse' in span_name:
+             # Standard chat completion format
+             if hasattr(result, 'choices') and result.choices:
+                 choice = result.choices[0]
+                 if hasattr(choice, 'message'):
+                     msg = choice.message
+                     if hasattr(msg, 'parsed'):
+                         # Structured output
+                         output_text = str(msg.parsed)
+                     elif hasattr(msg, 'content'):
+                         # Regular content
+                         output_text = msg.content
+                 elif hasattr(choice, 'text'):
+                     # Completion format
+                     output_text = choice.text
+
+         # Set completion attributes if we have output
+         if output_text:
+             # Never truncate - EventQueue handles large messages automatically
+             span.set_attribute("gen_ai.completion.0.role", "assistant")
+             span.set_attribute("gen_ai.completion.0.content", output_text)
+             debug(f"[OpenAI Patch] Set completion: {output_text[:100]}")
+         else:
+             debug(f"[OpenAI Patch] No output_text found for {span_name}")
+
+         # Handle usage data
+         if hasattr(result, 'usage'):
+             usage = result.usage
+
+             # Debug logging
+             debug(f"[OpenAI Patch] Usage object type: {type(usage)}")
+             debug(f"[OpenAI Patch] Usage attributes: {[attr for attr in dir(usage) if not attr.startswith('_')]}")
+
+             # Extract tokens with proper handling
+             prompt_tokens = None
+             completion_tokens = None
+             total_tokens = None
+
+             # Try different ways to access token data
+             if hasattr(usage, 'prompt_tokens'):
+                 prompt_tokens = usage.prompt_tokens
+             elif hasattr(usage, 'input_tokens'):
+                 prompt_tokens = usage.input_tokens
+
+             if hasattr(usage, 'completion_tokens'):
+                 completion_tokens = usage.completion_tokens
+             elif hasattr(usage, 'output_tokens'):
+                 completion_tokens = usage.output_tokens
+
+             if hasattr(usage, 'total_tokens'):
+                 total_tokens = usage.total_tokens
+             elif prompt_tokens is not None and completion_tokens is not None:
+                 total_tokens = prompt_tokens + completion_tokens
+
+             debug(f"[OpenAI Patch] Extracted tokens - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}")
+
+             # Set usage attributes on span
+             if prompt_tokens is not None:
+                 span.set_attribute("gen_ai.usage.prompt_tokens", prompt_tokens)
+             if completion_tokens is not None:
+                 span.set_attribute("gen_ai.usage.completion_tokens", completion_tokens)
+             if total_tokens is not None:
+                 span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
+
+     def unpatch(self):
+         """Remove the patch and restore original behavior."""
+         if not self._is_patched:
+             return
+
+         try:
+             # Restore original __init__ if we have it
+             if self._original_init:
+                 import openai
+                 from openai import OpenAI
+                 OpenAI.__init__ = self._original_init
+
+             # Restore original methods on tracked clients
+             for obj, method_name, original_method in self._client_refs:
+                 try:
+                     setattr(obj, method_name, original_method)
+                 except:
+                     pass  # Client might have been garbage collected
+
+             self._client_refs.clear()
+             self._is_patched = False
+
+             logger.info("[OpenAI Patch] Successfully removed responses API patch")
+
+         except Exception as e:
+             logger.error(f"[OpenAI Patch] Failed to unpatch: {e}")
+
+
+ # Global singleton instance
+ _patcher_instance: Optional[OpenAIResponsesPatcher] = None
+
+
+ def get_responses_patcher(tracer_provider=None) -> OpenAIResponsesPatcher:
+     """Get or create the global patcher instance.
+
+     Args:
+         tracer_provider: OpenTelemetry TracerProvider
+
+     Returns:
+         The singleton patcher instance
+     """
+     global _patcher_instance
+     if _patcher_instance is None:
+         _patcher_instance = OpenAIResponsesPatcher(tracer_provider)
+     return _patcher_instance
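Reviewer note: for context, this is roughly how the module above is driven. Normally telemetry_init's instrument_providers applies the patch automatically, so the manual calls here are illustrative only, and the placeholder API key exists just so the client constructs without a real credential:

from lucidicai.telemetry.openai_patch import get_responses_patcher
from openai import OpenAI

patcher = get_responses_patcher()  # module-level singleton; a TracerProvider may be passed
patcher.patch()                    # wraps OpenAI.__init__ so clients created from now on are patched

try:
    client = OpenAI(api_key="sk-placeholder")  # constructing the client triggers the instance patching
    # client.responses.create(...), client.responses.parse(...) and
    # client.beta.chat.completions.parse(...) would now emit lucidic-marked spans
finally:
    patcher.unpatch()  # restore OpenAI.__init__ and any patched client methods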
{lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/telemetry/telemetry_init.py
@@ -60,7 +60,7 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
          from .openai_uninstrument import clean_openai_instrumentation
          clean_openai_instrumentation()
 
-         # Add patch for responses.parse (not covered by standard instrumentation)
+         # Add patch for responses API methods (not covered by standard instrumentation)
          import os
          if os.getenv('LUCIDIC_DISABLE_RESPONSES_PATCH', 'false').lower() != 'true':
              from .openai_patch import get_responses_patcher
@@ -68,9 +68,9 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
              patcher.patch()
              _global_instrumentors["openai_responses_patch"] = patcher
          else:
-             logger.info("[Telemetry] Skipping responses.parse patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
+             logger.info("[Telemetry] Skipping responses API patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
 
-         logger.info("[Telemetry] Instrumented OpenAI (including responses.parse)")
+         logger.info("[Telemetry] Instrumented OpenAI (including responses.parse, responses.create, beta.chat.completions.parse)")
      except Exception as e:
          logger.error(f"Failed to instrument OpenAI: {e}")
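Reviewer note: as the code above shows, the patch can be opted out of by setting LUCIDIC_DISABLE_RESPONSES_PATCH before telemetry initializes, for example:

import os

# Must be set before lucidicai initializes telemetry:
os.environ["LUCIDIC_DISABLE_RESPONSES_PATCH"] = "true"

# import lucidicai  # instrument_providers() would now skip the openai_patch module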
{lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai/utils/queue.py
@@ -372,19 +372,51 @@ class EventQueue:
          t = (event_type or "generic").lower()
 
          if t == "llm_generation":
+
              req = payload.get("request", {})
+             usage = payload.get("usage", {})
+             messages = req.get("messages", [])[:5]
+             output = payload.get("response", {}).get("output", {})
+             compressed_messages = []
+             for i, m in enumerate(messages):
+                 compressed_message_item = {}
+                 for k, v in messages[i].items():
+                     compressed_message_item[k] = str(v)[:200] if v else None
+                 compressed_messages.append(compressed_message_item)
              return {
                  "request": {
-                     "model": str(req.get("model", ""))[:200],
-                     "provider": str(req.get("provider", ""))[:200],
-                     "messages": "truncated"
+                     "model": req.get("model")[:200] if req.get("model") else None,
+                     "provider": req.get("provider")[:200] if req.get("provider") else None,
+                     "messages": compressed_messages,
+                 },
+                 "usage": {
+                     k: usage.get(k) for k in ("input_tokens", "output_tokens", "cost") if k in usage
                  },
-                 "response": {"output": "truncated"}
+                 "response": {
+                     "output": str(output)[:200] if output else None,
+                 }
              }
+
 
          elif t == "function_call":
+             args = payload.get("arguments")
+             truncated_args = (
+                 {k: (str(v)[:200] if v is not None else None) for k, v in args.items()}
+                 if isinstance(args, dict)
+                 else (str(args)[:200] if args is not None else None)
+             )
+             return {
+                 "function_name": payload.get("function_name")[:200] if payload.get("function_name") else None,
+                 "arguments": truncated_args,
+             }
+
+         elif t == "error_traceback":
+             return {
+                 "error": payload.get("error")[:200] if payload.get("error") else None,
+             }
+
+         elif t == "generic":
              return {
-                 "function_name": str(payload.get("function_name", ""))[:200],
-                 "arguments": "truncated"
+                 "details": payload.get("details")[:200] if payload.get("details") else None,
              }
          else:
              return {"details": "preview_unavailable"}
{lucidicai-2.1.1 → lucidicai-2.1.2}/lucidicai.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lucidicai
- Version: 2.1.1
+ Version: 2.1.2
  Summary: Lucidic AI Python SDK
  Author: Andy Liang
  Author-email: andy@lucidic.ai
{lucidicai-2.1.1 → lucidicai-2.1.2}/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
  setup(
      name="lucidicai",
-     version="2.1.1",
+     version="2.1.2",
      packages=find_packages(),
      install_requires=[
          "requests>=2.25.1",
@@ -1,295 +0,0 @@
1
- """OpenAI responses.parse instrumentation patch.
2
-
3
- This module provides instrumentation for OpenAI's responses.parse API
4
- which is not covered by the standard opentelemetry-instrumentation-openai package.
5
- """
6
- import functools
7
- import logging
8
- import time
9
- from typing import Any, Callable, Optional
10
-
11
- from opentelemetry import trace
12
- from opentelemetry.trace import Status, StatusCode, SpanKind
13
-
14
- from ..sdk.context import current_session_id, current_parent_event_id
15
- from ..utils.logger import debug, verbose, warning
16
-
17
- logger = logging.getLogger("Lucidic")
18
-
19
-
20
- class OpenAIResponsesPatcher:
21
- """Patches OpenAI client to instrument responses.parse method."""
22
-
23
- def __init__(self, tracer_provider=None):
24
- """Initialize the patcher.
25
-
26
- Args:
27
- tracer_provider: OpenTelemetry TracerProvider to use
28
- """
29
- self._tracer_provider = tracer_provider or trace.get_tracer_provider()
30
- self._tracer = self._tracer_provider.get_tracer(__name__)
31
- self._is_patched = False
32
- self._original_parse = None
33
- self._client_refs = [] # Keep track of patched clients for cleanup
34
-
35
- def patch(self):
36
- """Apply the patch to OpenAI client initialization."""
37
- if self._is_patched:
38
- debug("[OpenAI Patch] responses.parse already patched")
39
- return
40
-
41
- try:
42
- import openai
43
- from openai import OpenAI
44
-
45
- # Store the original __init__
46
- original_init = OpenAI.__init__
47
-
48
- @functools.wraps(original_init)
49
- def patched_init(client_self, *args, **kwargs):
50
- # Call original initialization
51
- original_init(client_self, *args, **kwargs)
52
-
53
- # Patch the responses.parse method on this specific instance
54
- if hasattr(client_self, 'resources') and hasattr(client_self.resources, 'responses'):
55
- responses = client_self.resources.responses
56
- if hasattr(responses, 'parse'):
57
- # Store original and apply wrapper
58
- original_parse = responses.parse
59
- responses.parse = self._create_parse_wrapper(original_parse)
60
-
61
- # Track this client for cleanup
62
- self._client_refs.append((responses, original_parse))
63
-
64
- verbose("[OpenAI Patch] Patched responses.parse on client instance")
65
-
66
- # Also patch the direct access if available
67
- if hasattr(client_self, 'responses') and hasattr(client_self.responses, 'parse'):
68
- original_parse = client_self.responses.parse
69
- client_self.responses.parse = self._create_parse_wrapper(original_parse)
70
- self._client_refs.append((client_self.responses, original_parse))
71
- verbose("[OpenAI Patch] Patched client.responses.parse")
72
-
73
- # Replace the __init__ method
74
- OpenAI.__init__ = patched_init
75
- self._original_init = original_init
76
- self._is_patched = True
77
-
78
- logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses.parse")
79
-
80
- except ImportError:
81
- logger.warning("[OpenAI Patch] OpenAI library not installed, skipping patch")
82
- except Exception as e:
83
- logger.error(f"[OpenAI Patch] Failed to patch responses.parse: {e}")
84
-
85
- def _create_parse_wrapper(self, original_method: Callable) -> Callable:
86
- """Create a wrapper for the responses.parse method.
87
-
88
- Args:
89
- original_method: The original parse method to wrap
90
-
91
- Returns:
92
- Wrapped method with instrumentation
93
- """
94
- @functools.wraps(original_method)
95
- def wrapper(**kwargs):
96
- # Create span for tracing
97
- with self._tracer.start_as_current_span(
98
- "openai.responses.parse",
99
- kind=SpanKind.CLIENT
100
- ) as span:
101
- start_time = time.time()
102
-
103
- try:
104
- # Extract request parameters
105
- model = kwargs.get('model', 'unknown')
106
- temperature = kwargs.get('temperature', 1.0)
107
- input_param = kwargs.get('input', [])
108
- text_format = kwargs.get('text_format')
109
- instructions = kwargs.get('instructions')
110
-
111
- # Convert input to messages format if needed
112
- if isinstance(input_param, str):
113
- messages = [{"role": "user", "content": input_param}]
114
- elif isinstance(input_param, list):
115
- messages = input_param
116
- else:
117
- messages = []
118
-
119
- # Set span attributes
120
- span.set_attribute("gen_ai.system", "openai")
121
- span.set_attribute("gen_ai.request.model", model)
122
- span.set_attribute("gen_ai.request.temperature", temperature)
123
- span.set_attribute("gen_ai.operation.name", "responses.parse")
124
-
125
- # Add a unique marker for our instrumentation
126
- span.set_attribute("lucidic.instrumented", "responses.parse")
127
- span.set_attribute("lucidic.patch.version", "1.0")
128
-
129
- if text_format and hasattr(text_format, '__name__'):
130
- span.set_attribute("gen_ai.request.response_format", text_format.__name__)
131
-
132
- if instructions:
133
- span.set_attribute("gen_ai.request.instructions", str(instructions))
134
-
135
- # Always set message attributes for proper event creation
136
- for i, msg in enumerate(messages): # Include all messages
137
- if isinstance(msg, dict):
138
- role = msg.get('role', 'user')
139
- content = msg.get('content', '')
140
- span.set_attribute(f"gen_ai.prompt.{i}.role", role)
141
- # Always include full content - EventQueue handles large messages
142
- span.set_attribute(f"gen_ai.prompt.{i}.content", str(content))
143
-
144
- # Call the original method
145
- result = original_method(**kwargs)
146
-
147
- # Process the response and set attributes on span
148
- self._set_response_attributes(span, result, model, messages, start_time, text_format)
149
-
150
- span.set_status(Status(StatusCode.OK))
151
- return result
152
-
153
- except Exception as e:
154
- # Record error in span
155
- span.set_status(Status(StatusCode.ERROR, str(e)))
156
- span.record_exception(e)
157
-
158
- # The exporter will handle creating error events from the span
159
- raise
160
-
161
- return wrapper
162
-
163
- def _set_response_attributes(self, span, result, model: str, messages: list, start_time: float, text_format):
164
- """Set response attributes on the span for the exporter to use.
165
-
166
- Args:
167
- span: OpenTelemetry span
168
- result: Response from OpenAI
169
- model: Model name
170
- messages: Input messages
171
- start_time: Request start time
172
- text_format: Response format (Pydantic model)
173
- """
174
- duration = time.time() - start_time
175
-
176
- # Extract output
177
- output_text = None
178
-
179
- # Handle structured output response
180
- if hasattr(result, 'output_parsed'):
181
- output_text = str(result.output_parsed)
182
-
183
- # Always set completion attributes so the exporter can extract them
184
- span.set_attribute("gen_ai.completion.0.role", "assistant")
185
- span.set_attribute("gen_ai.completion.0.content", output_text)
186
-
187
- # Handle usage data
188
- if hasattr(result, 'usage'):
189
- usage = result.usage
190
-
191
- # Debug logging
192
- debug(f"[OpenAI Patch] Usage object type: {type(usage)}")
193
- debug(f"[OpenAI Patch] Usage attributes: {[attr for attr in dir(usage) if not attr.startswith('_')]}")
194
-
195
- # Extract tokens with proper handling
196
- prompt_tokens = None
197
- completion_tokens = None
198
- total_tokens = None
199
-
200
- # Try different ways to access token data
201
- if hasattr(usage, 'prompt_tokens'):
202
- prompt_tokens = usage.prompt_tokens
203
- elif hasattr(usage, 'input_tokens'):
204
- prompt_tokens = usage.input_tokens
205
-
206
- if hasattr(usage, 'completion_tokens'):
207
- completion_tokens = usage.completion_tokens
208
- elif hasattr(usage, 'output_tokens'):
209
- completion_tokens = usage.output_tokens
210
-
211
- if hasattr(usage, 'total_tokens'):
212
- total_tokens = usage.total_tokens
213
- elif prompt_tokens is not None and completion_tokens is not None:
214
- total_tokens = prompt_tokens + completion_tokens
215
-
216
- debug(f"[OpenAI Patch] Extracted tokens - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}")
217
-
218
- # Set usage attributes on span
219
- if prompt_tokens is not None:
220
- span.set_attribute("gen_ai.usage.prompt_tokens", prompt_tokens)
221
- if completion_tokens is not None:
222
- span.set_attribute("gen_ai.usage.completion_tokens", completion_tokens)
223
- if total_tokens is not None:
224
- span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
225
-
226
- # Set additional metadata for the exporter
227
- if text_format and hasattr(text_format, '__name__'):
228
- span.set_attribute("lucidic.response_format", text_format.__name__)
229
-
230
- # Set duration as attribute
231
- span.set_attribute("lucidic.duration_seconds", duration)
232
-
233
-
234
- def _should_capture_content(self) -> bool:
235
- """Check if message content should be captured.
236
-
237
- Returns:
238
- True if content capture is enabled
239
- """
240
-
241
- return True # always capture content for now
242
-
243
- import os
244
- # check OTEL standard env var
245
- otel_capture = os.getenv('OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT', 'false')
246
- # check Lucidic-specific env var
247
- lucidic_capture = os.getenv('LUCIDIC_CAPTURE_CONTENT', 'false')
248
-
249
- return otel_capture.lower() == 'true' or lucidic_capture.lower() == 'true'
250
-
251
- def unpatch(self):
252
- """Remove the patch and restore original behavior."""
253
- if not self._is_patched:
254
- return
255
-
256
- try:
257
- # restore original __init__ if we have it
258
- if hasattr(self, '_original_init'):
259
- import openai
260
- from openai import OpenAI
261
- OpenAI.__init__ = self._original_init
262
-
263
- # restore original parse methods on tracked clients
264
- for responses_obj, original_parse in self._client_refs:
265
- try:
266
- responses_obj.parse = original_parse
267
- except:
268
- pass # Client might have been garbage collected
269
-
270
- self._client_refs.clear()
271
- self._is_patched = False
272
-
273
- logger.info("[OpenAI Patch] Successfully removed responses.parse patch")
274
-
275
- except Exception as e:
276
- logger.error(f"[OpenAI Patch] Failed to unpatch: {e}")
277
-
278
-
279
- # Global singleton instance
280
- _patcher_instance: Optional[OpenAIResponsesPatcher] = None
281
-
282
-
283
- def get_responses_patcher(tracer_provider=None) -> OpenAIResponsesPatcher:
284
- """Get or create the global patcher instance.
285
-
286
- Args:
287
- tracer_provider: OpenTelemetry TracerProvider
288
-
289
- Returns:
290
- The singleton patcher instance
291
- """
292
- global _patcher_instance
293
- if _patcher_instance is None:
294
- _patcher_instance = OpenAIResponsesPatcher(tracer_provider)
295
- return _patcher_instance