lucidicai 2.1.0__py3-none-any.whl → 2.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucidicai/telemetry/openai_patch.py ADDED
@@ -0,0 +1,425 @@
+"""OpenAI responses API instrumentation patch.
+
+This module provides instrumentation for OpenAI's responses.parse and responses.create APIs
+which are not covered by the standard opentelemetry-instrumentation-openai package.
+"""
+import functools
+import logging
+import time
+from typing import Any, Callable, Optional, Dict
+
+from opentelemetry import trace
+from opentelemetry.trace import Status, StatusCode, SpanKind
+
+from ..sdk.context import current_session_id, current_parent_event_id
+from ..utils.logger import debug, verbose, warning
+
+logger = logging.getLogger("Lucidic")
+
+
+class OpenAIResponsesPatcher:
+    """Patches OpenAI client to instrument responses API methods."""
+
+    def __init__(self, tracer_provider=None):
+        """Initialize the patcher.
+
+        Args:
+            tracer_provider: OpenTelemetry TracerProvider to use
+        """
+        self._tracer_provider = tracer_provider or trace.get_tracer_provider()
+        self._tracer = self._tracer_provider.get_tracer(__name__)
+        self._is_patched = False
+        self._original_init = None
+        self._client_refs = []  # Keep track of patched clients for cleanup
+
+    def patch(self):
+        """Apply the patch to OpenAI client initialization."""
+        if self._is_patched:
+            debug("[OpenAI Patch] responses API already patched")
+            return
+
+        try:
+            import openai
+            from openai import OpenAI
+
+            # Store the original __init__
+            original_init = OpenAI.__init__
+
+            @functools.wraps(original_init)
+            def patched_init(client_self, *args, **kwargs):
+                # Call original initialization
+                original_init(client_self, *args, **kwargs)
+
+                # Patch responses API methods
+                self._patch_responses_api(client_self)
+
+                # Also patch beta.chat.completions.parse if it exists
+                self._patch_beta_api(client_self)
+
+            # Replace the __init__ method
+            OpenAI.__init__ = patched_init
+            self._original_init = original_init
+            self._is_patched = True
+
+            logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses API")
+
+        except ImportError:
+            logger.warning("[OpenAI Patch] OpenAI library not installed, skipping patch")
+        except Exception as e:
+            logger.error(f"[OpenAI Patch] Failed to patch responses API: {e}")
+
+    def _patch_responses_api(self, client):
+        """Patch the responses API methods on the client."""
+        # Check for client.resources.responses (newer structure)
+        if hasattr(client, 'resources') and hasattr(client.resources, 'responses'):
+            responses = client.resources.responses
+            self._patch_responses_object(responses, "client.resources.responses")
+
+        # Check for client.responses (direct access)
+        if hasattr(client, 'responses'):
+            responses = client.responses
+            self._patch_responses_object(responses, "client.responses")
+
+    def _patch_responses_object(self, responses, location: str):
+        """Patch methods on a responses object.
+
+        Args:
+            responses: The responses object to patch
+            location: String describing where this object is (for logging)
+        """
+        methods_to_patch = {
+            'parse': 'openai.responses.parse',
+            'create': 'openai.responses.create'
+        }
+
+        for method_name, span_name in methods_to_patch.items():
+            if hasattr(responses, method_name):
+                original_method = getattr(responses, method_name)
+                wrapped_method = self._create_method_wrapper(original_method, span_name)
+                setattr(responses, method_name, wrapped_method)
+
+                # Track for cleanup
+                self._client_refs.append((responses, method_name, original_method))
+
+                verbose(f"[OpenAI Patch] Patched {location}.{method_name}")
+
+    def _patch_beta_api(self, client):
+        """Patch beta.chat.completions.parse if it exists."""
+        try:
+            if (hasattr(client, 'beta') and
+                    hasattr(client.beta, 'chat') and
+                    hasattr(client.beta.chat, 'completions') and
+                    hasattr(client.beta.chat.completions, 'parse')):
+
+                completions = client.beta.chat.completions
+                original_parse = completions.parse
+
+                # Wrap with a slightly different span name for clarity
+                wrapped_parse = self._create_method_wrapper(
+                    original_parse,
+                    'openai.beta.chat.completions.parse'
+                )
+                completions.parse = wrapped_parse
+
+                # Track for cleanup
+                self._client_refs.append((completions, 'parse', original_parse))
+
+                verbose("[OpenAI Patch] Patched beta.chat.completions.parse")
+
+        except Exception as e:
+            debug(f"[OpenAI Patch] Could not patch beta API: {e}")
+
+    def _create_method_wrapper(self, original_method: Callable, span_name: str) -> Callable:
+        """Create a wrapper for an OpenAI API method.
+
+        Args:
+            original_method: The original method to wrap
+            span_name: Name for the OpenTelemetry span
+
+        Returns:
+            Wrapped method with instrumentation
+        """
+        @functools.wraps(original_method)
+        def wrapper(*args, **kwargs):
+            # Create span for tracing
+            with self._tracer.start_as_current_span(
+                span_name,
+                kind=SpanKind.CLIENT
+            ) as span:
+                start_time = time.time()
+
+                try:
+                    # Debug log for responses.create to understand the parameters
+                    if 'responses.create' in span_name:
+                        debug(f"[OpenAI Patch] responses.create called with kwargs keys: {list(kwargs.keys())}")
+
+                    # Extract and process request parameters
+                    request_attrs = self._extract_request_attributes(span_name, args, kwargs)
+
+                    # Set span attributes
+                    span.set_attribute("gen_ai.system", "openai")
+                    span.set_attribute("gen_ai.operation.name", span_name)
+
+                    # Add our instrumentation marker
+                    span.set_attribute("lucidic.instrumented", span_name)
+                    span.set_attribute("lucidic.patch.version", "2.0")
+
+                    # Set request attributes on span
+                    for key, value in request_attrs.items():
+                        if value is not None:
+                            span.set_attribute(key, value)
+                            if 'responses.create' in span_name and ('prompt' in key or 'completion' in key):
+                                debug(f"[OpenAI Patch] Set attribute {key}: {str(value)[:100]}")
+
+                    # Call the original method
+                    result = original_method(*args, **kwargs)
+
+                    # Process the response
+                    self._set_response_attributes(span, result, span_name, start_time)
+
+                    span.set_status(Status(StatusCode.OK))
+                    return result
+
+                except Exception as e:
+                    # Record error in span
+                    span.set_status(Status(StatusCode.ERROR, str(e)))
+                    span.record_exception(e)
+
+                    # The exporter will handle creating error events from the span
+                    raise
+
+        return wrapper
+
+    def _extract_request_attributes(self, span_name: str, args: tuple, kwargs: dict) -> Dict[str, Any]:
+        """Extract request attributes based on the API method being called.
+
+        Args:
+            span_name: Name of the span/API method
+            args: Positional arguments
+            kwargs: Keyword arguments
+
+        Returns:
+            Dictionary of span attributes to set
+        """
+        attrs = {}
+
+        # Common attributes
+        model = kwargs.get('model', 'unknown')
+        attrs['gen_ai.request.model'] = model
+
+        temperature = kwargs.get('temperature')
+        if temperature is not None:
+            attrs['gen_ai.request.temperature'] = temperature
+
+        # Method-specific handling
+        if 'responses.parse' in span_name:
+            # Handle responses.parse format
+            input_param = kwargs.get('input', [])
+            text_format = kwargs.get('text_format')
+            instructions = kwargs.get('instructions')
+
+            # Convert input to messages format
+            if isinstance(input_param, str):
+                messages = [{"role": "user", "content": input_param}]
+            elif isinstance(input_param, list):
+                messages = input_param
+            else:
+                messages = []
+
+            if text_format and hasattr(text_format, '__name__'):
+                attrs['gen_ai.request.response_format'] = text_format.__name__
+
+            if instructions:
+                # Never truncate - EventQueue handles large messages automatically
+                attrs['gen_ai.request.instructions'] = str(instructions)
+
+        elif 'responses.create' in span_name:
+            # Handle responses.create format - it uses 'input' not 'messages'
+            input_param = kwargs.get('input', [])
+
+            # Convert input to messages format
+            if isinstance(input_param, str):
+                messages = [{"role": "user", "content": input_param}]
+            elif isinstance(input_param, list):
+                messages = input_param
+            else:
+                messages = []
+
+            # Handle text parameter for structured outputs
+            text_format = kwargs.get('text')
+            if text_format and hasattr(text_format, '__name__'):
+                attrs['gen_ai.request.response_format'] = text_format.__name__
+
+        elif 'completions.parse' in span_name:
+            # Handle standard chat completion format
+            messages = kwargs.get('messages', [])
+
+            # Handle response_format for structured outputs
+            response_format = kwargs.get('response_format')
+            if response_format:
+                if hasattr(response_format, '__name__'):
+                    attrs['gen_ai.request.response_format'] = response_format.__name__
+                elif isinstance(response_format, dict):
+                    attrs['gen_ai.request.response_format'] = str(response_format)
+
+        else:
+            # Fallback: try to get messages from kwargs
+            messages = kwargs.get('messages', kwargs.get('input', []))
+            if isinstance(messages, str):
+                messages = [{"role": "user", "content": messages}]
+
+        # Always set message attributes for proper event creation
+        # The EventQueue handles large messages automatically with blob storage
+        for i, msg in enumerate(messages):
+            if isinstance(msg, dict):
+                role = msg.get('role', 'user')
+                content = msg.get('content', '')
+                attrs[f'gen_ai.prompt.{i}.role'] = role
+                # Always include full content - EventQueue handles large messages
+                attrs[f'gen_ai.prompt.{i}.content'] = str(content)
+
+        return attrs
+
+    def _set_response_attributes(self, span, result, span_name: str, start_time: float):
+        """Set response attributes on the span for the exporter to use.
+
+        Args:
+            span: OpenTelemetry span
+            result: Response from OpenAI
+            span_name: Name of the API method
+            start_time: Request start time
+        """
+        duration = time.time() - start_time
+        span.set_attribute("lucidic.duration_seconds", duration)
+
+        # Extract output based on response structure
+        output_text = None
+
+        # Handle different response formats
+        if 'responses.parse' in span_name:
+            # responses.parse format
+            if hasattr(result, 'output_parsed'):
+                output_text = str(result.output_parsed)
+            elif hasattr(result, 'parsed'):
+                output_text = str(result.parsed)
+
+        elif 'responses.create' in span_name:
+            # responses.create returns a Response object with output_text
+            if hasattr(result, 'output_text'):
+                output_text = result.output_text
+            elif hasattr(result, 'output'):
+                output_text = result.output
+            else:
+                # Log what we actually got for debugging
+                debug(f"[OpenAI Patch] responses.create result type: {type(result)}")
+                debug(f"[OpenAI Patch] responses.create result attributes: {[attr for attr in dir(result) if not attr.startswith('_')]}")
+
+        elif 'completions.parse' in span_name:
+            # Standard chat completion format
+            if hasattr(result, 'choices') and result.choices:
+                choice = result.choices[0]
+                if hasattr(choice, 'message'):
+                    msg = choice.message
+                    if hasattr(msg, 'parsed'):
+                        # Structured output
+                        output_text = str(msg.parsed)
+                    elif hasattr(msg, 'content'):
+                        # Regular content
+                        output_text = msg.content
+                elif hasattr(choice, 'text'):
+                    # Completion format
+                    output_text = choice.text
+
+        # Set completion attributes if we have output
+        if output_text:
+            # Never truncate - EventQueue handles large messages automatically
+            span.set_attribute("gen_ai.completion.0.role", "assistant")
+            span.set_attribute("gen_ai.completion.0.content", output_text)
+            debug(f"[OpenAI Patch] Set completion: {output_text[:100]}")
+        else:
+            debug(f"[OpenAI Patch] No output_text found for {span_name}")
+
+        # Handle usage data
+        if hasattr(result, 'usage'):
+            usage = result.usage
+
+            # Debug logging
+            debug(f"[OpenAI Patch] Usage object type: {type(usage)}")
+            debug(f"[OpenAI Patch] Usage attributes: {[attr for attr in dir(usage) if not attr.startswith('_')]}")
+
+            # Extract tokens with proper handling
+            prompt_tokens = None
+            completion_tokens = None
+            total_tokens = None
+
+            # Try different ways to access token data
+            if hasattr(usage, 'prompt_tokens'):
+                prompt_tokens = usage.prompt_tokens
+            elif hasattr(usage, 'input_tokens'):
+                prompt_tokens = usage.input_tokens
+
+            if hasattr(usage, 'completion_tokens'):
+                completion_tokens = usage.completion_tokens
+            elif hasattr(usage, 'output_tokens'):
+                completion_tokens = usage.output_tokens
+
+            if hasattr(usage, 'total_tokens'):
+                total_tokens = usage.total_tokens
+            elif prompt_tokens is not None and completion_tokens is not None:
+                total_tokens = prompt_tokens + completion_tokens
+
+            debug(f"[OpenAI Patch] Extracted tokens - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}")
+
+            # Set usage attributes on span
+            if prompt_tokens is not None:
+                span.set_attribute("gen_ai.usage.prompt_tokens", prompt_tokens)
+            if completion_tokens is not None:
+                span.set_attribute("gen_ai.usage.completion_tokens", completion_tokens)
+            if total_tokens is not None:
+                span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
+
+    def unpatch(self):
+        """Remove the patch and restore original behavior."""
+        if not self._is_patched:
+            return
+
+        try:
+            # Restore original __init__ if we have it
+            if self._original_init:
+                import openai
+                from openai import OpenAI
+                OpenAI.__init__ = self._original_init
+
+            # Restore original methods on tracked clients
+            for obj, method_name, original_method in self._client_refs:
+                try:
+                    setattr(obj, method_name, original_method)
+                except:
+                    pass  # Client might have been garbage collected
+
+            self._client_refs.clear()
+            self._is_patched = False
+
+            logger.info("[OpenAI Patch] Successfully removed responses API patch")
+
+        except Exception as e:
+            logger.error(f"[OpenAI Patch] Failed to unpatch: {e}")
+
+
+# Global singleton instance
+_patcher_instance: Optional[OpenAIResponsesPatcher] = None
+
+
+def get_responses_patcher(tracer_provider=None) -> OpenAIResponsesPatcher:
+    """Get or create the global patcher instance.
+
+    Args:
+        tracer_provider: OpenTelemetry TracerProvider
+
+    Returns:
+        The singleton patcher instance
+    """
+    global _patcher_instance
+    if _patcher_instance is None:
+        _patcher_instance = OpenAIResponsesPatcher(tracer_provider)
+    return _patcher_instance
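
Taken together, the flow is: patch() swaps in a wrapped OpenAI.__init__, every client constructed afterwards gets its responses.parse/responses.create (and beta.chat.completions.parse) methods wrapped, and each call then runs inside a CLIENT span carrying gen_ai.* request, completion, and usage attributes. A minimal usage sketch, not part of the package itself (it assumes an OPENAI_API_KEY is configured and that the module ships as lucidicai.telemetry.openai_patch, per the RECORD below):

    from openai import OpenAI
    from lucidicai.telemetry.openai_patch import get_responses_patcher

    patcher = get_responses_patcher()  # singleton; falls back to the global TracerProvider
    patcher.patch()                    # from here on, new OpenAI() clients are instrumented

    client = OpenAI()
    resp = client.responses.create(model="gpt-4o-mini", input="Say hello")
    print(resp.output_text)            # the call ran inside an 'openai.responses.create' span

    patcher.unpatch()                  # restores OpenAI.__init__ and all tracked methods
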
lucidicai/telemetry/openai_uninstrument.py ADDED
@@ -0,0 +1,87 @@
+"""Utility to uninstrument specific OpenAI methods to prevent duplicates.
+
+This module helps prevent the standard OpenTelemetry instrumentation
+from creating duplicate spans for methods we're handling ourselves.
+"""
+import logging
+
+logger = logging.getLogger("Lucidic")
+
+
+def uninstrument_responses(openai_module):
+    """Remove any incorrect instrumentation from responses module.
+
+    The standard OpenTelemetry instrumentation might try to instrument
+    responses.create (which doesn't exist) or other responses methods.
+    This function removes any such instrumentation.
+
+    Args:
+        openai_module: The OpenAI module
+    """
+    try:
+        # Check if responses module exists
+        if not hasattr(openai_module, 'resources'):
+            return
+
+        resources = openai_module.resources
+        if not hasattr(resources, 'responses'):
+            return
+
+        responses = resources.responses
+
+        # Check for incorrectly wrapped methods
+        methods_to_check = ['create', 'parse']
+
+        for method_name in methods_to_check:
+            if hasattr(responses, method_name):
+                method = getattr(responses, method_name)
+
+                # Check if it's wrapped (wrapped methods usually have __wrapped__ attribute)
+                if hasattr(method, '__wrapped__'):
+                    # Restore original
+                    original = method.__wrapped__
+                    setattr(responses, method_name, original)
+                    logger.debug(f"[OpenAI Uninstrument] Removed wrapper from responses.{method_name}")
+
+                # Also check for _original_* attributes (another wrapping pattern)
+                original_attr = f'_original_{method_name}'
+                if hasattr(responses, original_attr):
+                    original = getattr(responses, original_attr)
+                    setattr(responses, method_name, original)
+                    delattr(responses, original_attr)
+                    logger.debug(f"[OpenAI Uninstrument] Restored original responses.{method_name}")
+
+        # Also check the Responses class itself
+        if hasattr(responses, 'Responses'):
+            Responses = responses.Responses
+            for method_name in methods_to_check:
+                if hasattr(Responses, method_name):
+                    method = getattr(Responses, method_name)
+                    if hasattr(method, '__wrapped__'):
+                        original = method.__wrapped__
+                        setattr(Responses, method_name, original)
+                        logger.debug(f"[OpenAI Uninstrument] Removed wrapper from Responses.{method_name}")
+
+    except Exception as e:
+        logger.debug(f"[OpenAI Uninstrument] Error while checking responses instrumentation: {e}")
+
+
+def clean_openai_instrumentation():
+    """Clean up any problematic OpenAI instrumentation.
+
+    This should be called after standard instrumentation but before our patches.
+    """
+    try:
+        import openai
+        uninstrument_responses(openai)
+
+        # Also check if client instances need cleaning
+        if hasattr(openai, 'OpenAI'):
+            # The OpenAI class might have wrapped __init__ that creates bad instrumentation
+            # We don't want to break it, just ensure responses aren't double-instrumented
+            pass
+
+    except ImportError:
+        pass  # OpenAI not installed
+    except Exception as e:
+        logger.debug(f"[OpenAI Uninstrument] Error during cleanup: {e}")
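
The __wrapped__ check above works because wrappers built with functools.wraps expose the original callable under that attribute; the _original_* check covers instrumentors that stash the original themselves. A self-contained illustration of the first convention (hypothetical functions, not from the package):

    import functools

    def create(x):
        return x

    @functools.wraps(create)
    def instrumented_create(x):  # stand-in for an instrumentor's wrapper
        return create(x)

    assert instrumented_create.__wrapped__ is create  # set by functools.wraps
    # Same unwrap step uninstrument_responses performs:
    restored = getattr(instrumented_create, '__wrapped__', instrumented_create)
    assert restored is create
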
lucidicai/telemetry/telemetry_init.py CHANGED
@@ -55,7 +55,22 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
             inst.instrument(tracer_provider=tracer_provider, enrich_token_usage=True)
             _global_instrumentors["openai"] = inst
             new_instrumentors["openai"] = inst
-            logger.info("[Telemetry] Instrumented OpenAI")
+
+            # Clean up any problematic instrumentation from standard library
+            from .openai_uninstrument import clean_openai_instrumentation
+            clean_openai_instrumentation()
+
+            # Add patch for responses API methods (not covered by standard instrumentation)
+            import os
+            if os.getenv('LUCIDIC_DISABLE_RESPONSES_PATCH', 'false').lower() != 'true':
+                from .openai_patch import get_responses_patcher
+                patcher = get_responses_patcher(tracer_provider)
+                patcher.patch()
+                _global_instrumentors["openai_responses_patch"] = patcher
+            else:
+                logger.info("[Telemetry] Skipping responses API patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
+
+            logger.info("[Telemetry] Instrumented OpenAI (including responses.parse, responses.create, beta.chat.completions.parse)")
         except Exception as e:
             logger.error(f"Failed to instrument OpenAI: {e}")

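Note the kill switch: setting LUCIDIC_DISABLE_RESPONSES_PATCH=true in the environment before the SDK initializes telemetry skips the patch entirely. A hedged sketch (the lucidicai.init() entry point is an assumption here, not shown in this diff):

    import os
    os.environ["LUCIDIC_DISABLE_RESPONSES_PATCH"] = "true"  # any other value keeps the patch on

    import lucidicai
    lucidicai.init()  # hypothetical entry point; instrument_providers() will log the skip message
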
lucidicai/utils/queue.py CHANGED
@@ -372,19 +372,51 @@ class EventQueue:
372
372
  t = (event_type or "generic").lower()
373
373
 
374
374
  if t == "llm_generation":
375
+
375
376
  req = payload.get("request", {})
377
+ usage = payload.get("usage", {})
378
+ messages = req.get("messages", [])[:5]
379
+ output = payload.get("response", {}).get("output", {})
380
+ compressed_messages = []
381
+ for i, m in enumerate(messages):
382
+ compressed_message_item = {}
383
+ for k, v in messages[i].items():
384
+ compressed_message_item[k] = str(v)[:200] if v else None
385
+ compressed_messages.append(compressed_message_item)
376
386
  return {
377
387
  "request": {
378
- "model": str(req.get("model", ""))[:200],
379
- "provider": str(req.get("provider", ""))[:200],
380
- "messages": "truncated"
388
+ "model": req.get("model")[:200] if req.get("model") else None,
389
+ "provider": req.get("provider")[:200] if req.get("provider") else None,
390
+ "messages": compressed_messages,
391
+ },
392
+ "usage": {
393
+ k: usage.get(k) for k in ("input_tokens", "output_tokens", "cost") if k in usage
381
394
  },
382
- "response": {"output": "truncated"}
395
+ "response": {
396
+ "output": str(output)[:200] if output else None,
397
+ }
383
398
  }
399
+
384
400
  elif t == "function_call":
401
+ args = payload.get("arguments")
402
+ truncated_args = (
403
+ {k: (str(v)[:200] if v is not None else None) for k, v in args.items()}
404
+ if isinstance(args, dict)
405
+ else (str(args)[:200] if args is not None else None)
406
+ )
407
+ return {
408
+ "function_name": payload.get("function_name")[:200] if payload.get("function_name") else None,
409
+ "arguments": truncated_args,
410
+ }
411
+
412
+ elif t == "error_traceback":
413
+ return {
414
+ "error": payload.get("error")[:200] if payload.get("error") else None,
415
+ }
416
+
417
+ elif t == "generic":
385
418
  return {
386
- "function_name": str(payload.get("function_name", ""))[:200],
387
- "arguments": "truncated"
419
+ "details": payload.get("details")[:200] if payload.get("details") else None,
388
420
  }
389
421
  else:
390
422
  return {"details": "preview_unavailable"}
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lucidicai
-Version: 2.1.0
+Version: 2.1.2
 Summary: Lucidic AI Python SDK
 Author: Andy Liang
 Author-email: andy@lucidic.ai
@@ -1,4 +1,4 @@
-lucidicai/__init__.py,sha256=qAza0IBGUQS3e-_uXzRvqVxUH7XliA5XYieTEpXnGT0,11992
+lucidicai/__init__.py,sha256=WwZ3A73h0Ttk1xgiiCt9yM-zXD1vIvQNg6XLTbMYQLY,12757
 lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
 lucidicai/client.py,sha256=IIhlY6Mfwy47FeMxzpvIygCaqcI1FnqiXiVU6M4QEiE,22327
 lucidicai/constants.py,sha256=zN8O7TjoRHRlaGa9CZUWppS73rhzKGwaEkF9XMTV0Cg,1160
@@ -24,7 +24,7 @@ lucidicai/api/resources/dataset.py,sha256=6UnMUd-y__TOAjUJLjbc0lZJRTy_gHkyoE82Ov
24
24
  lucidicai/api/resources/event.py,sha256=GyyNL3_k53EbmvTdgJEABexiuJnoX61hxWey7DYmlYY,2434
25
25
  lucidicai/api/resources/session.py,sha256=w7b4kkbWdbaNbwuMBFgEeVmDfaYozBf9OK8B8L9B1m8,3730
26
26
  lucidicai/core/__init__.py,sha256=b0YQkd8190Y_GgwUcmf0tOiSLARd7L4kq4jwfhhGAyI,39
27
- lucidicai/core/config.py,sha256=m5kl9wiVp5J0DW6ES--GzsMgyykSYtaCi3D-2tW650M,7972
27
+ lucidicai/core/config.py,sha256=P9y5aSZRkAehTvoBdYEc6x5-jiumB5cxftoMtJatl7w,7980
28
28
  lucidicai/core/errors.py,sha256=aRfdXABiTWFTiWELgu2Dz_wxVSggcBFqX7Q-toCy_fY,2130
29
29
  lucidicai/core/types.py,sha256=KabcTBQe7SemigccKfJSDiJmjSJDJJvvtefSd8pfrJI,702
30
30
  lucidicai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -45,12 +45,12 @@ lucidicai/providers/pydantic_ai_handler.py,sha256=Yhd9VTJhq292ZzJF04O_jYGRh-1bzs
45
45
  lucidicai/providers/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6cFwx2DE,1503
46
46
  lucidicai/providers/universal_image_interceptor.py,sha256=7d-hw4xihRwvvA1AP8-vqYNChtmVXKmn09MN4pDS7KQ,12126
47
47
  lucidicai/sdk/__init__.py,sha256=UrkV9FYbZkBxaX9qwxGbCJdXp-JqMpn0_u-huO9Y-ec,32
48
- lucidicai/sdk/context.py,sha256=ruEXAndSv0gQ-YEXLlC4Fx6NNbaylfp_dZxbpwmLZSA,4622
48
+ lucidicai/sdk/context.py,sha256=5Z5bLXKX3hFS-LN-lYVK3ho3b3LmNFi66UOd0cIYkZ0,9873
49
49
  lucidicai/sdk/decorators.py,sha256=B5BXG9Sn5ruUkxFq10L1rrCR_wzYUPlYeu5aqyXetMM,8393
50
50
  lucidicai/sdk/error_boundary.py,sha256=IPr5wS9rS7ZQNgEaBwK53UaixAm6L2rijKKFfxcxjUI,9190
51
- lucidicai/sdk/event.py,sha256=jadK8bZ_kkpycx5zHC5tlNUqL_yCk2WJ6REuFrSrIVI,3564
51
+ lucidicai/sdk/event.py,sha256=NiPcnPzYCU0VlFbBk93LD88wqAYmnglV64nQb2XteOs,3747
52
52
  lucidicai/sdk/event_builder.py,sha256=oMvt39m07ZLmPllJTWwRxpinJUz9_AD17yNE6wQRoDA,10423
53
- lucidicai/sdk/init.py,sha256=gxWfK_c22BcAwnv9LZWqmS_G303_rRAhA0zt2nWKdvc,9289
53
+ lucidicai/sdk/init.py,sha256=tfpTRZLT317xNC_GE2OKqRR02Nj3s3a12CJELc-vVAE,12923
54
54
  lucidicai/sdk/shutdown_manager.py,sha256=I5ylR96QHQ_SfP1euAiM0qQ-I7upCPMW1HUNvoj7hCw,12090
55
55
  lucidicai/sdk/features/__init__.py,sha256=23KUF2EZBzsaH9JUFDGNXZb_3PSfc35VZfD59gAfyR0,26
56
56
  lucidicai/sdk/features/dataset.py,sha256=qFGnu8Wm1yhaflBhtm-5veN-KaoxGLBL5xWEifkrsY0,19416
@@ -60,16 +60,18 @@ lucidicai/telemetry/base_provider.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQ
60
60
  lucidicai/telemetry/context_bridge.py,sha256=NwyclZvPcZHZtIvLSrY3oO8WQ_J1JSuHWIr36gxA7xk,2989
61
61
  lucidicai/telemetry/context_capture_processor.py,sha256=kzKWpg5m0OMUP5we6g453FjckWwA_jAVjOKCfiyKVN8,3651
62
62
  lucidicai/telemetry/extract.py,sha256=30Iqvnr9I0EkD61GRCMN0Zpk3fLmRYcuVajWjRz0z9I,6814
63
- lucidicai/telemetry/litellm_bridge.py,sha256=QXUNwFI3GyvCQtnsnnSnmh2BfdnfnbKIDjfc_Rviau4,16436
64
- lucidicai/telemetry/lucidic_exporter.py,sha256=ghzPVGJlR3yPtRNMtnvlqcMuQCNTkU-oRLyu_YiuzQU,11892
63
+ lucidicai/telemetry/litellm_bridge.py,sha256=GlNeTX0HCu4JsUqfCGBb62XA61fhyWKv5ohfqSkriaE,16574
64
+ lucidicai/telemetry/lucidic_exporter.py,sha256=tn5_Tk2QoYP6nZUmYoS19zyXdCfg6H4gy7tSlz6tCt4,14017
65
65
  lucidicai/telemetry/lucidic_span_processor.py,sha256=-jo7Muuslo3ZCSAysLsDGBqJijQSpIOvJHPbPNjP4iQ,31029
66
66
  lucidicai/telemetry/openai_agents_instrumentor.py,sha256=__wIbeglMnEEf4AGTQ--FXeWCKmz2yy8SBupwprEdZA,12694
67
+ lucidicai/telemetry/openai_patch.py,sha256=3VEU7-7O9DMgKn9xVvAjhEWTCDih454o7lEBHPgLLI8,17083
68
+ lucidicai/telemetry/openai_uninstrument.py,sha256=zELpoz2BU8O-rdHrg_7NuvjdNoY6swgoqVm5NtTCJRQ,3456
67
69
  lucidicai/telemetry/opentelemetry_converter.py,sha256=xOHCqoTyO4hUkL6k7fxy84PbljPpYep6ET9ZqbkJehc,17665
68
70
  lucidicai/telemetry/otel_handlers.py,sha256=OCzXuYog6AuwjI4eXy5Sk40DUehyz48QOxuOujXnEVU,20859
69
71
  lucidicai/telemetry/otel_init.py,sha256=hjUOX8nEBLrDOuh0UTKFfG-C98yFZHTiP8ql59bmNXY,13780
70
72
  lucidicai/telemetry/otel_provider.py,sha256=e5XcpQTd_a5UrMAq-EQcJ0zUJpO7NO16T-BphVUigR4,7513
71
73
  lucidicai/telemetry/pydantic_ai_handler.py,sha256=WPa3tFcVgVnPPO3AxcNOTbNkmODLgNOrU2_3GVtWqUw,28261
72
- lucidicai/telemetry/telemetry_init.py,sha256=8RMzZeeHYvaJKaM5KeSt0svaUAqODHmLstECjgHr8fc,8660
74
+ lucidicai/telemetry/telemetry_init.py,sha256=YpjcYDcqlWpUDDz76-x2v4K0yz_ToEpuaDz_Hypbr2w,9554
73
75
  lucidicai/telemetry/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
74
76
  lucidicai/telemetry/utils/image_storage.py,sha256=4Z59ZpVexr7-lcExfr8GsqXe0y2VZmr8Yjwa-3DeOxU,1457
75
77
  lucidicai/telemetry/utils/model_pricing.py,sha256=Dxi6e0WjcIyCTkVX7K7f0pJ5rPu7nSt3lOmgzAUQl1o,12402
@@ -78,8 +80,8 @@ lucidicai/telemetry/utils/universal_image_interceptor.py,sha256=vARgMk1hVSF--zfi
78
80
  lucidicai/utils/__init__.py,sha256=ZiGtmJaF0ph9iIFIgQiAreVuYM_1o7qu9VySK1NblTw,22
79
81
  lucidicai/utils/images.py,sha256=YHFjeKHRxzWu0IsuNwKw303egPsd99AShaD4WND1lJk,12325
80
82
  lucidicai/utils/logger.py,sha256=R3B3gSee64F6UVHUrShihBq_O7W7bgfrBiVDXTO3Isg,4777
81
- lucidicai/utils/queue.py,sha256=iBhazYt9EPTpyuexfDyPjvJT-2ODaAbCBbGYvLVl8wM,15815
82
- lucidicai-2.1.0.dist-info/METADATA,sha256=5olKiUoS21uLWjjoOkbJaQGY1J0FDJSTKMVGmOcoTEI,902
83
- lucidicai-2.1.0.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
84
- lucidicai-2.1.0.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
85
- lucidicai-2.1.0.dist-info/RECORD,,
83
+ lucidicai/utils/queue.py,sha256=tZYPAUHRAK_uyE8Mk4PloObsBcfITurHwIlNHr3gMFU,17326
84
+ lucidicai-2.1.2.dist-info/METADATA,sha256=ZcL5YaquJn3D5MyMf7UqZTzrP9m5E4XAhEJQJAuOXTs,902
85
+ lucidicai-2.1.2.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
86
+ lucidicai-2.1.2.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
87
+ lucidicai-2.1.2.dist-info/RECORD,,