lucidicai 1.3.5__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucidicai/telemetry/lucidic_exporter.py
@@ -1,16 +1,21 @@
- """Custom OpenTelemetry exporter for Lucidic backend compatibility"""
+ """Custom OpenTelemetry exporter for Lucidic (Exporter-only mode).
+
+ Converts completed spans into immutable typed LLM events via Client.create_event(),
+ which enqueues non-blocking delivery through the EventQueue.
+ """
  import json
  import logging
  from typing import Sequence, Optional, Dict, Any, List
+ from datetime import datetime, timezone
  from opentelemetry.sdk.trace import ReadableSpan
  from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
  from opentelemetry.trace import StatusCode
  from opentelemetry.semconv_ai import SpanAttributes
 
  from lucidicai.client import Client
- from lucidicai.context import current_session_id
+ from lucidicai.context import current_session_id, current_parent_event_id
  from lucidicai.model_pricing import calculate_cost
- from lucidicai.image_upload import extract_base64_images
+ from .extract import detect_is_llm_span, extract_images, extract_prompts, extract_completions, extract_model
 
  logger = logging.getLogger("Lucidic")
  import os
@@ -20,69 +25,90 @@ VERBOSE = os.getenv("LUCIDIC_VERBOSE", "False") == "True"
 
 
  class LucidicSpanExporter(SpanExporter):
-     """Custom exporter that converts OpenTelemetry spans to Lucidic events"""
-
-     def __init__(self):
-         self.pending_events = {} # Track events by span_id
-
+     """Exporter that creates immutable LLM events for completed spans."""
+
      def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
-         """Export spans by converting them to Lucidic events"""
          try:
              client = Client()
-
+             if DEBUG and spans:
+                 logger.debug(f"[LucidicSpanExporter] Processing {len(spans)} spans")
              for span in spans:
                  self._process_span(span, client)
-
+             if DEBUG and spans:
+                 logger.debug(f"[LucidicSpanExporter] Successfully exported {len(spans)} spans")
              return SpanExportResult.SUCCESS
          except Exception as e:
              logger.error(f"Failed to export spans: {e}")
              return SpanExportResult.FAILURE
-
+
      def _process_span(self, span: ReadableSpan, client: Client) -> None:
-         """Process a single span and convert to Lucidic event"""
+         """Convert a single LLM span into a typed, immutable event."""
          try:
-             # Skip non-LLM spans
-             if not self._is_llm_span(span):
+             if not detect_is_llm_span(span):
                  return
-
-             # Extract relevant attributes
+
              attributes = dict(span.attributes or {})
+
+             # Resolve session id
+             target_session_id = attributes.get('lucidic.session_id')
+             if not target_session_id:
+                 try:
+                     target_session_id = current_session_id.get(None)
+                 except Exception:
+                     target_session_id = None
+             if not target_session_id and getattr(client, 'session', None) and getattr(client.session, 'session_id', None):
+                 target_session_id = client.session.session_id
+             if not target_session_id:
+                 return
+
+             # Parent nesting - get from span attributes (captured at span creation)
+             parent_id = attributes.get('lucidic.parent_event_id')
+             if not parent_id:
+                 # Fallback to trying context (may work if same thread)
+                 try:
+                     parent_id = current_parent_event_id.get(None)
+                 except Exception:
+                     parent_id = None
+
+             # Timing
+             occurred_at = datetime.fromtimestamp(span.start_time / 1_000_000_000, tz=timezone.utc) if span.start_time else datetime.now(tz=timezone.utc)
+             duration_seconds = ((span.end_time - span.start_time) / 1_000_000_000) if (span.start_time and span.end_time) else None
+
+             # Typed fields using extract utilities
+             model = extract_model(attributes) or 'unknown'
+             provider = self._detect_provider_name(attributes)
+             messages = extract_prompts(attributes) or []
+             params = self._extract_params(attributes)
+             output_text = extract_completions(span, attributes) or "Response received"
+             input_tokens = self._extract_prompt_tokens(attributes)
+             output_tokens = self._extract_completion_tokens(attributes)
+             cost = self._calculate_cost(attributes)
+             images = extract_images(attributes)
+
+             # Create immutable event via non-blocking queue
+             event_id = client.create_event(
+                 type="llm_generation",
+                 session_id=target_session_id,
+                 parent_event_id=parent_id,
+                 occurred_at=occurred_at,
+                 duration=duration_seconds,
+                 provider=provider,
+                 model=model,
+                 messages=messages,
+                 params=params,
+                 output=output_text,
+                 input_tokens=input_tokens,
+                 output_tokens=output_tokens,
+                 cost=cost,
+                 raw={"images": images} if images else None,
+             )
 
-             # Create or update event based on span lifecycle
-             span_id = format(span.context.span_id, '016x')
-
-             if span_id not in self.pending_events:
-                 # New span - create event
-                 event_id = self._create_event_from_span(span, attributes, client)
-                 if event_id:
-                     self.pending_events[span_id] = {
-                         'event_id': event_id,
-                         'start_time': span.start_time
-                     }
-             else:
-                 # Span ended - update event
-                 event_info = self.pending_events.pop(span_id)
-                 self._update_event_from_span(span, attributes, event_info['event_id'], client)
-
+             if DEBUG:
+                 logger.debug(f"[LucidicSpanExporter] Created LLM event {event_id} for session {target_session_id[:8]}...")
+
          except Exception as e:
              logger.error(f"Failed to process span {span.name}: {e}")
 
-     def _is_llm_span(self, span: ReadableSpan) -> bool:
-         """Check if this is an LLM-related span"""
-         # Check span name patterns
-         llm_patterns = ['openai', 'anthropic', 'chat', 'completion', 'embedding', 'llm']
-         span_name_lower = span.name.lower()
-
-         if any(pattern in span_name_lower for pattern in llm_patterns):
-             return True
-
-         # Check for LLM attributes
-         if span.attributes:
-             for key in span.attributes:
-                 if key.startswith('gen_ai.') or key.startswith('llm.'):
-                     return True
-
-         return False
 
      def _create_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any], client: Client) -> Optional[str]:
         """Create a Lucidic event from span start"""
@@ -121,11 +147,6 @@ class LucidicSpanExporter(SpanExporter):
              if images:
                  event_kwargs['screenshots'] = images
 
-             # Check if we have a specific step_id in span attributes
-             step_id = attributes.get('lucidic.step_id')
-             if step_id:
-                 event_kwargs['step_id'] = step_id
-
              return client.create_event_for_session(target_session_id, **event_kwargs)
 
          except Exception as e:
@@ -133,32 +154,8 @@ class LucidicSpanExporter(SpanExporter):
              return None
 
      def _update_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any], event_id: str, client: Client) -> None:
-         """Update a Lucidic event from span end"""
-         try:
-             # Extract response/result
-             result = self._extract_result(span, attributes)
-
-             # Calculate cost if we have token usage
-             cost = self._calculate_cost(attributes)
-
-             # Determine success
-             is_successful = span.status.status_code != StatusCode.ERROR
-
-             update_kwargs = {
-                 'event_id': event_id,
-                 'result': result,
-                 'is_finished': True,
-                 'is_successful': is_successful
-             }
-
-             if cost is not None:
-                 update_kwargs['cost_added'] = cost
-
-             # Route update to the same session; event_id is globally unique so server resolves it
-             client.session.update_event(**update_kwargs)
-
-         except Exception as e:
-             logger.error(f"Failed to update event from span: {e}")
+         """Deprecated: events are immutable; no updates performed."""
+         return
 
      def _extract_description(self, span: ReadableSpan, attributes: Dict[str, Any]) -> str:
          """Extract description from span attributes"""
@@ -198,74 +195,60 @@ class LucidicSpanExporter(SpanExporter):
 
          return "Response received"
 
-     def _extract_images(self, attributes: Dict[str, Any]) -> List[str]:
-         """Extract base64 images from attributes"""
-         images = []
-
-         # Check prompts for multimodal content
-         prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
-                   attributes.get('gen_ai.prompt')
-
-         if isinstance(prompts, list):
-             for prompt in prompts:
-                 if isinstance(prompt, dict) and 'content' in prompt:
-                     content = prompt['content']
-                     if isinstance(content, list):
-                         for item in content:
-                             if isinstance(item, dict) and item.get('type') == 'image_url':
-                                 image_url = item.get('image_url', {})
-                                 if isinstance(image_url, dict) and 'url' in image_url:
-                                     url = image_url['url']
-                                     if url.startswith('data:image'):
-                                         images.append(url)
-
-         return images
+     def _detect_provider_name(self, attributes: Dict[str, Any]) -> str:
+         name = attributes.get('gen_ai.system') or attributes.get('service.name')
+         if name:
+             return str(name)
+         return "openai" if 'openai' in (str(attributes.get('service.name', '')).lower()) else "unknown"
 
-     def _format_messages(self, messages: List[Any]) -> str:
-         """Format message list into description"""
-         formatted = []
-
-         for msg in messages:
-             if isinstance(msg, dict):
-                 role = msg.get('role', 'unknown')
-                 content = msg.get('content', '')
-
-                 if isinstance(content, str):
-                     formatted.append(f"{role}: {content}")
-                 elif isinstance(content, list):
-                     # Handle multimodal content
-                     text_parts = []
-                     for item in content:
-                         if isinstance(item, dict) and item.get('type') == 'text':
-                             text_parts.append(item.get('text', ''))
-                     if text_parts:
-                         formatted.append(f"{role}: {' '.join(text_parts)}")
-
-         return '\n'.join(formatted) if formatted else "Model request"
+
+     def _extract_params(self, attributes: Dict[str, Any]) -> Dict[str, Any]:
+         return {
+             "temperature": attributes.get('gen_ai.request.temperature'),
+             "max_tokens": attributes.get('gen_ai.request.max_tokens'),
+             "top_p": attributes.get('gen_ai.request.top_p'),
+         }
+
+     def _extract_prompt_tokens(self, attributes: Dict[str, Any]) -> int:
+         return (
+             attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) or
+             attributes.get('gen_ai.usage.prompt_tokens') or
+             attributes.get('gen_ai.usage.input_tokens') or 0
+         )
+
+     def _extract_completion_tokens(self, attributes: Dict[str, Any]) -> int:
+         return (
+             attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) or
+             attributes.get('gen_ai.usage.completion_tokens') or
+             attributes.get('gen_ai.usage.output_tokens') or 0
+         )
 
      def _calculate_cost(self, attributes: Dict[str, Any]) -> Optional[float]:
-         """Calculate cost from token usage"""
-         prompt_tokens = attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) or \
-                         attributes.get('gen_ai.usage.prompt_tokens') or 0
-         completion_tokens = attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) or \
-                             attributes.get('gen_ai.usage.completion_tokens') or 0
-
-         if prompt_tokens or completion_tokens:
-             model = attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or \
-                     attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or \
-                     attributes.get('gen_ai.request.model')
-
+         prompt_tokens = (
+             attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) or
+             attributes.get('gen_ai.usage.prompt_tokens') or
+             attributes.get('gen_ai.usage.input_tokens') or 0
+         )
+         completion_tokens = (
+             attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) or
+             attributes.get('gen_ai.usage.completion_tokens') or
+             attributes.get('gen_ai.usage.output_tokens') or 0
+         )
+         total_tokens = (prompt_tokens or 0) + (completion_tokens or 0)
+         if total_tokens > 0:
+             model = (
+                 attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or
+                 attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or
+                 attributes.get('gen_ai.response.model') or
+                 attributes.get('gen_ai.request.model')
+             )
              if model:
-                 return calculate_cost(prompt_tokens, completion_tokens, model)
-
+                 usage = {"prompt_tokens": prompt_tokens or 0, "completion_tokens": completion_tokens or 0, "total_tokens": total_tokens}
+                 return calculate_cost(model, usage)
          return None
 
      def shutdown(self) -> None:
-         """Shutdown the exporter"""
-         # Process any remaining pending events
-         if self.pending_events:
-             logger.warning(f"Shutting down with {len(self.pending_events)} pending events")
-
+         return None
+
      def force_flush(self, timeout_millis: int = 30000) -> bool:
-         """Force flush any pending spans"""
          return True
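
The diff does not show where the exporter is registered (per the new module docstring, provider wiring now lives in the Client singleton), but a minimal sketch using only standard OpenTelemetry SDK APIs would look like the following; the import path is taken from the RECORD entry for `lucidicai/telemetry/lucidic_exporter.py`:

```python
# Sketch only: in 2.0.x this wiring is handled by the Client singleton.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from lucidicai.telemetry.lucidic_exporter import LucidicSpanExporter

provider = TracerProvider()
# BatchSpanProcessor hands only *ended* spans to export(), which matches the
# new single-shot design: one immutable event per completed span.
provider.add_span_processor(BatchSpanProcessor(LucidicSpanExporter()))
trace.set_tracer_provider(provider)
```

Because each finished span now yields exactly one immutable event, the exporter no longer needs the create-then-update `pending_events` bookkeeping removed above, and `shutdown()` and `force_flush()` can be trivial.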
lucidicai/telemetry/telemetry_init.py (new file)
@@ -0,0 +1,189 @@
+ """Unified telemetry initialization - SpanExporter-only architecture.
+
+ Provides functions to instrument OpenTelemetry providers.
+ Provider creation is now handled by the Client singleton.
+ """
+ import logging
+ import threading
+ from typing import Dict, Any, Optional
+
+ from opentelemetry.sdk.trace import TracerProvider
+
+ logger = logging.getLogger("Lucidic")
+
+ # Global tracking to prevent duplicate instrumentation
+ _global_instrumentors = {}
+ _instrumentation_lock = threading.Lock()
+
+
+ def instrument_providers(providers: list, tracer_provider: TracerProvider, existing_instrumentors: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     Instrument the requested providers with the given TracerProvider.
+     Only instruments providers that haven't been instrumented yet.
+     Uses global tracking to prevent duplicate instrumentation across threads.
+
+     Args:
+         providers: List of provider names to instrument
+         tracer_provider: The TracerProvider to use for instrumentation
+         existing_instrumentors: Dict of already instrumented providers (ignored, kept for compatibility)
+
+     Returns:
+         Dict of newly instrumented providers (name -> instrumentor)
+     """
+     global _global_instrumentors
+     new_instrumentors = {}
+
+     # Normalize provider names
+     canonical = set()
+     for p in providers or []:
+         if p in ("google_generativeai",):
+             canonical.add("google")
+         elif p in ("vertex_ai",):
+             canonical.add("vertexai")
+         elif p in ("aws_bedrock", "amazon_bedrock"):
+             canonical.add("bedrock")
+         else:
+             canonical.add(p)
+
+     # Use global lock to prevent race conditions
+     with _instrumentation_lock:
+         # OpenAI
+         if "openai" in canonical and "openai" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+                 inst = OpenAIInstrumentor()
+                 inst.instrument(tracer_provider=tracer_provider, enrich_token_usage=True)
+                 _global_instrumentors["openai"] = inst
+                 new_instrumentors["openai"] = inst
+                 logger.info("[Telemetry] Instrumented OpenAI")
+             except Exception as e:
+                 logger.error(f"Failed to instrument OpenAI: {e}")
+
+         # Anthropic
+         if "anthropic" in canonical and "anthropic" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
+                 inst = AnthropicInstrumentor()
+                 inst.instrument(tracer_provider=tracer_provider)
+                 _global_instrumentors["anthropic"] = inst
+                 new_instrumentors["anthropic"] = inst
+                 logger.info("[Telemetry] Instrumented Anthropic")
+             except Exception as e:
+                 logger.error(f"Failed to instrument Anthropic: {e}")
+
+         # LangChain
+         if "langchain" in canonical and "langchain" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.langchain import LangchainInstrumentor
+                 inst = LangchainInstrumentor()
+                 inst.instrument(tracer_provider=tracer_provider)
+                 _global_instrumentors["langchain"] = inst
+                 new_instrumentors["langchain"] = inst
+                 logger.info("[Telemetry] Instrumented LangChain")
+             except Exception as e:
+                 logger.error(f"Failed to instrument LangChain: {e}")
+
+         # Google Generative AI
+         if "google" in canonical and "google" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.google_generativeai import GoogleGenerativeAiInstrumentor
+                 inst = GoogleGenerativeAiInstrumentor()
+                 inst.instrument(tracer_provider=tracer_provider)
+                 _global_instrumentors["google"] = inst
+                 new_instrumentors["google"] = inst
+                 logger.info("[Telemetry] Instrumented Google Generative AI")
+             except Exception as e:
+                 logger.error(f"Failed to instrument Google Generative AI: {e}")
+
+         # Vertex AI
+         if "vertexai" in canonical and "vertexai" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+                 inst = VertexAIInstrumentor()
+                 inst.instrument(tracer_provider=tracer_provider)
+                 _global_instrumentors["vertexai"] = inst
+                 new_instrumentors["vertexai"] = inst
+                 logger.info("[Telemetry] Instrumented Vertex AI")
+             except Exception as e:
+                 logger.error(f"Failed to instrument Vertex AI: {e}")
+
+         # Bedrock
+         if "bedrock" in canonical and "bedrock" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.bedrock import BedrockInstrumentor
+                 inst = BedrockInstrumentor(enrich_token_usage=True)
+                 inst.instrument(tracer_provider=tracer_provider)
+                 _global_instrumentors["bedrock"] = inst
+                 new_instrumentors["bedrock"] = inst
+                 logger.info("[Telemetry] Instrumented Bedrock")
+             except Exception as e:
+                 logger.error(f"Failed to instrument Bedrock: {e}")
+
+         # Cohere
+         if "cohere" in canonical and "cohere" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.cohere import CohereInstrumentor
+                 inst = CohereInstrumentor()
+                 inst.instrument(tracer_provider=tracer_provider)
+                 _global_instrumentors["cohere"] = inst
+                 new_instrumentors["cohere"] = inst
+                 logger.info("[Telemetry] Instrumented Cohere")
+             except Exception as e:
+                 logger.error(f"Failed to instrument Cohere: {e}")
+
+         # Groq
+         if "groq" in canonical and "groq" not in _global_instrumentors:
+             try:
+                 from opentelemetry.instrumentation.groq import GroqInstrumentor
+                 inst = GroqInstrumentor()
+                 inst.instrument(tracer_provider=tracer_provider)
+                 _global_instrumentors["groq"] = inst
+                 new_instrumentors["groq"] = inst
+                 logger.info("[Telemetry] Instrumented Groq")
+             except Exception as e:
+                 logger.error(f"Failed to instrument Groq: {e}")
+
+         # LiteLLM - callback-based (not OpenTelemetry)
+         if "litellm" in canonical and "litellm" not in _global_instrumentors:
+             logger.info("[Telemetry] LiteLLM uses callback-based instrumentation")
+             # LiteLLM requires setup via litellm_bridge.py
+             try:
+                 from .litellm_bridge import setup_litellm_callback
+                 setup_litellm_callback()
+                 _global_instrumentors["litellm"] = None # No instrumentor object
+                 new_instrumentors["litellm"] = None
+             except Exception as e:
+                 logger.error(f"Failed to setup LiteLLM: {e}")
+
+         # Pydantic AI - manual spans
+         if "pydantic_ai" in canonical and "pydantic_ai" not in _global_instrumentors:
+             logger.info("[Telemetry] Pydantic AI requires manual span creation")
+             # No automatic instrumentation available
+             _global_instrumentors["pydantic_ai"] = None
+             new_instrumentors["pydantic_ai"] = None
+
+         # OpenAI Agents - custom instrumentor
+         if "openai_agents" in canonical and "openai_agents" not in _global_instrumentors:
+             try:
+                 from .openai_agents_instrumentor import OpenAIAgentsInstrumentor
+                 inst = OpenAIAgentsInstrumentor(tracer_provider=tracer_provider)
+                 inst.instrument()
+                 _global_instrumentors["openai_agents"] = inst
+                 new_instrumentors["openai_agents"] = inst
+                 logger.info("[Telemetry] Instrumented OpenAI Agents SDK")
+             except Exception as e:
+                 logger.error(f"Failed to instrument OpenAI Agents: {e}")
+
+     return new_instrumentors
+
+
+ # Keep the old function for backward compatibility (deprecated)
+ def initialize_telemetry(providers: list, agent_id: str):
+     """
+     DEPRECATED: Use Client.initialize_telemetry() instead.
+     This function is kept for backward compatibility but will not work correctly
+     in multi-threaded environments.
+     """
+     logger.warning("[Telemetry] initialize_telemetry() is deprecated. Telemetry should be initialized via Client.")
+     # Return empty tuple to satisfy old callers
+     return None, []
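
For illustration, a hedged usage sketch of the new entry point (normally invoked by the Client singleton rather than by user code; provider aliases are normalized as shown in the function above):

```python
# Sketch only: instrument_providers() is normally called via the Client.
from opentelemetry.sdk.trace import TracerProvider

from lucidicai.telemetry.telemetry_init import instrument_providers

tracer_provider = TracerProvider()

# "google_generativeai" is normalized to "google"; the third argument is
# accepted only for backward compatibility and ignored.
newly_active = instrument_providers(["openai", "google_generativeai"], tracer_provider, {})
print(sorted(newly_active))  # first call: ['google', 'openai']; repeat calls: []
```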
lucidicai-2.0.1.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lucidicai
- Version: 1.3.5
+ Version: 2.0.1
  Summary: Lucidic AI Python SDK
  Author: Andy Liang
  Author-email: andy@lucidic.ai
lucidicai-2.0.1.dist-info/RECORD
@@ -1,19 +1,22 @@
- lucidicai/__init__.py,sha256=bac3gGZ1Rxn4ZRr4hYssPcucdfHDTD2yTlAGfP1cqzM,29840
+ lucidicai/__init__.py,sha256=3gm2pFPTndvfx1zalGWqBdbdkUE_DDvAPbtsoIV5720,35879
  lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
- lucidicai/client.py,sha256=qTY0LIuG6NbdV4Gy09poqDDe-pFnlWtEQi7Xhf2exNE,9856
- lucidicai/constants.py,sha256=_u0z3M4geZgS1g-CrOZUVjtcew8l70dKQnpVQvlXh9w,2172
- lucidicai/context.py,sha256=R3YRxRMnzKt-RqG1FkcT7__o-cyKRcvSzqonzRNBkZk,3975
- lucidicai/decorators.py,sha256=Z3w7AHXoYwZcvcu_Q0yNsSfkFd42fuQPscb4B0PPq6A,14542
- lucidicai/errors.py,sha256=gTg0bdzjuTcUnakRbZnxjngO4gZnRLVwRHRglpZZJsM,970
- lucidicai/event.py,sha256=2Rxa4EDLFBTBFTNXD4sjQ9DCMLiloFQqugL7unyYcL4,2642
+ lucidicai/client.py,sha256=dRSwOAGth_b-RRBjLuxhPI75ULpQHP7M-KfP9X-XYjY,22172
+ lucidicai/constants.py,sha256=zN8O7TjoRHRlaGa9CZUWppS73rhzKGwaEkF9XMTV0Cg,1160
+ lucidicai/context.py,sha256=ruEXAndSv0gQ-YEXLlC4Fx6NNbaylfp_dZxbpwmLZSA,4622
+ lucidicai/dataset.py,sha256=IgWCUhoclq1ZzSNc22UHd3fLs0hJv9A81OQizjbHtiE,3951
+ lucidicai/decorators.py,sha256=obpHbGLhRd-yIL5xIqzjNmf-ZKCIIx5vlYnMpCcJ7Uo,5416
+ lucidicai/errors.py,sha256=XT9UiYVoi88VsxrD2RU96l6mwCmxSeICOWhghB0iJ7Y,2058
+ lucidicai/event.py,sha256=ObPXS22QIB-n4eHxzEimTtrlOxC1L6_eQVUAx4ZIT7s,2089
+ lucidicai/event_queue.py,sha256=7Y8hkrm0a7EGCBN2oW_XWd-GkJ9Cihnu2Gyk6FMftks,20065
+ lucidicai/feature_flag.py,sha256=Hfcoqqb5VimuaY1Q0NXl08elxQWG97KqzRpaMfE4PYA,11841
  lucidicai/image_upload.py,sha256=6SRudg-BpInM2gzMx1Yf1Rz_Zyh8inwoJ7U4pBw7ruY,3807
  lucidicai/lru.py,sha256=PXiDSoUCOxjamG1QlQx6pDbQCm8h5hKAnnr_NI0PEgE,618
  lucidicai/model_pricing.py,sha256=Dxi6e0WjcIyCTkVX7K7f0pJ5rPu7nSt3lOmgzAUQl1o,12402
- lucidicai/session.py,sha256=bSI6_kOHQEuk7z8NtNbU_RZoc41UxhwWrztwScFuNTw,4068
- lucidicai/singleton.py,sha256=gfT3XdWLXSIWMqDXbY6-pnesMZ8RGRitaEPhIsgrRPw,1272
+ lucidicai/session.py,sha256=wHnjUPo7ANzJAdz_llA4EXKeCAm0WZR0Ij9dNvdCodY,1729
+ lucidicai/singleton.py,sha256=SKiNBgt_Wb5cCWbMt3IWjRAQw3v153LTRgqvDj8poF8,1457
  lucidicai/state.py,sha256=4Tb1X6l2or6w_e62FYSuEeghAv3xXm5gquKwzCpvdok,235
  lucidicai/step.py,sha256=_oBIyTBZBvNkUkYHIrwWd75KMSlMtR9Ws2Lo71Lyff8,2522
- lucidicai/streaming.py,sha256=Y59vQOqhcOvTQWSAIamAnGCaQqCZz77N62_V2fuQlFA,11565
+ lucidicai/streaming.py,sha256=QOLAzhwxetvx711J8VcphY5kXWPJz9XEBJrmHveRKMc,9796
  lucidicai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lucidicai/providers/anthropic_handler.py,sha256=GZEa4QOrjZ9ftu_qTwY3L410HwKzkXgN7omYRsEQ4LU,10174
  lucidicai/providers/base_providers.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
@@ -33,8 +36,10 @@ lucidicai/providers/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6c
  lucidicai/providers/universal_image_interceptor.py,sha256=7d-hw4xihRwvvA1AP8-vqYNChtmVXKmn09MN4pDS7KQ,12126
  lucidicai/telemetry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lucidicai/telemetry/base_provider.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
- lucidicai/telemetry/litellm_bridge.py,sha256=mOdjEfvP--ToDv8snoOMU4pRQx_Yg4s2o3BMTMzeRK8,14979
- lucidicai/telemetry/lucidic_exporter.py,sha256=XAeMQClz5MbGb8ziNT9bhAk3bcqTAkcMxJHvxKAjbCk,11435
+ lucidicai/telemetry/context_capture_processor.py,sha256=k4_uTaoOhLeUAZsyUcDExVNXadk0nR4R1hJW096EVwY,2472
+ lucidicai/telemetry/extract.py,sha256=30Iqvnr9I0EkD61GRCMN0Zpk3fLmRYcuVajWjRz0z9I,6814
+ lucidicai/telemetry/litellm_bridge.py,sha256=CFXVu8nduBtJEhv21maNMrkXV_x5ropy--7qr6HAjh8,16542
+ lucidicai/telemetry/lucidic_exporter.py,sha256=PfUB5a5o6av1YbYj52WQ-I71q8PBja3xvYTRWItFDPc,11029
  lucidicai/telemetry/lucidic_span_processor.py,sha256=-jo7Muuslo3ZCSAysLsDGBqJijQSpIOvJHPbPNjP4iQ,31029
  lucidicai/telemetry/openai_agents_instrumentor.py,sha256=__wIbeglMnEEf4AGTQ--FXeWCKmz2yy8SBupwprEdZA,12694
  lucidicai/telemetry/opentelemetry_converter.py,sha256=xOHCqoTyO4hUkL6k7fxy84PbljPpYep6ET9ZqbkJehc,17665
@@ -42,11 +47,12 @@ lucidicai/telemetry/otel_handlers.py,sha256=OCzXuYog6AuwjI4eXy5Sk40DUehyz48QOxuO
  lucidicai/telemetry/otel_init.py,sha256=hjUOX8nEBLrDOuh0UTKFfG-C98yFZHTiP8ql59bmNXY,13780
  lucidicai/telemetry/otel_provider.py,sha256=e5XcpQTd_a5UrMAq-EQcJ0zUJpO7NO16T-BphVUigR4,7513
  lucidicai/telemetry/pydantic_ai_handler.py,sha256=WPa3tFcVgVnPPO3AxcNOTbNkmODLgNOrU2_3GVtWqUw,28261
+ lucidicai/telemetry/telemetry_init.py,sha256=8RMzZeeHYvaJKaM5KeSt0svaUAqODHmLstECjgHr8fc,8660
  lucidicai/telemetry/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lucidicai/telemetry/utils/image_storage.py,sha256=4Z59ZpVexr7-lcExfr8GsqXe0y2VZmr8Yjwa-3DeOxU,1457
  lucidicai/telemetry/utils/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6cFwx2DE,1503
  lucidicai/telemetry/utils/universal_image_interceptor.py,sha256=vARgMk1hVSF--zfi5b8qBpJJOESuD17YlH9xqxmB9Uw,15954
- lucidicai-1.3.5.dist-info/METADATA,sha256=u-IHX3Y158OpO3FlhYjNMklIr1UB_1xqOz0HnURiiug,902
- lucidicai-1.3.5.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
- lucidicai-1.3.5.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
- lucidicai-1.3.5.dist-info/RECORD,,
+ lucidicai-2.0.1.dist-info/METADATA,sha256=DOyezEU2bp3jBJOiNkXIOOZu55NRdLXztk95jZf9rwA,902
+ lucidicai-2.0.1.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
+ lucidicai-2.0.1.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
+ lucidicai-2.0.1.dist-info/RECORD,,