lucidicai 1.2.16__py3-none-any.whl → 1.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. lucidicai/__init__.py +93 -19
  2. lucidicai/client.py +3 -2
  3. lucidicai/decorators.py +357 -0
  4. lucidicai/image_upload.py +24 -1
  5. lucidicai/providers/image_storage.py +45 -0
  6. lucidicai/providers/lucidic_exporter.py +259 -0
  7. lucidicai/providers/lucidic_span_processor.py +648 -0
  8. lucidicai/providers/openai_agents_instrumentor.py +307 -0
  9. lucidicai/providers/otel_handlers.py +266 -0
  10. lucidicai/providers/otel_init.py +197 -0
  11. lucidicai/providers/otel_provider.py +168 -0
  12. lucidicai/providers/pydantic_ai_handler.py +1 -1
  13. lucidicai/providers/text_storage.py +53 -0
  14. lucidicai/providers/universal_image_interceptor.py +276 -0
  15. lucidicai/session.py +7 -0
  16. lucidicai/telemetry/__init__.py +0 -0
  17. lucidicai/telemetry/base_provider.py +21 -0
  18. lucidicai/telemetry/lucidic_exporter.py +259 -0
  19. lucidicai/telemetry/lucidic_span_processor.py +665 -0
  20. lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
  21. lucidicai/telemetry/opentelemetry_converter.py +436 -0
  22. lucidicai/telemetry/otel_handlers.py +266 -0
  23. lucidicai/telemetry/otel_init.py +197 -0
  24. lucidicai/telemetry/otel_provider.py +168 -0
  25. lucidicai/telemetry/pydantic_ai_handler.py +600 -0
  26. lucidicai/telemetry/utils/__init__.py +0 -0
  27. lucidicai/telemetry/utils/image_storage.py +45 -0
  28. lucidicai/telemetry/utils/text_storage.py +53 -0
  29. lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
  30. {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/METADATA +1 -1
  31. lucidicai-1.2.17.dist-info/RECORD +49 -0
  32. lucidicai-1.2.16.dist-info/RECORD +0 -25
  33. {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/WHEEL +0 -0
  34. {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/top_level.txt +0 -0
lucidicai/providers/lucidic_exporter.py
@@ -0,0 +1,259 @@
+ """Custom OpenTelemetry exporter for Lucidic backend compatibility"""
+ import json
+ import logging
+ import os
+ from typing import Sequence, Optional, Dict, Any, List
+ from opentelemetry.sdk.trace import ReadableSpan
+ from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+ from opentelemetry.trace import StatusCode
+ from opentelemetry.semconv_ai import SpanAttributes
+
+ from lucidicai.client import Client
+ from lucidicai.model_pricing import calculate_cost
+ from lucidicai.image_upload import extract_base64_images
+
+ logger = logging.getLogger("Lucidic")
+
+ DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"
+
+
+ class LucidicSpanExporter(SpanExporter):
+     """Custom exporter that converts OpenTelemetry spans to Lucidic events"""
+
+     def __init__(self):
+         self.pending_events = {}  # Track events by span_id
+
+     def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
+         """Export spans by converting them to Lucidic events"""
+         try:
+             client = Client()
+             if not client.session:
+                 logger.debug("No active session, skipping span export")
+                 return SpanExportResult.SUCCESS
+
+             for span in spans:
+                 self._process_span(span, client)
+
+             return SpanExportResult.SUCCESS
+         except Exception as e:
+             logger.error(f"Failed to export spans: {e}")
+             return SpanExportResult.FAILURE
+
+     def _process_span(self, span: ReadableSpan, client: Client) -> None:
+         """Process a single span and convert to Lucidic event"""
+         try:
+             # Skip non-LLM spans
+             if not self._is_llm_span(span):
+                 return
+
+             # Extract relevant attributes
+             attributes = dict(span.attributes or {})
+
+             # Create or update event based on span lifecycle
+             span_id = format(span.context.span_id, '016x')
+
+             if span_id not in self.pending_events:
+                 # New span - create event
+                 event_id = self._create_event_from_span(span, attributes, client)
+                 if event_id:
+                     self.pending_events[span_id] = {
+                         'event_id': event_id,
+                         'start_time': span.start_time
+                     }
+             else:
+                 # Span ended - update event
+                 event_info = self.pending_events.pop(span_id)
+                 self._update_event_from_span(span, attributes, event_info['event_id'], client)
+
+         except Exception as e:
+             logger.error(f"Failed to process span {span.name}: {e}")
+
+     def _is_llm_span(self, span: ReadableSpan) -> bool:
+         """Check if this is an LLM-related span"""
+         # Check span name patterns
+         llm_patterns = ['openai', 'anthropic', 'chat', 'completion', 'embedding', 'llm']
+         span_name_lower = span.name.lower()
+
+         if any(pattern in span_name_lower for pattern in llm_patterns):
+             return True
+
+         # Check for LLM attributes
+         if span.attributes:
+             for key in span.attributes:
+                 if key.startswith('gen_ai.') or key.startswith('llm.'):
+                     return True
+
+         return False
+
+     def _create_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any], client: Client) -> Optional[str]:
+         """Create a Lucidic event from span start"""
+         try:
+             # Extract description from prompts/messages
+             description = self._extract_description(span, attributes)
+
+             # Extract images if present
+             images = self._extract_images(attributes)
+
+             # Get model info
+             model = attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or \
+                     attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or \
+                     attributes.get('gen_ai.request.model') or 'unknown'
+
+             # Create event
+             event_kwargs = {
+                 'description': description,
+                 'result': "Processing...",  # Will be updated when span ends
+                 'model': model
+             }
+
+             if images:
+                 event_kwargs['screenshots'] = images
+
+             # Check if we have a specific step_id in span attributes
+             step_id = attributes.get('lucidic.step_id')
+             if step_id:
+                 event_kwargs['step_id'] = step_id
+
+             return client.session.create_event(**event_kwargs)
+
+         except Exception as e:
+             logger.error(f"Failed to create event from span: {e}")
+             return None
+
+     def _update_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any], event_id: str, client: Client) -> None:
+         """Update a Lucidic event from span end"""
+         try:
+             # Extract response/result
+             result = self._extract_result(span, attributes)
+
+             # Calculate cost if we have token usage
+             cost = self._calculate_cost(attributes)
+
+             # Determine success
+             is_successful = span.status.status_code != StatusCode.ERROR
+
+             update_kwargs = {
+                 'event_id': event_id,
+                 'result': result,
+                 'is_finished': True,
+                 'is_successful': is_successful
+             }
+
+             if cost is not None:
+                 update_kwargs['cost_added'] = cost
+
+             client.session.update_event(**update_kwargs)
+
+         except Exception as e:
+             logger.error(f"Failed to update event from span: {e}")
+
+     def _extract_description(self, span: ReadableSpan, attributes: Dict[str, Any]) -> str:
+         """Extract description from span attributes"""
+         # Try to get prompts/messages
+         prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
+                   attributes.get('gen_ai.prompt')
+
+         if DEBUG:
+             logger.info(f"[SpanExporter -- DEBUG] Extracting Description attributes: {attributes}, prompts: {prompts}")
+
+         if prompts:
+             if isinstance(prompts, list) and prompts:
+                 # Handle message list format
+                 return self._format_messages(prompts)
+             elif isinstance(prompts, str):
+                 return prompts
+
+         # Fallback to span name
+         return f"LLM Call: {span.name}"
+
+     def _extract_result(self, span: ReadableSpan, attributes: Dict[str, Any]) -> str:
+         """Extract result/response from span attributes"""
+         # Try to get completions
+         completions = attributes.get(SpanAttributes.LLM_COMPLETIONS) or \
+                       attributes.get('gen_ai.completion')
+
+         if completions:
+             if isinstance(completions, list) and completions:
+                 # Handle multiple completions
+                 return "\n".join(str(c) for c in completions)
+             elif isinstance(completions, str):
+                 return completions
+
+         # Check for error
+         if span.status.status_code == StatusCode.ERROR:
+             return f"Error: {span.status.description or 'Unknown error'}"
+
+         return "Response received"
+
+     def _extract_images(self, attributes: Dict[str, Any]) -> List[str]:
+         """Extract base64 images from attributes"""
+         images = []
+
+         # Check prompts for multimodal content
+         prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
+                   attributes.get('gen_ai.prompt')
+
+         if isinstance(prompts, list):
+             for prompt in prompts:
+                 if isinstance(prompt, dict) and 'content' in prompt:
+                     content = prompt['content']
+                     if isinstance(content, list):
+                         for item in content:
+                             if isinstance(item, dict) and item.get('type') == 'image_url':
+                                 image_url = item.get('image_url', {})
+                                 if isinstance(image_url, dict) and 'url' in image_url:
+                                     url = image_url['url']
+                                     if url.startswith('data:image'):
+                                         images.append(url)
+
+         return images
+
+     def _format_messages(self, messages: List[Any]) -> str:
+         """Format message list into description"""
+         formatted = []
+
+         for msg in messages:
+             if isinstance(msg, dict):
+                 role = msg.get('role', 'unknown')
+                 content = msg.get('content', '')
+
+                 if isinstance(content, str):
+                     formatted.append(f"{role}: {content}")
+                 elif isinstance(content, list):
+                     # Handle multimodal content
+                     text_parts = []
+                     for item in content:
+                         if isinstance(item, dict) and item.get('type') == 'text':
+                             text_parts.append(item.get('text', ''))
+                     if text_parts:
+                         formatted.append(f"{role}: {' '.join(text_parts)}")
+
+         return '\n'.join(formatted) if formatted else "Model request"
+
+     def _calculate_cost(self, attributes: Dict[str, Any]) -> Optional[float]:
+         """Calculate cost from token usage"""
+         prompt_tokens = attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) or \
+                         attributes.get('gen_ai.usage.prompt_tokens') or 0
+         completion_tokens = attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) or \
+                             attributes.get('gen_ai.usage.completion_tokens') or 0
+
+         if prompt_tokens or completion_tokens:
+             model = attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or \
+                     attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or \
+                     attributes.get('gen_ai.request.model')
+
+             if model:
+                 return calculate_cost(prompt_tokens, completion_tokens, model)
+
+         return None
+
+     def shutdown(self) -> None:
+         """Shutdown the exporter"""
+         # Process any remaining pending events
+         if self.pending_events:
+             logger.warning(f"Shutting down with {len(self.pending_events)} pending events")
+
+     def force_flush(self, timeout_millis: int = 30000) -> bool:
+         """Force flush any pending spans"""
+         return True
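
For context on how an exporter like this is consumed: a SpanExporter receives spans from the OpenTelemetry SDK through a span processor. The snippet below is an illustrative sketch, not part of the diff. It assumes LucidicSpanExporter is importable from the new lucidicai.telemetry.lucidic_exporter module and that a Lucidic session is already active (otherwise export() returns early, as the code above shows); in practice the package's new otel_init.py presumably performs this wiring internally.

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from lucidicai.telemetry.lucidic_exporter import LucidicSpanExporter  # assumed import path

# BatchSpanProcessor queues ended spans and hands them to the exporter in
# batches; LucidicSpanExporter then converts LLM spans into Lucidic events.
provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(LucidicSpanExporter()))
trace.set_tracer_provider(provider)

# Any span whose name or attributes look LLM-related (see _is_llm_span)
# is picked up; everything else is ignored by the exporter.
tracer = trace.get_tracer("example")
with tracer.start_as_current_span("openai.chat") as span:
    span.set_attribute("gen_ai.request.model", "gpt-4o")  # hypothetical value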