lucidicai 1.2.15__py3-none-any.whl → 1.2.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai/__init__.py +111 -21
- lucidicai/client.py +22 -5
- lucidicai/decorators.py +357 -0
- lucidicai/event.py +2 -2
- lucidicai/image_upload.py +24 -1
- lucidicai/providers/anthropic_handler.py +0 -7
- lucidicai/providers/image_storage.py +45 -0
- lucidicai/providers/langchain.py +0 -78
- lucidicai/providers/lucidic_exporter.py +259 -0
- lucidicai/providers/lucidic_span_processor.py +648 -0
- lucidicai/providers/openai_agents_instrumentor.py +307 -0
- lucidicai/providers/openai_handler.py +1 -56
- lucidicai/providers/otel_handlers.py +266 -0
- lucidicai/providers/otel_init.py +197 -0
- lucidicai/providers/otel_provider.py +168 -0
- lucidicai/providers/pydantic_ai_handler.py +2 -19
- lucidicai/providers/text_storage.py +53 -0
- lucidicai/providers/universal_image_interceptor.py +276 -0
- lucidicai/session.py +17 -4
- lucidicai/step.py +4 -4
- lucidicai/streaming.py +2 -3
- lucidicai/telemetry/__init__.py +0 -0
- lucidicai/telemetry/base_provider.py +21 -0
- lucidicai/telemetry/lucidic_exporter.py +259 -0
- lucidicai/telemetry/lucidic_span_processor.py +665 -0
- lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
- lucidicai/telemetry/opentelemetry_converter.py +436 -0
- lucidicai/telemetry/otel_handlers.py +266 -0
- lucidicai/telemetry/otel_init.py +197 -0
- lucidicai/telemetry/otel_provider.py +168 -0
- lucidicai/telemetry/pydantic_ai_handler.py +600 -0
- lucidicai/telemetry/utils/__init__.py +0 -0
- lucidicai/telemetry/utils/image_storage.py +45 -0
- lucidicai/telemetry/utils/text_storage.py +53 -0
- lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
- {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/METADATA +1 -1
- lucidicai-1.2.17.dist-info/RECORD +49 -0
- lucidicai-1.2.15.dist-info/RECORD +0 -25
- {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/WHEEL +0 -0
- {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/top_level.txt +0 -0
|
@@ -51,7 +51,6 @@ class AnthropicHandler(BaseProvider):
|
|
|
51
51
|
return " ".join(descriptions), screenshots
|
|
52
52
|
|
|
53
53
|
def handle_response(self, response, kwargs):
|
|
54
|
-
event = Client().session.active_step
|
|
55
54
|
|
|
56
55
|
# for synchronous streaming responses
|
|
57
56
|
if isinstance(response, Stream):
|
|
@@ -222,9 +221,6 @@ class AnthropicHandler(BaseProvider):
|
|
|
222
221
|
self.original_create_async = AsyncMessages.create
|
|
223
222
|
|
|
224
223
|
def patched_create(*args, **kwargs):
|
|
225
|
-
step = Client().session.active_step
|
|
226
|
-
if not step:
|
|
227
|
-
return self.original_create(*args, **kwargs)
|
|
228
224
|
description, images = self._format_messages(kwargs.get("messages", []))
|
|
229
225
|
|
|
230
226
|
event_id = Client().session.create_event(
|
|
@@ -237,9 +233,6 @@ class AnthropicHandler(BaseProvider):
|
|
|
237
233
|
return self.handle_response(result, kwargs)
|
|
238
234
|
|
|
239
235
|
async def patched_create_async(*args, **kwargs):
|
|
240
|
-
step = Client().session.active_step
|
|
241
|
-
if not step:
|
|
242
|
-
return self.original_create_async(*args, **kwargs)
|
|
243
236
|
description, images = self._format_messages(kwargs.get("messages", []))
|
|
244
237
|
|
|
245
238
|
event_id = Client().session.create_event(
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""Thread-local storage for images to work around OpenTelemetry attribute size limits"""
|
|
2
|
+
import threading
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
logger = logging.getLogger("Lucidic")

# Debug logging is opt-in via the LUCIDIC_DEBUG environment variable
# (string comparison: only the exact value "True" enables it).
DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"

# Thread-local storage for images: each thread gets its own independent
# `images` list, created lazily by store_image().
_thread_local = threading.local()
|
|
11
|
+
|
|
12
|
+
def store_image(image_base64: str) -> str:
    """Record *image_base64* in this thread's image list and return a placeholder.

    The placeholder ("lucidic_image_<index>") can later be resolved back to
    the stored image via get_image_by_placeholder().
    """
    images = getattr(_thread_local, 'images', None)
    if images is None:
        # Lazily create the per-thread list on first use.
        images = []
        _thread_local.images = images

    images.append(image_base64)
    placeholder = f"lucidic_image_{len(images) - 1}"

    if DEBUG:
        logger.info(f"[ImageStorage] Stored image of size {len(image_base64)}, placeholder: {placeholder}")

    return placeholder
|
24
|
+
|
|
25
|
+
def get_stored_images():
    """Return the current thread's stored images (a fresh empty list if none)."""
    return getattr(_thread_local, 'images', [])
|
30
|
+
|
|
31
|
+
def clear_stored_images():
    """Empty the current thread's image list, if one exists."""
    images = getattr(_thread_local, 'images', None)
    if images is not None:
        # Clear in place so any existing references observe the emptied list.
        images.clear()
|
35
|
+
|
|
36
|
+
def get_image_by_placeholder(placeholder: str):
    """Resolve a placeholder token back to its stored image.

    Returns the base64 image string for a valid "lucidic_image_<index>"
    token, or None when the token is malformed, out of range, or no images
    have been stored on this thread.
    """
    images = getattr(_thread_local, 'images', None)
    if images is None or not placeholder.startswith('lucidic_image_'):
        return None

    try:
        index = int(placeholder.split('_')[-1])
    except (ValueError, IndexError):
        return None

    if 0 <= index < len(images):
        return images[index]
    return None
|
lucidicai/providers/langchain.py
CHANGED
|
@@ -63,11 +63,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
63
63
|
text.append(prompt)
|
|
64
64
|
elif isinstance(prompt, dict) and 'image' in prompt:
|
|
65
65
|
images.append(prompt['image'])
|
|
66
|
-
|
|
67
|
-
# Make sure we have a valid session and step
|
|
68
|
-
if not (Client().session and Client().session.active_step):
|
|
69
|
-
logger.warning("Cannot create event - no active session or step")
|
|
70
|
-
return
|
|
71
66
|
|
|
72
67
|
try:
|
|
73
68
|
# Create a new event
|
|
@@ -112,12 +107,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
112
107
|
image_url = block.get("image_url", "")
|
|
113
108
|
image_str = image_url.get('url', "")
|
|
114
109
|
images_b64.append(image_str[image_str.find(',') + 1:])
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
# Make sure we have a valid session and step
|
|
118
|
-
if not (Client().session and Client().session.active_step):
|
|
119
|
-
logger.warning("Cannot create event - no active session or step")
|
|
120
|
-
return
|
|
121
110
|
|
|
122
111
|
try:
|
|
123
112
|
# Create a new event
|
|
@@ -157,11 +146,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
157
146
|
message = response.generations[0][0].message
|
|
158
147
|
usage = message.usage_metadata
|
|
159
148
|
cost = calculate_cost(model, usage)
|
|
160
|
-
|
|
161
|
-
# Make sure we have a valid session
|
|
162
|
-
if not (Client().session and Client().session.active_step):
|
|
163
|
-
logger.warning("Cannot end event - no active session or step")
|
|
164
|
-
return
|
|
165
149
|
|
|
166
150
|
try:
|
|
167
151
|
if run_str in self.run_to_event:
|
|
@@ -206,11 +190,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
206
190
|
logger.debug("Handling LLM error in Langchain Handler, ending event...")
|
|
207
191
|
run_str = str(run_id)
|
|
208
192
|
model = self.run_to_model.get(run_str, "unknown")
|
|
209
|
-
|
|
210
|
-
# Make sure we have a valid session
|
|
211
|
-
if not (Client().session and Client().session.active_step):
|
|
212
|
-
logger.warning("Cannot end event - no active session or step")
|
|
213
|
-
return
|
|
214
193
|
|
|
215
194
|
try:
|
|
216
195
|
if run_str in self.run_to_event:
|
|
@@ -266,12 +245,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
266
245
|
image_url = block.get("image_url", "")
|
|
267
246
|
image_str = image_url.get('url', "")
|
|
268
247
|
images_b64.append(image_str[image_str.find(',') + 1:])
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
# Make sure we have a valid session and step
|
|
272
|
-
if not (Client().session and Client().session.active_step):
|
|
273
|
-
logger.warning("Cannot create event - no active session or step")
|
|
274
|
-
return
|
|
275
248
|
|
|
276
249
|
try:
|
|
277
250
|
# Create a new event
|
|
@@ -285,11 +258,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
285
258
|
logger.debug("Ending chain execution in Langchain Handler, ending event...")
|
|
286
259
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
287
260
|
|
|
288
|
-
# Make sure we have a valid session
|
|
289
|
-
if not (Client().session and Client().session.active_step):
|
|
290
|
-
logger.warning("Cannot end event - no active session or step")
|
|
291
|
-
return
|
|
292
|
-
|
|
293
261
|
# Extract result from outputs
|
|
294
262
|
result = None
|
|
295
263
|
if outputs:
|
|
@@ -321,11 +289,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
321
289
|
"""Handle chain errors"""
|
|
322
290
|
logger.debug("Handling chain error in Langchain Handler, ending event...")
|
|
323
291
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
324
|
-
|
|
325
|
-
# Make sure we have a valid session
|
|
326
|
-
if not (Client().session and Client().session.active_step):
|
|
327
|
-
logger.warning("Cannot end event - no active session or step")
|
|
328
|
-
return
|
|
329
292
|
|
|
330
293
|
try:
|
|
331
294
|
if run_id in self.run_to_event:
|
|
@@ -352,11 +315,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
352
315
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
353
316
|
tool_name = serialized.get("name", "Unknown Tool")
|
|
354
317
|
description = f"Tool Call ({tool_name}): {input_str[:100]}..."
|
|
355
|
-
|
|
356
|
-
# Make sure we have a valid session and step
|
|
357
|
-
if not (Client().session and Client().session.active_step):
|
|
358
|
-
logger.warning("Cannot create event - no active session or step")
|
|
359
|
-
return
|
|
360
318
|
|
|
361
319
|
try:
|
|
362
320
|
# Create event
|
|
@@ -372,11 +330,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
372
330
|
logger.debug("Ending tool execution in Langchain Handler, ending event...")
|
|
373
331
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
374
332
|
|
|
375
|
-
# Make sure we have a valid session and step
|
|
376
|
-
if not (Client().session and Client().session.active_step):
|
|
377
|
-
logger.warning("Cannot end event - no active session or step")
|
|
378
|
-
return
|
|
379
|
-
|
|
380
333
|
# Get result from output
|
|
381
334
|
result = None
|
|
382
335
|
if output is not None:
|
|
@@ -404,11 +357,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
404
357
|
"""
|
|
405
358
|
logger.debug("Handling tool error in Langchain Handler, ending event...")
|
|
406
359
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
407
|
-
|
|
408
|
-
# Make sure we have a valid session and step
|
|
409
|
-
if not (Client().session and Client().session.active_step):
|
|
410
|
-
logger.warning("Cannot end event - no active session or step")
|
|
411
|
-
return
|
|
412
360
|
|
|
413
361
|
try:
|
|
414
362
|
if run_id in self.run_to_event:
|
|
@@ -434,11 +382,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
434
382
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
435
383
|
retriever_type = serialized.get("name", "Unknown Retriever")
|
|
436
384
|
description = f"Retriever ({retriever_type}): {query[:100]}..."
|
|
437
|
-
|
|
438
|
-
# Make sure we have a valid session and step
|
|
439
|
-
if not (Client().session and Client().session.active_step):
|
|
440
|
-
logger.warning("Cannot create event - no active session or step")
|
|
441
|
-
return
|
|
442
385
|
|
|
443
386
|
try:
|
|
444
387
|
# Create event
|
|
@@ -454,11 +397,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
454
397
|
logger.debug("Ending retriever execution in Langchain Handler, ending event...")
|
|
455
398
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
456
399
|
|
|
457
|
-
# Make sure we have a valid session and step
|
|
458
|
-
if not (Client().session and Client().session.active_step):
|
|
459
|
-
logger.warning("Cannot end event - no active session or step")
|
|
460
|
-
return
|
|
461
|
-
|
|
462
400
|
# Extract result from documents
|
|
463
401
|
result = None
|
|
464
402
|
if documents:
|
|
@@ -493,11 +431,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
493
431
|
"""
|
|
494
432
|
logger.debug("Handling retriever error in Langchain Handler, ending event...")
|
|
495
433
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
496
|
-
|
|
497
|
-
# Make sure we have a valid session and step
|
|
498
|
-
if not (Client().session and Client().session.active_step):
|
|
499
|
-
logger.warning("Cannot end event - no active session or step")
|
|
500
|
-
return
|
|
501
434
|
|
|
502
435
|
try:
|
|
503
436
|
if run_id in self.run_to_event:
|
|
@@ -524,11 +457,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
524
457
|
tool = getattr(action, 'tool', 'unknown_tool')
|
|
525
458
|
description = f"Agent Action: {tool}"
|
|
526
459
|
|
|
527
|
-
# Make sure we have a valid session and step
|
|
528
|
-
if not (Client().session and Client().session.active_step):
|
|
529
|
-
logger.warning("Cannot create event - no active session or step")
|
|
530
|
-
return
|
|
531
|
-
|
|
532
460
|
# Extract useful information from the action
|
|
533
461
|
result = None
|
|
534
462
|
try:
|
|
@@ -571,12 +499,6 @@ class LucidicLangchainHandler(BaseCallbackHandler):
|
|
|
571
499
|
"""
|
|
572
500
|
logger.debug("Handling agent finish in Langchain Handler, ending event...")
|
|
573
501
|
run_id = str(kwargs.get("run_id", "unknown"))
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
# Make sure we have a valid session and step
|
|
577
|
-
if not (Client().session and Client().session.active_step):
|
|
578
|
-
logger.warning("Cannot end event - no active session or step")
|
|
579
|
-
return
|
|
580
502
|
|
|
581
503
|
# Extract result from finish
|
|
582
504
|
result = None
|
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
"""Custom OpenTelemetry exporter for Lucidic backend compatibility"""
|
|
2
|
+
import json
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Sequence, Optional, Dict, Any, List
|
|
5
|
+
from opentelemetry.sdk.trace import ReadableSpan
|
|
6
|
+
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
|
|
7
|
+
from opentelemetry.trace import StatusCode
|
|
8
|
+
from opentelemetry.semconv_ai import SpanAttributes
|
|
9
|
+
|
|
10
|
+
from lucidicai.client import Client
|
|
11
|
+
from lucidicai.model_pricing import calculate_cost
|
|
12
|
+
from lucidicai.image_upload import extract_base64_images
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger("Lucidic")
|
|
15
|
+
import os
|
|
16
|
+
|
|
17
|
+
DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class LucidicSpanExporter(SpanExporter):
    """Custom exporter that converts OpenTelemetry spans to Lucidic events.

    Spans that look LLM-related (by name pattern or by carrying ``gen_ai.*`` /
    ``llm.*`` attributes) are translated into events on the client's active
    session; all other spans are ignored.
    """

    def __init__(self):
        # Maps span_id -> {'event_id', 'start_time'} for events created on
        # first sighting of a span.
        # NOTE(review): with the standard SDK processors, export() only ever
        # receives spans that have already ended, so a given span_id is
        # normally seen once and the "update" branch in _process_span may
        # never fire -- confirm against the span-processor wiring.
        self.pending_events = {}  # Track events by span_id

    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        """Export spans by converting them to Lucidic events.

        Returns SUCCESS when there is no active session (nothing to do) or
        all spans were processed; FAILURE only on an unexpected error.
        """
        try:
            client = Client()
            if not client.session:
                logger.debug("No active session, skipping span export")
                return SpanExportResult.SUCCESS

            for span in spans:
                self._process_span(span, client)

            return SpanExportResult.SUCCESS
        except Exception as e:
            logger.error(f"Failed to export spans: {e}")
            return SpanExportResult.FAILURE

    def _process_span(self, span: ReadableSpan, client: Client) -> None:
        """Process a single span and convert it to a Lucidic event.

        First sighting of a span_id creates an event; a second sighting
        finalizes it. Errors are logged, never raised.
        """
        try:
            # Skip non-LLM spans
            if not self._is_llm_span(span):
                return

            # Extract relevant attributes
            attributes = dict(span.attributes or {})

            # Create or update event based on span lifecycle
            span_id = format(span.context.span_id, '016x')

            if span_id not in self.pending_events:
                # New span - create event
                event_id = self._create_event_from_span(span, attributes, client)
                if event_id:
                    self.pending_events[span_id] = {
                        'event_id': event_id,
                        'start_time': span.start_time
                    }
            else:
                # Span ended - update event
                event_info = self.pending_events.pop(span_id)
                self._update_event_from_span(span, attributes, event_info['event_id'], client)

        except Exception as e:
            logger.error(f"Failed to process span {span.name}: {e}")

    def _is_llm_span(self, span: ReadableSpan) -> bool:
        """Return True if this span looks LLM-related (name or attributes)."""
        # Check span name patterns
        llm_patterns = ['openai', 'anthropic', 'chat', 'completion', 'embedding', 'llm']
        span_name_lower = span.name.lower()

        if any(pattern in span_name_lower for pattern in llm_patterns):
            return True

        # Check for LLM attributes (gen_ai.* / llm.* semantic conventions)
        if span.attributes:
            for key in span.attributes:
                if key.startswith('gen_ai.') or key.startswith('llm.'):
                    return True

        return False

    def _create_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any], client: Client) -> Optional[str]:
        """Create a Lucidic event from span start; returns the event id or None."""
        try:
            # Extract description from prompts/messages
            description = self._extract_description(span, attributes)

            # Extract images if present
            images = self._extract_images(attributes)

            # Get model info (response model preferred over request model)
            model = attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or \
                    attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or \
                    attributes.get('gen_ai.request.model') or 'unknown'

            # Create event; the result is filled in when the span ends.
            event_kwargs = {
                'description': description,
                'result': "Processing...",  # Will be updated when span ends
                'model': model
            }

            if images:
                event_kwargs['screenshots'] = images

            # Check if we have a specific step_id in span attributes
            step_id = attributes.get('lucidic.step_id')
            if step_id:
                event_kwargs['step_id'] = step_id

            return client.session.create_event(**event_kwargs)

        except Exception as e:
            logger.error(f"Failed to create event from span: {e}")
            return None

    def _update_event_from_span(self, span: ReadableSpan, attributes: Dict[str, Any], event_id: str, client: Client) -> None:
        """Finalize a Lucidic event from span end (result, cost, success flag)."""
        try:
            # Extract response/result
            result = self._extract_result(span, attributes)

            # Calculate cost if we have token usage
            cost = self._calculate_cost(attributes)

            # Success means the span did not end with an ERROR status.
            is_successful = span.status.status_code != StatusCode.ERROR

            update_kwargs = {
                'event_id': event_id,
                'result': result,
                'is_finished': True,
                'is_successful': is_successful
            }

            if cost is not None:
                update_kwargs['cost_added'] = cost

            client.session.update_event(**update_kwargs)

        except Exception as e:
            logger.error(f"Failed to update event from span: {e}")

    def _extract_description(self, span: ReadableSpan, attributes: Dict[str, Any]) -> str:
        """Extract a human-readable description from span prompt attributes."""
        # Try to get prompts/messages
        prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
                  attributes.get('gen_ai.prompt')

        if DEBUG:
            # Fixed typo: log tag previously read "SpaneExporter".
            logger.info(f"[SpanExporter -- DEBUG] Extracting Description attributes: {attributes}, prompts: {prompts}")

        if prompts:
            if isinstance(prompts, list) and prompts:
                # Handle message list format
                return self._format_messages(prompts)
            elif isinstance(prompts, str):
                return prompts

        # Fallback to span name
        return f"LLM Call: {span.name}"

    def _extract_result(self, span: ReadableSpan, attributes: Dict[str, Any]) -> str:
        """Extract the completion text (or error description) from the span."""
        # Try to get completions
        completions = attributes.get(SpanAttributes.LLM_COMPLETIONS) or \
                      attributes.get('gen_ai.completion')

        if completions:
            if isinstance(completions, list) and completions:
                # Handle multiple completions
                return "\n".join(str(c) for c in completions)
            elif isinstance(completions, str):
                return completions

        # Check for error
        if span.status.status_code == StatusCode.ERROR:
            return f"Error: {span.status.description or 'Unknown error'}"

        return "Response received"

    def _extract_images(self, attributes: Dict[str, Any]) -> List[str]:
        """Extract base64 data-URL images from multimodal prompt content."""
        images = []

        # Check prompts for multimodal content
        prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
                  attributes.get('gen_ai.prompt')

        if isinstance(prompts, list):
            for prompt in prompts:
                if isinstance(prompt, dict) and 'content' in prompt:
                    content = prompt['content']
                    if isinstance(content, list):
                        for item in content:
                            if isinstance(item, dict) and item.get('type') == 'image_url':
                                image_url = item.get('image_url', {})
                                if isinstance(image_url, dict) and 'url' in image_url:
                                    url = image_url['url']
                                    # Only inline data URLs are collected;
                                    # remote image URLs are ignored.
                                    if url.startswith('data:image'):
                                        images.append(url)

        return images

    def _format_messages(self, messages: List[Any]) -> str:
        """Format a chat-message list into a "role: content" description."""
        formatted = []

        for msg in messages:
            if isinstance(msg, dict):
                role = msg.get('role', 'unknown')
                content = msg.get('content', '')

                if isinstance(content, str):
                    formatted.append(f"{role}: {content}")
                elif isinstance(content, list):
                    # Handle multimodal content: keep only the text parts.
                    text_parts = []
                    for item in content:
                        if isinstance(item, dict) and item.get('type') == 'text':
                            text_parts.append(item.get('text', ''))
                    if text_parts:
                        formatted.append(f"{role}: {' '.join(text_parts)}")

        return '\n'.join(formatted) if formatted else "Model request"

    def _calculate_cost(self, attributes: Dict[str, Any]) -> Optional[float]:
        """Calculate event cost from token-usage attributes; None if unknown."""
        prompt_tokens = attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) or \
                        attributes.get('gen_ai.usage.prompt_tokens') or 0
        completion_tokens = attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) or \
                            attributes.get('gen_ai.usage.completion_tokens') or 0

        if prompt_tokens or completion_tokens:
            model = attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or \
                    attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or \
                    attributes.get('gen_ai.request.model')

            if model:
                # NOTE(review): other call sites in this package invoke
                # calculate_cost(model, usage) -- confirm this positional
                # order matches lucidicai.model_pricing.calculate_cost.
                return calculate_cost(prompt_tokens, completion_tokens, model)

        return None

    def shutdown(self) -> None:
        """Shutdown the exporter, warning about any never-finalized events."""
        # Process any remaining pending events
        if self.pending_events:
            logger.warning(f"Shutting down with {len(self.pending_events)} pending events")

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Force flush any pending spans (no-op; nothing is buffered here)."""
        return True
|