lucidicai 1.2.16__py3-none-any.whl → 1.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. lucidicai/__init__.py +93 -19
  2. lucidicai/client.py +3 -2
  3. lucidicai/decorators.py +357 -0
  4. lucidicai/image_upload.py +24 -1
  5. lucidicai/providers/image_storage.py +45 -0
  6. lucidicai/providers/lucidic_exporter.py +259 -0
  7. lucidicai/providers/lucidic_span_processor.py +648 -0
  8. lucidicai/providers/openai_agents_instrumentor.py +307 -0
  9. lucidicai/providers/otel_handlers.py +266 -0
  10. lucidicai/providers/otel_init.py +197 -0
  11. lucidicai/providers/otel_provider.py +168 -0
  12. lucidicai/providers/pydantic_ai_handler.py +1 -1
  13. lucidicai/providers/text_storage.py +53 -0
  14. lucidicai/providers/universal_image_interceptor.py +276 -0
  15. lucidicai/session.py +7 -0
  16. lucidicai/telemetry/__init__.py +0 -0
  17. lucidicai/telemetry/base_provider.py +21 -0
  18. lucidicai/telemetry/lucidic_exporter.py +259 -0
  19. lucidicai/telemetry/lucidic_span_processor.py +665 -0
  20. lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
  21. lucidicai/telemetry/opentelemetry_converter.py +436 -0
  22. lucidicai/telemetry/otel_handlers.py +266 -0
  23. lucidicai/telemetry/otel_init.py +197 -0
  24. lucidicai/telemetry/otel_provider.py +168 -0
  25. lucidicai/telemetry/pydantic_ai_handler.py +600 -0
  26. lucidicai/telemetry/utils/__init__.py +0 -0
  27. lucidicai/telemetry/utils/image_storage.py +45 -0
  28. lucidicai/telemetry/utils/text_storage.py +53 -0
  29. lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
  30. {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/METADATA +1 -1
  31. lucidicai-1.2.17.dist-info/RECORD +49 -0
  32. lucidicai-1.2.16.dist-info/RECORD +0 -25
  33. {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/WHEEL +0 -0
  34. {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/top_level.txt +0 -0
lucidicai/providers/lucidic_span_processor.py
@@ -0,0 +1,648 @@
+ """Custom span processor for real-time Lucidic event handling"""
+ import os
+ import logging
+ import json
+ from typing import Optional, Dict, Any
+ from opentelemetry import context as otel_context
+ from opentelemetry.sdk.trace import Span, SpanProcessor
+ from opentelemetry.trace import StatusCode
+ from opentelemetry.semconv_ai import SpanAttributes
+
+ from lucidicai.client import Client
+ from lucidicai.model_pricing import calculate_cost
+ # get_image_by_placeholder resolves "lucidic_image_*" placeholders (see _retrieve_image_from_placeholder)
+ from .image_storage import get_stored_images, clear_stored_images, get_image_by_placeholder
+ from .text_storage import get_stored_text, clear_stored_texts
+
+ logger = logging.getLogger("Lucidic")
+ DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"
+
+
+ class LucidicSpanProcessor(SpanProcessor):
+     """
+     Real-time span processor that records LLM spans as they start and
+     creates completed Lucidic events when they end, preserving the
+     current SDK behavior.
+     """
+
+     def __init__(self):
+         self.span_to_event = {}   # Map span_id to event_id
+         self.span_contexts = {}   # Store span start data
+
+     def on_start(self, span: Span, parent_context: Optional[otel_context.Context] = None) -> None:
+         """Called when a span starts - record it so the event can be created in on_end"""
+         try:
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] on_start called for span: {span.name}")
+                 logger.info(f"[SpanProcessor] Span attributes at start: {dict(span.attributes or {})}")
+
+             client = Client()
+             if not client.session:
+                 logger.debug("No active session, skipping span tracking")
+                 return
+
+             # Only process LLM spans
+             if not self._is_llm_span(span):
+                 if DEBUG:
+                     logger.info(f"[SpanProcessor] Skipping non-LLM span: {span.name}")
+                 return
+
+             # Store span info for later; the event is created in on_end, once all attributes are available
+             span_id = span.get_span_context().span_id
+             self.span_contexts[span_id] = {
+                 'start_time': span.start_time,
+                 'name': span.name,
+                 'attributes': dict(span.attributes or {}),
+                 'span': span
+             }
+
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] Stored span {span_id} for later processing")
+
+         except Exception as e:
+             logger.error(f"Error in on_start: {e}")
+             if DEBUG:
+                 import traceback
+                 traceback.print_exc()
+
+     def on_end(self, span: Span) -> None:
+         """Called when a span ends - create and complete the Lucidic event"""
+         try:
+             span_id = span.get_span_context().span_id
+
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] on_end called for span: {span.name}")
+                 logger.info(f"[SpanProcessor] Span attributes at end: {dict(span.attributes or {})}")
+                 logger.info(f"[SpanProcessor] Tracked span contexts: {list(self.span_contexts.keys())}")
+
+                 # Log any attributes that might contain message data
+                 attrs = dict(span.attributes or {})
+                 for key, value in attrs.items():
+                     if 'message' in key.lower() or 'prompt' in key.lower() or 'content' in key.lower():
+                         logger.info(f"[SpanProcessor] Found potential message attr: {key} = {value[:200] if isinstance(value, str) else value}")
+
+             # Check if we have context for this span
+             if span_id not in self.span_contexts:
+                 if DEBUG:
+                     logger.warning(f"[SpanProcessor] No context found for span {span_id}")
+                 return
+
+             client = Client()
+             if not client.session:
+                 return
+
+             self.span_contexts.pop(span_id, None)
+
+             # Create event with all the attributes now available
+             event_id = self._create_event_from_span_end(span, client)
+
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] Created and completed event {event_id} for span {span_id}")
+
+             # Clear thread-local images and texts after processing
+             clear_stored_images()
+             clear_stored_texts()
+
+         except Exception as e:
+             logger.error(f"Error in on_end: {e}")
+             if DEBUG:
+                 import traceback
+                 traceback.print_exc()
+
+     def _is_llm_span(self, span: Span) -> bool:
+         """Check if this is an LLM-related span with actual LLM content"""
+         # Skip agent spans that carry no prompt/completion attributes
+         if span.attributes:
+             attrs = dict(span.attributes)
+             if attrs.get('gen_ai.operation.name') == 'agent':
+                 has_prompts = any('prompt' in k.lower() for k in attrs)
+                 has_completions = any('completion' in k.lower() for k in attrs)
+                 if not has_prompts and not has_completions:
+                     if DEBUG:
+                         logger.info(f"[SpanProcessor] Skipping agent span without LLM content: {span.name}")
+                     return False
+
+         # Check span name
+         span_name_lower = span.name.lower()
+         llm_patterns = ['openai', 'anthropic', 'chat', 'completion', 'embedding', 'gemini', 'claude']
+         if any(pattern in span_name_lower for pattern in llm_patterns):
+             return True
+
+         # Check attributes
+         if span.attributes:
+             for key in span.attributes:
+                 if isinstance(key, str) and (key.startswith('gen_ai.') or key.startswith('llm.')):
+                     return True
+
+         return False
+
+     def _create_event_from_span_start(self, span: Span, client: Client) -> Optional[str]:
+         """Create event when span starts"""
+         try:
+             attributes = dict(span.attributes or {})
+
+             # Extract description
+             if DEBUG:
+                 logger.info(f"[SpanProcessor -- DEBUG] Extracting description from span start: {span}")
+             description = self._extract_description(span, attributes)
+
+             # Extract images
+             images = self._extract_images(attributes)
+
+             # Get model
+             model = (
+                 attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or
+                 attributes.get('gen_ai.request.model') or
+                 attributes.get('llm.model') or
+                 'unknown'
+             )
+
+             # Initial result depends on whether the call is streaming
+             is_streaming = attributes.get(SpanAttributes.LLM_IS_STREAMING, False) or \
+                            attributes.get('llm.is_streaming', False)
+             initial_result = None if is_streaming else "Waiting for response..."
+
+             # Apply masking to description if configured
+             if client.masking_function:
+                 description = client.mask(description)
+
+             # Create event - session.create_event will handle temporary step creation if needed
+             event_kwargs = {
+                 'description': description,
+                 'result': initial_result,
+                 'model': model
+             }
+
+             if DEBUG:
+                 logger.info(f"[SpanProcessor -- DEBUG -- _create_event_from_span_start] event_kwargs: {event_kwargs}")
+
+             if images:
+                 event_kwargs['screenshots'] = images
+
+             # Check for step context
+             step_id = attributes.get('lucidic.step_id')
+             if step_id:
+                 event_kwargs['step_id'] = step_id
+
+             return client.session.create_event(**event_kwargs)
+
+         except Exception as e:
+             logger.error(f"Failed to create event: {e}")
+             return None
+
+     def _create_event_from_span_end(self, span: Span, client: Client) -> Optional[str]:
+         """Create and complete event when span ends with all attributes available"""
+         try:
+             attributes = dict(span.attributes or {})
+
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] Creating event from span end with {len(attributes)} attributes")
+
+             # Extract all information
+             description = self._extract_description(span, attributes)
+             raw_result = self._extract_result(span, attributes)
+             images = self._extract_images(attributes)
+             model = (
+                 attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or
+                 attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or
+                 attributes.get('gen_ai.response.model') or
+                 attributes.get('gen_ai.request.model') or
+                 'unknown'
+             )
+
+             # The description carries the input (prompts); the result carries the output (completions)
+             formatted_result = str(raw_result)
+
+             # Apply masking
+             if client.masking_function:
+                 formatted_result = client.mask(formatted_result)
+
+             # Calculate cost
+             cost = self._calculate_cost(attributes)
+
+             # Check success
+             is_successful = span.status.status_code != StatusCode.ERROR
+
+             # Create event with all data
+             event_kwargs = {
+                 'description': description,
+                 'result': formatted_result,
+                 'model': model,
+                 'is_finished': True
+             }
+
+             if images:
+                 event_kwargs['screenshots'] = images
+
+             if cost is not None:
+                 event_kwargs['cost_added'] = cost
+
+             # Check for step context
+             step_id = attributes.get('lucidic.step_id')
+             if step_id:
+                 event_kwargs['step_id'] = step_id
+
+             # Create the event (already completed)
+             return client.session.create_event(**event_kwargs)
+
+         except Exception as e:
+             logger.error(f"Failed to create event from span end: {e}")
+             if DEBUG:
+                 import traceback
+                 traceback.print_exc()
+             return None
+
+     def _update_event_from_span_end(self, span: Span, event_id: str, client: Client) -> None:
+         """Update event when span ends"""
+         try:
+             attributes = dict(span.attributes or {})
+
+             # Extract response
+             result = self._extract_result(span, attributes)
+
+             # Apply masking to result if configured
+             if client.masking_function:
+                 result = client.mask(result)
+
+             # Calculate cost
+             cost = self._calculate_cost(attributes)
+
+             # Check success
+             is_successful = span.status.status_code != StatusCode.ERROR
+
+             # Update event
+             update_kwargs = {
+                 'event_id': event_id,
+                 'result': result,
+                 'is_finished': True
+             }
+
+             if cost is not None:
+                 update_kwargs['cost_added'] = cost
+
+             # Update model if we got a response model
+             response_model = attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or \
+                              attributes.get('gen_ai.response.model')
+             if response_model:
+                 update_kwargs['model'] = response_model
+
+             if DEBUG:
+                 logger.info(f"[SpanProcessor -- DEBUG] update_kwargs: {update_kwargs}")
+
+             client.session.update_event(**update_kwargs)
+
+         except Exception as e:
+             logger.error(f"Failed to update event: {e}")
+
+     def _extract_description(self, span: Span, attributes: Dict[str, Any]) -> str:
+         """Extract description from span"""
+         if DEBUG:
+             logger.info(f"[SpanProcessor] Extracting description from attributes: {list(attributes.keys())}")
+
+         # Try to reconstruct messages from indexed attributes (OpenLLMetry format)
+         messages = self._extract_indexed_messages(attributes)
+         if messages:
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] Reconstructed {len(messages)} messages from indexed attributes")
+             return self._format_messages(messages)
+
+         # Try prompts next (other formats)
+         prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
+                   attributes.get('gen_ai.prompt') or \
+                   attributes.get('llm.prompts')
+         if prompts:
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] Found prompts: {prompts}")
+             return self._format_prompts(prompts)
+
+         # Try messages
+         messages = attributes.get('gen_ai.messages') or \
+                    attributes.get('llm.messages')
+         if messages:
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] Found messages: {messages}")
+             return self._format_messages(messages)
+
+         # Fallback
+         if DEBUG:
+             logger.warning("[SpanProcessor] No prompts/messages found, using fallback")
+         return f"LLM Request: {span.name}"
+
+     def _extract_indexed_messages(self, attributes: Dict[str, Any]) -> list:
+         """Extract messages from indexed attributes (gen_ai.prompt.0.role, gen_ai.prompt.0.content, ...)"""
+         messages = []
+         i = 0
+
+         # Keep extracting messages until we don't find any more
+         while True:
+             prefix = f"gen_ai.prompt.{i}"
+             role = attributes.get(f"{prefix}.role")
+             if not role:
+                 break
+
+             message = {"role": role}
+
+             # Get content
+             content = attributes.get(f"{prefix}.content")
+             if content:
+                 # Try to parse JSON content (for multimodal)
+                 try:
+                     message["content"] = json.loads(content)
+                 except (json.JSONDecodeError, TypeError):
+                     message["content"] = content
+             else:
+                 # Content might be missing for multimodal messages due to size limits.
+                 # Check whether text and/or images were stashed in thread-local storage.
+                 stored_text = get_stored_text(i)
+                 stored_images = get_stored_images()
+
+                 if stored_text or stored_images:
+                     if DEBUG:
+                         logger.info(f"[SpanProcessor] No content for message {i}, but found stored text/images")
+
+                     # Create synthetic content with both text and images
+                     synthetic_content = []
+
+                     # Add text if available
+                     if stored_text:
+                         synthetic_content.append({
+                             "type": "text",
+                             "text": stored_text
+                         })
+
+                     # Add images if available
+                     if stored_images and i == 0:  # Assume the first message might carry the images
+                         for img in stored_images:
+                             synthetic_content.append({
+                                 "type": "image_url",
+                                 "image_url": {"url": img}
+                             })
+
+                     if synthetic_content:
+                         message["content"] = synthetic_content
+
+             messages.append(message)
+             i += 1
+
+         return messages
+
+     def _extract_indexed_completions(self, attributes: Dict[str, Any]) -> list:
+         """Extract completions from indexed attributes"""
+         completions = []
+         i = 0
+
+         while True:
+             prefix = f"gen_ai.completion.{i}"
+             role = attributes.get(f"{prefix}.role")
+             content = attributes.get(f"{prefix}.content")
+
+             if not role and not content:
+                 break
+
+             completion = {}
+             if role:
+                 completion["role"] = role
+             if content:
+                 completion["content"] = content
+
+             if completion:
+                 completions.append(completion)
+
+             i += 1
+
+         return completions
+
+     def _extract_result(self, span: Span, attributes: Dict[str, Any]) -> str:
+         """Extract result from span"""
+         if DEBUG:
+             logger.info(f"[SpanProcessor -- _extract_result] Extracting result from attributes: {attributes}")
+
+         # Try indexed completions first (OpenLLMetry format)
+         completions = self._extract_indexed_completions(attributes)
+         if completions:
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] Found {len(completions)} indexed completions")
+             results = [str(comp["content"]) for comp in completions if "content" in comp]
+             if results:
+                 return "\n".join(results)
+
+         # Try non-indexed completion attributes
+         completions = attributes.get(SpanAttributes.LLM_COMPLETIONS) or \
+                       attributes.get('gen_ai.completion') or \
+                       attributes.get('llm.completions')
+         if completions:
+             if isinstance(completions, list):
+                 return "\n".join(str(c) for c in completions)
+             return str(completions)
+
+         # Check for error
+         if span.status.status_code == StatusCode.ERROR:
+             return f"Error: {span.status.description or 'Unknown error'}"
+
+         # Check streaming
+         if attributes.get(SpanAttributes.LLM_IS_STREAMING):
+             content = attributes.get('llm.response.content') or \
+                       attributes.get('gen_ai.response.content')
+             if content:
+                 return content
+
+         return "Response received"
+
+     def _extract_images(self, attributes: Dict[str, Any]) -> list:
+         """Extract images from multimodal prompts"""
+         images = []
+
+         if DEBUG:
+             logger.info(f"[SpanProcessor -- _extract_images] Extracting images from attributes: {attributes}")
+
+         # First check indexed messages (OpenLLMetry format)
+         messages = self._extract_indexed_messages(attributes)
+         for msg in messages:
+             if isinstance(msg, dict):
+                 images.extend(self._extract_images_from_message(msg))
+
+         # Check for multimodal content in prompts
+         prompts = attributes.get(SpanAttributes.LLM_PROMPTS) or \
+                   attributes.get('gen_ai.prompt')
+         if isinstance(prompts, list):
+             for prompt in prompts:
+                 if isinstance(prompt, dict):
+                     images.extend(self._extract_images_from_message(prompt))
+
+         # Check messages too
+         messages = attributes.get('gen_ai.messages') or \
+                    attributes.get('llm.messages')
+         if isinstance(messages, list):
+             for msg in messages:
+                 if isinstance(msg, dict):
+                     images.extend(self._extract_images_from_message(msg))
+
+         # If no images were found in attributes, fall back to thread-local storage
+         stored_images = get_stored_images()
+         if not images and stored_images:
+             if DEBUG:
+                 logger.info(f"[SpanProcessor] No images found in attributes, checking thread-local storage: {len(stored_images)} images")
+             for img in stored_images:
+                 if img and not img.startswith('data:'):
+                     images.append(f"data:image/jpeg;base64,{img}")
+                 else:
+                     images.append(img)
+
+         if DEBUG and images:
+             logger.info(f"[SpanProcessor] Extracted {len(images)} images")
+
+         return images
+
+     def _extract_images_from_message(self, message: dict) -> list:
+         """Extract images from a single message"""
+         images = []
+         content = message.get('content', '')
+
+         if DEBUG:
+             logger.info(f"[SpanProcessor -- _extract_images_from_message] Extracting images from message: {message}")
+
+         # Handle case where content might be a JSON string
+         if isinstance(content, str) and content.strip().startswith('['):
+             try:
+                 parsed_content = json.loads(content)
+                 if isinstance(parsed_content, list):
+                     content = parsed_content
+             except json.JSONDecodeError:
+                 # If parsing fails, keep content as string
+                 pass
+
+         if isinstance(content, list):
+             for item in content:
+                 if isinstance(item, dict) and item.get('type') == 'image_url':
+                     image_url = item.get('image_url', {})
+                     if isinstance(image_url, dict):
+                         url = image_url.get('url', '')
+                         if url.startswith('data:image'):
+                             images.append(url)
+                         elif url.startswith('lucidic_image_'):
+                             # This is a placeholder - retrieve from thread-local storage
+                             image = self._retrieve_image_from_placeholder(url)
+                             if image:
+                                 images.append(image)
+
+         return images
+
+     def _retrieve_image_from_placeholder(self, placeholder: str) -> Optional[str]:
+         """Retrieve image from thread-local storage using placeholder"""
+         try:
+             base64_data = get_image_by_placeholder(placeholder)
+             if base64_data:
+                 # Ensure it has proper data URI format
+                 if not base64_data.startswith('data:'):
+                     base64_data = f"data:image/jpeg;base64,{base64_data}"
+                 return base64_data
+         except Exception as e:
+             if DEBUG:
+                 logger.error(f"[SpanProcessor] Failed to retrieve image from placeholder: {e}")
+         return None
+
+     def _format_prompts(self, prompts: Any) -> str:
+         """Format prompts into a description"""
+         if isinstance(prompts, str):
+             return prompts
+         elif isinstance(prompts, list):
+             return self._format_messages(prompts)
+         else:
+             return "Model request"
+
+     def _format_messages(self, messages: list) -> str:
+         """Format a message list into 'role: content' lines"""
+         formatted = []
+
+         for msg in messages:
+             if isinstance(msg, dict):
+                 role = msg.get('role', 'unknown')
+                 content = msg.get('content', '')
+
+                 if isinstance(content, str):
+                     formatted.append(f"{role}: {content}")
+                 elif isinstance(content, list):
+                     # Extract text from multimodal content
+                     texts = [item.get('text', '') for item in content
+                              if isinstance(item, dict) and item.get('type') == 'text']
+                     if texts:
+                         formatted.append(f"{role}: {' '.join(texts)}")
+             elif isinstance(msg, str):
+                 formatted.append(msg)
+
+         return '\n'.join(formatted) if formatted else "Model request"
+
+     def _calculate_cost(self, attributes: Dict[str, Any]) -> Optional[float]:
+         """Calculate cost from token usage"""
+         prompt_tokens = (
+             attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS) or
+             attributes.get('gen_ai.usage.prompt_tokens') or
+             attributes.get('gen_ai.usage.input_tokens') or
+             0
+         )
+
+         completion_tokens = (
+             attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) or
+             attributes.get('gen_ai.usage.completion_tokens') or
+             attributes.get('gen_ai.usage.output_tokens') or
+             0
+         )
+
+         total_tokens = prompt_tokens + completion_tokens
+
+         if total_tokens > 0:
+             model = (
+                 attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or
+                 attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or
+                 attributes.get('gen_ai.response.model') or
+                 attributes.get('gen_ai.request.model')
+             )
+             if model:
+                 return calculate_cost(model, {
+                     "prompt_tokens": prompt_tokens,
+                     "completion_tokens": completion_tokens,
+                     "total_tokens": total_tokens,
+                 })
+
+         return None
+
+     def shutdown(self, timeout_millis: int = 30000) -> None:
+         """Shutdown processor"""
+         if self.span_to_event:
+             logger.warning(f"Shutting down with {len(self.span_to_event)} incomplete spans")
+
+     def force_flush(self, timeout_millis: int = 30000) -> bool:
+         """Force flush - no-op for this processor"""
+         return True
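
For orientation, here is a minimal sketch (not part of the package) of how a processor like this attaches to an OpenTelemetry TracerProvider, and of the indexed gen_ai.* attribute layout that _extract_indexed_messages and _extract_indexed_completions reconstruct. The SDK's own wiring lives in otel_init.py; the sketch uses only standard OTel SDK calls, the tracer name, model, and message texts are illustrative, and a Lucidic session must already be active or on_start skips the span.

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider

    from lucidicai.providers.lucidic_span_processor import LucidicSpanProcessor

    # Register the processor; it receives on_start/on_end callbacks for every span.
    provider = TracerProvider()
    provider.add_span_processor(LucidicSpanProcessor())
    trace.set_tracer_provider(provider)

    # Emit a span shaped like the OpenLLMetry output the processor expects.
    tracer = trace.get_tracer("demo")
    with tracer.start_as_current_span("openai.chat") as span:  # name matches an llm_patterns entry
        span.set_attribute("gen_ai.request.model", "gpt-4o-mini")
        span.set_attribute("gen_ai.prompt.0.role", "user")
        span.set_attribute("gen_ai.prompt.0.content", "What is OpenTelemetry?")
        span.set_attribute("gen_ai.completion.0.role", "assistant")
        span.set_attribute("gen_ai.completion.0.content", "An observability framework.")
        span.set_attribute("gen_ai.usage.prompt_tokens", 12)   # read by _calculate_cost
        span.set_attribute("gen_ai.usage.completion_tokens", 8)

    # On span end, the processor builds one finished Lucidic event whose description
    # is "user: What is OpenTelemetry?" and whose result is the completion content.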