lucidicai 1.2.15__py3-none-any.whl → 1.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. lucidicai/__init__.py +111 -21
  2. lucidicai/client.py +22 -5
  3. lucidicai/decorators.py +357 -0
  4. lucidicai/event.py +2 -2
  5. lucidicai/image_upload.py +24 -1
  6. lucidicai/providers/anthropic_handler.py +0 -7
  7. lucidicai/providers/image_storage.py +45 -0
  8. lucidicai/providers/langchain.py +0 -78
  9. lucidicai/providers/lucidic_exporter.py +259 -0
  10. lucidicai/providers/lucidic_span_processor.py +648 -0
  11. lucidicai/providers/openai_agents_instrumentor.py +307 -0
  12. lucidicai/providers/openai_handler.py +1 -56
  13. lucidicai/providers/otel_handlers.py +266 -0
  14. lucidicai/providers/otel_init.py +197 -0
  15. lucidicai/providers/otel_provider.py +168 -0
  16. lucidicai/providers/pydantic_ai_handler.py +2 -19
  17. lucidicai/providers/text_storage.py +53 -0
  18. lucidicai/providers/universal_image_interceptor.py +276 -0
  19. lucidicai/session.py +17 -4
  20. lucidicai/step.py +4 -4
  21. lucidicai/streaming.py +2 -3
  22. lucidicai/telemetry/__init__.py +0 -0
  23. lucidicai/telemetry/base_provider.py +21 -0
  24. lucidicai/telemetry/lucidic_exporter.py +259 -0
  25. lucidicai/telemetry/lucidic_span_processor.py +665 -0
  26. lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
  27. lucidicai/telemetry/opentelemetry_converter.py +436 -0
  28. lucidicai/telemetry/otel_handlers.py +266 -0
  29. lucidicai/telemetry/otel_init.py +197 -0
  30. lucidicai/telemetry/otel_provider.py +168 -0
  31. lucidicai/telemetry/pydantic_ai_handler.py +600 -0
  32. lucidicai/telemetry/utils/__init__.py +0 -0
  33. lucidicai/telemetry/utils/image_storage.py +45 -0
  34. lucidicai/telemetry/utils/text_storage.py +53 -0
  35. lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
  36. {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/METADATA +1 -1
  37. lucidicai-1.2.17.dist-info/RECORD +49 -0
  38. lucidicai-1.2.15.dist-info/RECORD +0 -25
  39. {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/WHEEL +0 -0
  40. {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/top_level.txt +0 -0
lucidicai/telemetry/pydantic_ai_handler.py
@@ -0,0 +1,600 @@
+ """Pydantic AI provider handler for the Lucidic API"""
+ from typing import Any, Dict, Optional
+
+ from .base_provider import BaseProvider
+ from lucidicai.client import Client
+ from lucidicai.model_pricing import calculate_cost
+ from lucidicai.singleton import singleton
+
+
+ @singleton
+ class PydanticAIHandler(BaseProvider):
+     """Handler for tracking PydanticAI model interactions with Lucidic"""
+
+     def __init__(self):
+         super().__init__()
+         self._provider_name = "PydanticAI"
+         self._original_anthropic_request = None
+         self._original_anthropic_request_stream = None
+         self._original_openai_request = None
+         self._original_openai_request_stream = None
+         self._original_gemini_request = None
+         self._original_gemini_request_stream = None
+
+     def handle_response(self, response, kwargs, event=None):
+         """Handle responses from Pydantic AI models"""
+         if not event:
+             return response
+         return response
+
+     def _format_messages(self, messages):
+         """Format messages for event description"""
+         if not messages:
+             return "No messages provided"
+
+         # Extract text content from messages
+         formatted_messages = []
+         for message in messages:
+             if hasattr(message, 'content'):
+                 if isinstance(message.content, str):
+                     formatted_messages.append(message.content)
+                 elif isinstance(message.content, list):
+                     # Handle structured content
+                     for item in message.content:
+                         if isinstance(item, dict) and item.get('type') == 'text':
+                             formatted_messages.append(item.get('text', ''))
+             elif isinstance(message, dict):
+                 formatted_messages.append(str(message.get('content', message)))
+             else:
+                 formatted_messages.append(str(message))
+
+         return ' | '.join(formatted_messages[:3]) # Limit to first 3 messages
+
+     def _handle_response(self, response, event_id, messages, model_settings):
+         """Handle non-streaming response"""
+         if not event_id:
+             return response
+
+         try:
+             # Extract response text and usage information
+             response_text = self._extract_response_text(response)
+             usage_info = self._extract_usage_info(response)
+             model_name = self._extract_model_name(response, model_settings)
+
+             # Calculate cost if usage info is available
+             cost = None
+             if usage_info and model_name:
+                 cost = calculate_cost(model_name, usage_info)
+
+             Client().session.update_event(
+                 event_id=event_id,
+                 is_finished=True,
+                 is_successful=True,
+                 cost_added=cost,
+                 model=model_name,
+                 result=response_text
+             )
+
+         except Exception as e:
+             Client().session.update_event(
+                 event_id=event_id,
+                 is_finished=True,
+                 is_successful=False,
+                 result=f"Error processing response: {str(e)}"
+             )
+
+         return response
+
+     def _wrap_stream(self, original_stream, event_id, messages, model_instance):
+         """Wrap streaming response to track accumulation"""
+
+         class StreamWrapper:
+             def __init__(self, original_stream, event_id, handler, model_instance):
+                 self._original_stream = original_stream
+                 self._event_id = event_id
+                 self._handler = handler
+                 self._model_instance = model_instance
+                 self._accumulated_text = ""
+                 self._iterator = None
+
+             def __aiter__(self):
+                 """Return an async iterator that properly implements the protocol"""
+                 return AsyncStreamIterator(self._original_stream, self._event_id, self._handler, self._model_instance)
+
+             def stream_text(self, delta=True):
+                 """Return the wrapped stream iterator for compatibility with PydanticAI"""
+                 return AsyncStreamIterator(self._original_stream, self._event_id, self._handler, self._model_instance)
+
+             def stream(self):
+                 """Deprecated compatibility method - use stream_text instead"""
+                 return self.stream_text(delta=True)
+
+             # Delegate other methods to the original stream
+             def __getattr__(self, name):
+                 return getattr(self._original_stream, name)
+
+         class AsyncStreamIterator:
+             def __init__(self, original_stream, event_id, handler, model_instance):
+                 self._original_stream = original_stream
+                 self._event_id = event_id
+                 self._handler = handler
+                 self._model_instance = model_instance
+                 self._accumulated_text = ""
+                 self._original_iterator = None
+                 self._final_chunk_with_usage = None
+
+             def __aiter__(self):
+                 """Return self to implement async iterator protocol"""
+                 return self
+
+             async def __anext__(self):
+                 """Implement async iterator protocol"""
+                 if self._original_iterator is None:
+                     # Initialize the original iterator - StreamedResponse is directly iterable
+                     self._original_iterator = self._original_stream.__aiter__()
+
+                 try:
+                     # Get the next chunk from original iterator
+                     chunk = await self._original_iterator.__anext__()
+
+                     # Extract text content from the StreamedResponse chunk
+                     chunk_text = self._handler._extract_chunk_text(chunk)
+                     if chunk_text:
+                         self._accumulated_text += chunk_text
+
+                     # Check if this chunk contains usage information (final chunk)
+                     if self._handler._is_final_chunk(chunk):
+                         self._final_chunk_with_usage = chunk
+
+                     return chunk
+
+                 except StopAsyncIteration:
+                     # Stream is done, update the event with accumulated text
+                     if self._event_id and not Client().session._active_event.is_finished:
+                         model_name = self._handler._extract_model_name(None, self._model_instance)
+                         # Try to get usage info from the original stream
+                         usage_info = None
+
+                         # Try multiple ways to get usage info from streaming response
+                         # First try the final chunk if we captured one with usage
+                         if hasattr(self, '_final_chunk_with_usage') and self._final_chunk_with_usage:
+                             usage_info = self._handler._extract_usage_info(self._final_chunk_with_usage)
+                         elif hasattr(self._original_stream, 'usage') and self._original_stream.usage:
+                             # Get the actual usage data by calling the method
+                             usage_data = self._original_stream.usage()
+                             usage_info = self._handler._extract_usage_info(usage_data)
+                         elif hasattr(self._original_stream, 'usage_metadata') and self._original_stream.usage_metadata:
+                             usage_info = self._handler._extract_usage_info(self._original_stream)
+                         elif hasattr(self._original_stream, '_usage') and self._original_stream._usage:
+                             usage_info = self._handler._extract_usage_info(self._original_stream._usage)
+                         elif hasattr(self._original_stream, 'response') and hasattr(self._original_stream.response, 'usage'):
+                             usage_info = self._handler._extract_usage_info(self._original_stream.response)
+
+                         cost = None
+                         if usage_info and model_name:
+                             cost = calculate_cost(model_name, usage_info)
+
+                         final_result = self._accumulated_text or "No content streamed"
+
+                         Client().session.update_event(
+                             event_id=self._event_id,
+                             is_finished=True,
+                             is_successful=True,
+                             model=model_name,
+                             cost_added=cost,
+                             result=final_result
+                         )
+
+                     # Re-raise StopAsyncIteration to end iteration
+                     raise
+
+                 except Exception as e:
+                     # Handle errors
+                     if self._event_id and not Client().session._active_event.is_finished:
+                         Client().session.update_event(
+                             event_id=self._event_id,
+                             is_finished=True,
+                             is_successful=False,
+                             result=f"Error during streaming: {str(e)}"
+                         )
+                     raise
+
+         return StreamWrapper(original_stream, event_id, self, model_instance)
+
+     def _extract_response_text(self, response):
+         """Extract text content from response"""
+         if hasattr(response, 'text'):
+             return response.text
+         elif hasattr(response, 'content'):
+             if isinstance(response.content, str):
+                 return response.content
+             elif isinstance(response.content, list):
+                 # Extract text from structured content
+                 text_parts = []
+                 for item in response.content:
+                     if isinstance(item, dict) and item.get('type') == 'text':
+                         text_parts.append(item.get('text', ''))
+                 return ' '.join(text_parts)
+         elif hasattr(response, 'message') and hasattr(response.message, 'content'):
+             return response.message.content
+
+         return str(response)
+
+     def _extract_chunk_text(self, chunk):
+         """Extract text from PydanticAI StreamedResponse chunk"""
+         if not chunk:
+             return ""
+
+         # Try direct string check first
+         if isinstance(chunk, str):
+             return chunk
+
+         # PydanticAI StreamedResponse chunks can have different formats
+         # Try various attributes that might contain the text content
+
+         # Check for delta content (common in streaming responses)
+         if hasattr(chunk, 'delta') and chunk.delta:
+             # PydanticAI uses content_delta attribute
+             if hasattr(chunk.delta, 'content_delta') and chunk.delta.content_delta:
+                 return chunk.delta.content_delta
+             elif hasattr(chunk.delta, 'content') and chunk.delta.content:
+                 return chunk.delta.content
+             elif hasattr(chunk.delta, 'text') and chunk.delta.text:
+                 return chunk.delta.text
+
+         # Check for direct text content
+         if hasattr(chunk, 'text') and chunk.text:
+             return chunk.text
+         elif hasattr(chunk, 'content') and chunk.content:
+             return chunk.content
+
+         # Check for choices (OpenAI style)
+         if hasattr(chunk, 'choices') and chunk.choices:
+             for choice in chunk.choices:
+                 if hasattr(choice, 'delta') and choice.delta:
+                     if hasattr(choice.delta, 'content') and choice.delta.content:
+                         return choice.delta.content
+                 elif hasattr(choice, 'text') and choice.text:
+                     return choice.text
+
+         # Check for candidates (Gemini style)
+         if hasattr(chunk, 'candidates') and chunk.candidates:
+             for candidate in chunk.candidates:
+                 if hasattr(candidate, 'content') and candidate.content:
+                     if hasattr(candidate.content, 'parts') and candidate.content.parts:
+                         for part in candidate.content.parts:
+                             if hasattr(part, 'text') and part.text:
+                                 return part.text
+                 elif hasattr(candidate, 'delta') and candidate.delta:
+                     if hasattr(candidate.delta, 'content') and candidate.delta.content:
+                         return candidate.delta.content
+
+         return ""
+
+     def _extract_usage_info(self, response_or_chunk):
+         """Extract usage information from response or chunk and normalize token keys"""
+         if not response_or_chunk:
+             return None
+
+         # Check if this is directly a Usage object from PydanticAI
+         if hasattr(response_or_chunk, 'request_tokens') and hasattr(response_or_chunk, 'response_tokens'):
+             # This is a PydanticAI Usage object, extract directly
+             usage_dict = {
+                 'request_tokens': response_or_chunk.request_tokens,
+                 'response_tokens': response_or_chunk.response_tokens,
+                 'total_tokens': response_or_chunk.total_tokens,
+             }
+             # Add details if available
+             if hasattr(response_or_chunk, 'details') and response_or_chunk.details:
+                 usage_dict['details'] = response_or_chunk.details
+         else:
+             # Common usage patterns for other response types
+             usage_attrs = ['usage', 'usage_metadata', 'token_usage']
+             usage_dict = None
+
+             for attr in usage_attrs:
+                 if hasattr(response_or_chunk, attr):
+                     usage = getattr(response_or_chunk, attr)
+                     if usage:
+                         # Convert to dict format expected by calculate_cost
+                         if hasattr(usage, '__dict__'):
+                             usage_dict = usage.__dict__
+                         elif isinstance(usage, dict):
+                             usage_dict = usage
+                         else:
+                             continue
+                         break
+
+         if not usage_dict:
+             return None
+
+         # Normalize token keys for PydanticAI format
+         normalized_usage = {}
+
+         # Map PydanticAI token keys to standard format
+         if 'request_tokens' in usage_dict:
+             normalized_usage['prompt_tokens'] = usage_dict['request_tokens']
+             normalized_usage['input_tokens'] = usage_dict['request_tokens']
+
+         if 'response_tokens' in usage_dict:
+             normalized_usage['completion_tokens'] = usage_dict['response_tokens']
+             normalized_usage['output_tokens'] = usage_dict['response_tokens']
+
+         # Map Gemini token keys to standard format
+         if 'prompt_token_count' in usage_dict:
+             normalized_usage['prompt_tokens'] = usage_dict['prompt_token_count']
+             normalized_usage['input_tokens'] = usage_dict['prompt_token_count']
+
+         if 'candidates_token_count' in usage_dict:
+             normalized_usage['completion_tokens'] = usage_dict['candidates_token_count']
+             normalized_usage['output_tokens'] = usage_dict['candidates_token_count']
+
+         if 'total_token_count' in usage_dict:
+             normalized_usage['total_tokens'] = usage_dict['total_token_count']
+
+         # Copy other standard keys if they exist
+         for key in ['prompt_tokens', 'completion_tokens', 'input_tokens', 'output_tokens', 'total_tokens']:
+             if key in usage_dict:
+                 normalized_usage[key] = usage_dict[key]
+
+         # Copy all original keys for completeness
+         normalized_usage.update(usage_dict)
+
+         return normalized_usage
+
+     def _extract_model_name(self, response_or_chunk, model_instance=None):
+         """Extract model name from response or model instance"""
+         # Try to get from response first
+         if response_or_chunk and hasattr(response_or_chunk, 'model'):
+             return response_or_chunk.model
+
+         # Try from model instance
+         if model_instance:
+             if hasattr(model_instance, 'model_name'):
+                 return model_instance.model_name
+             elif hasattr(model_instance, 'model'):
+                 return model_instance.model
+             elif hasattr(model_instance, '_model_name'):
+                 return model_instance._model_name
+             elif hasattr(model_instance, 'name'):
+                 return model_instance.name
+
+         # Default fallback for PydanticAI models
+         return "gpt-4o-mini"
+
+     def _is_final_chunk(self, chunk):
+         """Check if this is the final chunk with usage information"""
+         if not chunk:
+             return False
+
+         # Check various usage attributes that might indicate final chunk
+         usage_attrs = ['usage', 'usage_metadata', 'token_usage', '_usage']
+         for attr in usage_attrs:
+             if hasattr(chunk, attr) and getattr(chunk, attr) is not None:
+                 return True
+
+         # Check if chunk has response with usage
+         if hasattr(chunk, 'response') and hasattr(chunk.response, 'usage') and chunk.response.usage:
+             return True
+
+         return False
+
+     def _wrap_request(self, model_instance, messages, model_settings, model_request_parameters, original_method):
+         """Wrap regular request method to track LLM calls"""
+         description = self._format_messages(messages)
+         event_id = Client().session.create_event(
+             description=description,
+             result="Waiting for response..."
+         )
+
+         async def async_wrapper():
+             try:
+                 # Make the original API call
+                 response = await original_method(model_instance, messages, model_settings, model_request_parameters)
+
+                 # Handle the response
+                 return self._handle_response(response, event_id, messages, model_instance)
+
+             except Exception as e:
+                 Client().session.update_event(
+                     is_finished=True,
+                     is_successful=False,
+                     result=f"Error during request: {str(e)}"
+                 )
+                 raise
+
+         return async_wrapper()
+
+     def _wrap_request_stream_context_manager(self, model_instance, messages, model_settings, model_request_parameters, original_method):
+         """Return an async context manager for streaming requests"""
+         description = self._format_messages(messages)
+         event_id = Client().session.create_event(
+             description=description,
+             result="Streaming response..."
+         )
+
+         class WrappedStreamContextManager:
+             def __init__(self, original_method, model_instance, messages, model_settings, model_request_parameters, handler):
+                 self.original_method = original_method
+                 self.model_instance = model_instance
+                 self.messages = messages
+                 self.model_settings = model_settings
+                 self.model_request_parameters = model_request_parameters
+                 self.handler = handler
+                 self.original_context_manager = None
+
+             async def __aenter__(self):
+                 try:
+                     # Get the original context manager (don't await it, it's already a context manager)
+                     self.original_context_manager = self.original_method(
+                         self.model_instance, self.messages, self.model_settings, self.model_request_parameters
+                     )
+
+                     # Enter the original context manager to get the stream
+                     original_stream = await self.original_context_manager.__aenter__()
+
+                     # Wrap the stream to capture the actual streamed content
+                     return self.handler._wrap_stream(original_stream, event_id, self.messages, self.model_instance)
+
+                 except Exception as e:
+                     if Client().session._active_event:
+                         Client().session.update_event(
+                             is_finished=True,
+                             is_successful=False,
+                             result=f"Error during streaming: {str(e)}"
+                         )
+                     raise
+
+             async def __aexit__(self, exc_type, exc_val, exc_tb):
+                 # The stream wrapper handles event finalization, so we just delegate
+                 if self.original_context_manager:
+                     return await self.original_context_manager.__aexit__(exc_type, exc_val, exc_tb)
+
+         return WrappedStreamContextManager(original_method, model_instance, messages, model_settings, model_request_parameters, self)
+
+     async def _wrap_request_stream(self, model_instance, messages, model_settings, model_request_parameters, original_method):
+         """Wrap streaming request method"""
+         description = self._format_messages(messages)
+         event = Client().session.create_event(
+             description=description,
+             result="Streaming response..."
+         )
+
+         try:
+             # Get the original stream
+             original_stream = await original_method(model_instance, messages, model_settings, model_request_parameters)
+
+             # Return wrapped stream
+             return self._wrap_stream(original_stream, event, messages, model_instance)
+
+         except Exception as e:
+             if Client().session._active_event:
+                 Client().session.update_event(
+                     is_finished=True,
+                     is_successful=False,
+                     result=f"Error during streaming: {str(e)}"
+                 )
+             raise
+
+     def override(self):
+         """
+         Override PydanticAI model methods to enable automatic tracking.
+
+         This method uses monkey-patching to intercept calls to PydanticAI's
+         AnthropicModel, OpenAIModel, and GeminiModel request methods, allowing Lucidic to
+         track all LLM interactions automatically.
+         """
+         # Patch Anthropic models
+         try:
+             from pydantic_ai.models.anthropic import AnthropicModel
+
+             # Store original methods for restoration later
+             self._original_anthropic_request = AnthropicModel.request
+             self._original_anthropic_request_stream = AnthropicModel.request_stream
+
+             # Create patched methods for Anthropic models
+             def patched_anthropic_request(model_instance, messages, model_settings=None, model_request_parameters=None):
+                 return self._wrap_request(model_instance, messages, model_settings, model_request_parameters, self._original_anthropic_request)
+
+             def patched_anthropic_request_stream(model_instance, messages, model_settings=None, model_request_parameters=None):
+                 return self._wrap_request_stream_context_manager(model_instance, messages, model_settings, model_request_parameters, self._original_anthropic_request_stream)
+
+             # Apply the patches
+             AnthropicModel.request = patched_anthropic_request
+             AnthropicModel.request_stream = patched_anthropic_request_stream
+
+         except ImportError:
+             # AnthropicModel not available, skip patching
+             pass
+
+         # Patch OpenAI models
+         try:
+             from pydantic_ai.models.openai import OpenAIModel
+
+             # Store original methods for restoration later
+             self._original_openai_request = OpenAIModel.request
+             self._original_openai_request_stream = OpenAIModel.request_stream
+
+             # Create patched methods for OpenAI models
+             def patched_openai_request(model_instance, messages, model_settings=None, model_request_parameters=None):
+                 return self._wrap_request(model_instance, messages, model_settings, model_request_parameters, self._original_openai_request)
+
+             def patched_openai_request_stream(model_instance, messages, model_settings=None, model_request_parameters=None):
+                 return self._wrap_request_stream_context_manager(model_instance, messages, model_settings, model_request_parameters, self._original_openai_request_stream)
+
+             # Apply the patches
+             OpenAIModel.request = patched_openai_request
+             OpenAIModel.request_stream = patched_openai_request_stream
+
+         except ImportError:
+             # OpenAIModel not available, skip patching
+             pass
+
+         # Patch Gemini models
+         try:
+             from pydantic_ai.models.gemini import GeminiModel
+
+             # Store original methods for restoration later
+             self._original_gemini_request = GeminiModel.request
+             self._original_gemini_request_stream = GeminiModel.request_stream
+
+             # Create patched methods for Gemini models
+             def patched_gemini_request(model_instance, messages, model_settings=None, model_request_parameters=None):
+                 return self._wrap_request(model_instance, messages, model_settings, model_request_parameters, self._original_gemini_request)
+
+             def patched_gemini_request_stream(model_instance, messages, model_settings=None, model_request_parameters=None):
+                 return self._wrap_request_stream_context_manager(model_instance, messages, model_settings, model_request_parameters, self._original_gemini_request_stream)
+
+             # Apply the patches
+             GeminiModel.request = patched_gemini_request
+             GeminiModel.request_stream = patched_gemini_request_stream
+
+         except ImportError:
+             # GeminiModel not available, skip patching
+             pass
+
+     def undo_override(self):
+         """
+         Restore original PydanticAI model methods.
+
+         This method restores the original, unpatched methods to their
+         respective model classes, effectively disabling Lucidic tracking.
+         """
+         # Restore Anthropic models
+         try:
+             from pydantic_ai.models.anthropic import AnthropicModel
+
+             # Restore original methods if they were previously stored
+             if hasattr(self, '_original_anthropic_request'):
+                 AnthropicModel.request = self._original_anthropic_request
+                 AnthropicModel.request_stream = self._original_anthropic_request_stream
+
+         except ImportError:
+             # AnthropicModel not available, nothing to restore
+             pass
+
+         # Restore OpenAI models
+         try:
+             from pydantic_ai.models.openai import OpenAIModel
+
+             # Restore original methods if they were previously stored
+             if hasattr(self, '_original_openai_request'):
+                 OpenAIModel.request = self._original_openai_request
+                 OpenAIModel.request_stream = self._original_openai_request_stream
+
+         except ImportError:
+             # OpenAIModel not available, nothing to restore
+             pass
+
+         # Restore Gemini models
+         try:
+             from pydantic_ai.models.gemini import GeminiModel
+
+             # Restore original methods if they were previously stored
+             if hasattr(self, '_original_gemini_request'):
+                 GeminiModel.request = self._original_gemini_request
+                 GeminiModel.request_stream = self._original_gemini_request_stream
+
+         except ImportError:
+             # GeminiModel not available, nothing to restore
+             pass
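
Editor's note: to make the patching flow above concrete, a minimal usage sketch of this handler follows. It is illustrative only and not part of the package: it assumes a Lucidic session has already been initialized (the handler calls Client().session internally), that pydantic_ai is installed, and the model string and prompt are placeholders.

    import asyncio
    from pydantic_ai import Agent
    from lucidicai.telemetry.pydantic_ai_handler import PydanticAIHandler

    handler = PydanticAIHandler()   # singleton instance
    handler.override()              # monkey-patch AnthropicModel / OpenAIModel / GeminiModel request methods

    async def main():
        agent = Agent("openai:gpt-4o-mini")        # placeholder model
        result = await agent.run("Say hello.")     # this request is now tracked as a Lucidic event
        print(result)

    try:
        asyncio.run(main())
    finally:
        handler.undo_override()     # restore the original request methods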
File without changes
@@ -0,0 +1,45 @@
+ """Thread-local storage for images to work around OpenTelemetry attribute size limits"""
+ import threading
+ import logging
+ import os
+
+ logger = logging.getLogger("Lucidic")
+ DEBUG = os.getenv("LUCIDIC_DEBUG", "False") == "True"
+
+ # Thread-local storage for images
+ _thread_local = threading.local()
+
+ def store_image(image_base64: str) -> str:
+     """Store image in thread-local storage and return placeholder"""
+     if not hasattr(_thread_local, 'images'):
+         _thread_local.images = []
+
+     _thread_local.images.append(image_base64)
+     placeholder = f"lucidic_image_{len(_thread_local.images) - 1}"
+
+     if DEBUG:
+         logger.info(f"[ImageStorage] Stored image of size {len(image_base64)}, placeholder: {placeholder}")
+
+     return placeholder
+
+ def get_stored_images():
+     """Get all stored images"""
+     if hasattr(_thread_local, 'images'):
+         return _thread_local.images
+     return []
+
+ def clear_stored_images():
+     """Clear stored images"""
+     if hasattr(_thread_local, 'images'):
+         _thread_local.images.clear()
+
+ def get_image_by_placeholder(placeholder: str):
+     """Get image by placeholder"""
+     if hasattr(_thread_local, 'images') and placeholder.startswith('lucidic_image_'):
+         try:
+             index = int(placeholder.split('_')[-1])
+             if 0 <= index < len(_thread_local.images):
+                 return _thread_local.images[index]
+         except (ValueError, IndexError):
+             pass
+     return None
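
Editor's note: a short round-trip sketch of how these helpers fit together, assuming all calls happen on the same thread. The base64 string is a stand-in, and the import path is one of the two copies this release adds (the module also ships under lucidicai/providers/image_storage.py).

    from lucidicai.telemetry.utils.image_storage import (
        store_image, get_stored_images, get_image_by_placeholder, clear_stored_images)

    placeholder = store_image("iVBORw0KGgo...")             # first image -> "lucidic_image_0"
    assert get_image_by_placeholder(placeholder) == "iVBORw0KGgo..."
    assert len(get_stored_images()) == 1
    clear_stored_images()                                   # e.g. after the span/event is exported
    assert get_image_by_placeholder(placeholder) is None    # placeholder index no longer valid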