lucidicai 1.3.1__py3-none-any.whl → 1.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai/__init__.py +276 -16
- lucidicai/client.py +19 -1
- lucidicai/context.py +119 -0
- lucidicai/decorators.py +33 -15
- lucidicai/event.py +3 -1
- lucidicai/model_pricing.py +11 -0
- lucidicai/session.py +1 -1
- lucidicai/telemetry/lucidic_exporter.py +16 -4
- lucidicai/telemetry/lucidic_span_processor.py +67 -49
- lucidicai/telemetry/otel_handlers.py +207 -59
- lucidicai/telemetry/otel_init.py +163 -51
- lucidicai/telemetry/otel_provider.py +15 -5
- lucidicai/telemetry/utils/universal_image_interceptor.py +89 -0
- {lucidicai-1.3.1.dist-info → lucidicai-1.3.5.dist-info}/METADATA +1 -1
- {lucidicai-1.3.1.dist-info → lucidicai-1.3.5.dist-info}/RECORD +17 -16
- {lucidicai-1.3.1.dist-info → lucidicai-1.3.5.dist-info}/WHEEL +0 -0
- {lucidicai-1.3.1.dist-info → lucidicai-1.3.5.dist-info}/top_level.txt +0 -0
lucidicai/model_pricing.py
CHANGED

@@ -3,6 +3,12 @@ import logging
 logger = logging.getLogger("Lucidic")
 
 MODEL_PRICING = {
+
+    # OpenAI GPT-5 Series (Verified 2025)
+    "gpt-5": {"input": 10.0, "output": 10.0},
+    "gpt-5-mini": {"input": 0.250, "output": 2.0},
+    "gpt-5-nano": {"input": 0.05, "output": 0.4},
+
     # OpenAI GPT-4o Series (Verified 2025)
     "gpt-4o": {"input": 2.5, "output": 10.0},
     "gpt-4o-mini": {"input": 0.15, "output": 0.6},
@@ -190,6 +196,7 @@ PROVIDER_AVERAGES = {
     "together": {"input": 0.15, "output": 0.15},  # Together AI average
     "perplexity": {"input": 0.4, "output": 1.5},  # Perplexity average
     "grok": {"input": 2.4, "output": 12},  # Grok average
+    "groq": {"input": 0.3, "output": 0.6},  # Groq average (placeholder)
 }
 
 def get_provider_from_model(model: str) -> str:
@@ -218,6 +225,8 @@ def get_provider_from_model(model: str) -> str:
         return "perplexity"
     elif any(grok in model_lower for grok in ["grok", "xAI"]):
         return "grok"
+    elif "groq" in model_lower:
+        return "groq"
     else:
         return "unknown"
 
@@ -228,6 +237,8 @@ def normalize_model_name(model: str) -> str:
     model_lower = model.lower()
     # Remove provider prefixes (generalizable pattern: any_provider/)
     model_lower = re.sub(r'^[^/]+/', '', model_lower)
+    # Strip Google/Vertex prefixes
+    model_lower = model_lower.replace('publishers/google/models/', '').replace('models/', '')
 
     # Strip date suffixes (20240229, 20241022, etc.) but preserve model versions like o1-mini, o3-mini
     # Pattern: remove -YYYYMMDD or -YYYY-MM-DD at the end
lucidicai/session.py
CHANGED

lucidicai/telemetry/lucidic_exporter.py
CHANGED

@@ -8,6 +8,7 @@ from opentelemetry.trace import StatusCode
 from opentelemetry.semconv_ai import SpanAttributes
 
 from lucidicai.client import Client
+from lucidicai.context import current_session_id
 from lucidicai.model_pricing import calculate_cost
 from lucidicai.image_upload import extract_base64_images
 
@@ -28,9 +29,6 @@ class LucidicSpanExporter(SpanExporter):
         """Export spans by converting them to Lucidic events"""
         try:
             client = Client()
-            if not client.session:
-                logger.debug("No active session, skipping span export")
-                return SpanExportResult.SUCCESS
 
             for span in spans:
                 self._process_span(span, client)
@@ -100,6 +98,19 @@ class LucidicSpanExporter(SpanExporter):
                 attributes.get(SpanAttributes.LLM_REQUEST_MODEL) or \
                 attributes.get('gen_ai.request.model') or 'unknown'
 
+            # Resolve target session id for this span
+            target_session_id = attributes.get('lucidic.session_id')
+            if not target_session_id:
+                try:
+                    target_session_id = current_session_id.get(None)
+                except Exception:
+                    target_session_id = None
+            if not target_session_id:
+                if getattr(client, 'session', None) and getattr(client.session, 'session_id', None):
+                    target_session_id = client.session.session_id
+            if not target_session_id:
+                return None
+
             # Create event
             event_kwargs = {
                 'description': description,
@@ -115,7 +126,7 @@ class LucidicSpanExporter(SpanExporter):
             if step_id:
                 event_kwargs['step_id'] = step_id
 
-            return client.
+            return client.create_event_for_session(target_session_id, **event_kwargs)
 
         except Exception as e:
             logger.error(f"Failed to create event from span: {e}")
@@ -143,6 +154,7 @@ class LucidicSpanExporter(SpanExporter):
             if cost is not None:
                 update_kwargs['cost_added'] = cost
 
+            # Route update to the same session; event_id is globally unique so server resolves it
             client.session.update_event(**update_kwargs)
 
         except Exception as e:
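The exporter no longer drops spans when the global client lacks a session; it resolves a target session per span instead. A condensed sketch of that fallback chain (span attribute, then the async-safe contextvar from `lucidicai.context`, then the global client session); the `client` argument is assumed to be shaped like the SDK's client object:

```python
from contextvars import ContextVar
from typing import Optional

# Stand-in for lucidicai.context.current_session_id
current_session_id: ContextVar[Optional[str]] = ContextVar("current_session_id", default=None)

def resolve_session_id(attributes: dict, client: object) -> Optional[str]:
    # 1) explicit stamp placed on the span by the processor's on_start
    sid = attributes.get("lucidic.session_id")
    # 2) async-safe context of the task/thread that made the LLM call
    if not sid:
        sid = current_session_id.get(None)
    # 3) legacy fallback: the process-wide client session
    if not sid:
        sid = getattr(getattr(client, "session", None), "session_id", None)
    return sid  # None -> skip the span rather than file it under the wrong session
```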
lucidicai/telemetry/lucidic_span_processor.py
CHANGED

@@ -1,4 +1,9 @@
-"""Custom span processor for real-time Lucidic event handling
+"""Custom span processor for real-time Lucidic event handling
+
+Updated to stamp spans with the correct session id from async-safe
+context, and to create events for that session without mutating the
+global client session.
+"""
 import os
 import logging
 import json
@@ -10,7 +15,8 @@ from opentelemetry.semconv_ai import SpanAttributes
 
 from lucidicai.client import Client
 from lucidicai.model_pricing import calculate_cost
-from .
+from lucidicai.context import current_session_id
+from .utils.image_storage import get_stored_images, clear_stored_images, get_image_by_placeholder
 from .utils.text_storage import get_stored_text, clear_stored_texts
 
 logger = logging.getLogger("Lucidic")
@@ -35,11 +41,15 @@ class LucidicSpanProcessor(SpanProcessor):
         logger.info(f"[SpanProcessor] on_start called for span: {span.name}")
         # logger.info(f"[SpanProcessor] Span attributes at start: {dict(span.attributes or {})}")
 
+        # Stamp session id from contextvars if available
+        try:
+            sid = current_session_id.get(None)
+            if sid:
+                span.set_attribute('lucidic.session_id', sid)
+        except Exception:
+            pass
+
         client = Client()
-        if not client.session:
-            logger.debug("No active session, skipping span tracking")
-            return
-
         # Only process LLM spans
         if not self._is_llm_span(span):
             if DEBUG:
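Stamping the session id in `on_start` is what makes concurrent sessions safe: `ContextVar` values are isolated per asyncio task (and per thread), so two in-flight sessions cannot clobber each other. A self-contained demo of that property (not SDK code):

```python
import asyncio
from contextvars import ContextVar

current_session_id: ContextVar = ContextVar("current_session_id", default=None)

async def traced_call(sid: str) -> str:
    token = current_session_id.set(sid)  # what a session-scoped context manager would do
    try:
        await asyncio.sleep(0)           # yield so the two tasks interleave
        return current_session_id.get()  # what on_start reads when stamping the span
    finally:
        current_session_id.reset(token)

async def main():
    # Each task sees only its own session id despite running concurrently
    assert await asyncio.gather(traced_call("sess-a"), traced_call("sess-b")) == ["sess-a", "sess-b"]

asyncio.run(main())
```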
@@ -88,9 +98,6 @@ class LucidicSpanProcessor(SpanProcessor):
             return
 
         client = Client()
-        if not client.session:
-            return
-
         span_context = self.span_contexts.pop(span_id, {})
 
         # Create event with all the attributes now available
@@ -127,7 +134,7 @@
 
         # Check span name
         span_name_lower = span.name.lower()
-        llm_patterns = ['openai', 'anthropic', 'chat', 'completion', 'embedding', 'gemini', 'claude']
+        llm_patterns = ['openai', 'anthropic', 'chat', 'completion', 'embedding', 'gemini', 'claude', 'bedrock', 'vertex', 'cohere', 'groq']
 
         if any(pattern in span_name_lower for pattern in llm_patterns):
             return True
@@ -248,6 +255,22 @@
         # Check success
         is_successful = span.status.status_code != StatusCode.ERROR
 
+        # Resolve target session id for this span
+        target_session_id = attributes.get('lucidic.session_id')
+        if not target_session_id:
+            try:
+                target_session_id = current_session_id.get(None)
+            except Exception:
+                target_session_id = None
+        if not target_session_id:
+            # Fallback to global client session if set
+            if getattr(client, 'session', None) and getattr(client.session, 'session_id', None):
+                target_session_id = client.session.session_id
+        if not target_session_id:
+            if DEBUG:
+                logger.info("[SpanProcessor] No session id found for span; skipping event creation")
+            return None
+
         # Create event with all data
         event_kwargs = {
             'description': description,
@@ -268,8 +291,8 @@
         if step_id:
             event_kwargs['step_id'] = step_id
 
-        # Create the event (already completed)
-        event_id = client.
+        # Create the event (already completed) for the resolved session id
+        event_id = client.create_event_for_session(target_session_id, **event_kwargs)
 
         return event_id
 
@@ -397,53 +420,48 @@
         while True:
             prefix = f"gen_ai.prompt.{i}"
             role = attributes.get(f"{prefix}.role")
-
-            if not role:
-                break
-
-            message = {"role": role}
-
-            # Get content
             content = attributes.get(f"{prefix}.content")
+
+            # Check if any attributes exist for this index
+            attr_has_any = False
+            for key in attributes.keys():
+                if isinstance(key, str) and key.startswith(f"{prefix}."):
+                    attr_has_any = True
+                    break
+
+            stored_text = get_stored_text(i)
+            stored_images = get_stored_images()
+
+            # Break if no indexed attrs and not the first synthetic message case
+            if not attr_has_any and not (i == 0 and (stored_text or stored_images)):
+                break
+
+            message = {"role": role or "user"}
+
             if content:
                 # Try to parse JSON content (for multimodal)
                 try:
                     import json
                     parsed_content = json.loads(content)
                     message["content"] = parsed_content
-                except:
+                except Exception:
                     message["content"] = content
             else:
-                # Content
-
-                stored_text
-
-
-
+                # Content missing: synthesize from stored text/images
+                synthetic_content = []
+                if stored_text and i == 0:
+                    synthetic_content.append({"type": "text", "text": stored_text})
+                if stored_images and i == 0:
+                    for img in stored_images:
+                        synthetic_content.append({"type": "image_url", "image_url": {"url": img}})
+                if synthetic_content:
                     if DEBUG:
-                    logger.info(f"[SpanProcessor]
-
-
-
-
-
-                if stored_text:
-                    synthetic_content.append({
-                        "type": "text",
-                        "text": stored_text
-                    })
-
-                # Add images if available
-                if stored_images and i == 0:  # Assume first message might have images
-                    for idx, img in enumerate(stored_images):
-                        synthetic_content.append({
-                            "type": "image_url",
-                            "image_url": {"url": img}
-                        })
-
-                if synthetic_content:
-                    message["content"] = synthetic_content
-
+                        logger.info(f"[SpanProcessor] Using stored text/images for message {i}")
+                    message["content"] = synthetic_content
+                elif not attr_has_any:
+                    # No real attributes and nothing stored to synthesize -> stop
+                    break
+
             messages.append(message)
             i += 1
 
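The rewritten prompt loop keys off "does any `gen_ai.prompt.{i}.*` attribute exist" rather than requiring a role, so role-less messages default to `user` and a synthetic first message (stored text/images) still gets emitted. A stripped-down sketch of the indexed walk, without the stored-content fallback:

```python
def extract_messages(attributes: dict) -> list:
    """Walk gen_ai.prompt.0.*, gen_ai.prompt.1.*, ... until an index has no attributes."""
    messages, i = [], 0
    while True:
        prefix = f"gen_ai.prompt.{i}"
        if not any(isinstance(k, str) and k.startswith(f"{prefix}.") for k in attributes):
            break
        messages.append({
            "role": attributes.get(f"{prefix}.role") or "user",  # the new default
            "content": attributes.get(f"{prefix}.content"),
        })
        i += 1
    return messages

attrs = {
    "gen_ai.prompt.0.content": "What is in this image?",  # no role recorded
    "gen_ai.prompt.1.role": "assistant",
    "gen_ai.prompt.1.content": "A cat.",
}
print(extract_messages(attrs))
# [{'role': 'user', 'content': 'What is in this image?'}, {'role': 'assistant', 'content': 'A cat.'}]
```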
lucidicai/telemetry/otel_handlers.py
CHANGED

@@ -1,4 +1,7 @@
-"""OpenTelemetry-based handlers that maintain backward compatibility
+"""OpenTelemetry-based handlers that maintain backward compatibility
+
+Adds guards to avoid repeated monkey-patching under concurrent init.
+"""
 import logging
 from typing import Optional
 
@@ -7,6 +10,11 @@ from .otel_init import LucidicTelemetry
 
 logger = logging.getLogger("Lucidic")
 
+import threading
+
+_patch_lock = threading.Lock()
+_openai_patched = False
+_anthropic_patched = False
 
 class OTelOpenAIHandler(BaseProvider):
     """OpenAI handler using OpenTelemetry instrumentation"""
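The module-level lock and flags make the client patching idempotent: under concurrent init calls only the first caller applies the monkey-patch, and later callers fall through. The guard pattern, reduced to its essentials:

```python
import threading

_patch_lock = threading.Lock()
_patched = False

def ensure_patched(apply_patch) -> None:
    """Run apply_patch() at most once per process, even across racing threads."""
    global _patched
    with _patch_lock:      # serialize the check-then-act
        if not _patched:
            apply_patch()
            _patched = True
```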
@@ -35,37 +43,27 @@ class OTelOpenAIHandler(BaseProvider):
 
         # Also patch OpenAI client to intercept images
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            # Also patch AsyncOpenAI
-            if hasattr(openai, 'AsyncOpenAI'):
-                original_async_init = openai.AsyncOpenAI.__init__
-                def patched_async_init(self, *args, **kwargs):
-                    original_async_init(self, *args, **kwargs)
-                    # Patch this instance
-                    patch_openai_client(self)
-
-                openai.AsyncOpenAI.__init__ = patched_async_init
-
+            with _patch_lock:
+                global _openai_patched
+                if not _openai_patched:
+                    import openai
+                    from .utils.universal_image_interceptor import UniversalImageInterceptor, patch_openai_client
+                    interceptor = UniversalImageInterceptor.create_interceptor("openai")
+                    if hasattr(openai, 'ChatCompletion'):
+                        original = openai.ChatCompletion.create
+                        openai.ChatCompletion.create = interceptor(original)
+                    original_client_init = openai.OpenAI.__init__
+                    def patched_init(self, *args, **kwargs):
+                        original_client_init(self, *args, **kwargs)
+                        patch_openai_client(self)
+                    openai.OpenAI.__init__ = patched_init
+                    if hasattr(openai, 'AsyncOpenAI'):
+                        original_async_init = openai.AsyncOpenAI.__init__
+                        def patched_async_init(self, *args, **kwargs):
+                            original_async_init(self, *args, **kwargs)
+                            patch_openai_client(self)
+                        openai.AsyncOpenAI.__init__ = patched_async_init
+                    _openai_patched = True
         except Exception as e:
             logger.warning(f"Could not patch OpenAI for image interception: {e}")
 
@@ -108,32 +106,25 @@ class OTelAnthropicHandler(BaseProvider):
 
         # Also patch Anthropic client to intercept images
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            def patched_async_init(self, *args, **kwargs):
-                original_async_init(self, *args, **kwargs)
-                # Patch this instance
-                patch_anthropic_client(self)
-
-            anthropic.AsyncAnthropic.__init__ = patched_async_init
-
+            with _patch_lock:
+                global _anthropic_patched
+                if not _anthropic_patched:
+                    import anthropic
+                    from .utils.universal_image_interceptor import UniversalImageInterceptor, patch_anthropic_client
+                    interceptor = UniversalImageInterceptor.create_interceptor("anthropic")
+                    async_interceptor = UniversalImageInterceptor.create_async_interceptor("anthropic")
+                    original_client_init = anthropic.Anthropic.__init__
+                    def patched_init(self, *args, **kwargs):
+                        original_client_init(self, *args, **kwargs)
+                        patch_anthropic_client(self)
+                    anthropic.Anthropic.__init__ = patched_init
+                    if hasattr(anthropic, 'AsyncAnthropic'):
+                        original_async_init = anthropic.AsyncAnthropic.__init__
+                        def patched_async_init(self, *args, **kwargs):
+                            original_async_init(self, *args, **kwargs)
+                            patch_anthropic_client(self)
+                        anthropic.AsyncAnthropic.__init__ = patched_async_init
+                    _anthropic_patched = True
         except Exception as e:
             logger.warning(f"Could not patch Anthropic for image interception: {e}")
 
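Concretely, the guard prevents constructor re-wrapping: running the old unguarded patch twice nests the per-instance hook, so it fires once per `override()` call. A toy reproduction:

```python
def wrap(init):
    def patched(self, *args, **kwargs):
        init(self, *args, **kwargs)
        self.hooks = getattr(self, "hooks", 0) + 1  # stand-in for patch_openai_client(self)
    return patched

class FakeClient:
    def __init__(self):
        pass

FakeClient.__init__ = wrap(FakeClient.__init__)
FakeClient.__init__ = wrap(FakeClient.__init__)  # a second, unguarded override()
print(FakeClient().hooks)  # 2 -- the hook nested; the _openai_patched flag keeps this at 1
```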
@@ -356,4 +347,161 @@
             logger.info("[OTel LiteLLM Handler] Instrumentation disabled")
 
         except Exception as e:
-            logger.error(f"Error disabling LiteLLM instrumentation: {e}")
+            logger.error(f"Error disabling LiteLLM instrumentation: {e}")
+
+
+class OTelBedrockHandler(BaseProvider):
+    """AWS Bedrock handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "Bedrock"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        return response
+
+    def override(self):
+        try:
+            from lucidicai.client import Client
+            client = Client()
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+            self.telemetry.instrument_providers(["bedrock"])
+            logger.info("[OTel Bedrock Handler] Instrumentation enabled")
+        except Exception as e:
+            logger.error(f"Failed to enable Bedrock instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        logger.info("[OTel Bedrock Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelGoogleGenerativeAIHandler(BaseProvider):
+    """Google Generative AI handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "Google Generative AI"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        return response
+
+    def override(self):
+        try:
+            from lucidicai.client import Client
+            client = Client()
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+            self.telemetry.instrument_providers(["google"])
+            # Best-effort image interception for Google clients where applicable
+            try:
+                from .utils.universal_image_interceptor import patch_google_client, patch_google_genai
+                _ = patch_google_client
+                patch_google_genai()
+            except Exception as e:
+                logger.debug(f"[OTel Google Handler] Image interception not applied: {e}")
+            logger.info("[OTel Google Handler] Instrumentation enabled")
+        except Exception as e:
+            logger.error(f"Failed to enable Google Generative AI instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        logger.info("[OTel Google Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelVertexAIHandler(BaseProvider):
+    """Vertex AI handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "Vertex AI"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        return response
+
+    def override(self):
+        try:
+            from lucidicai.client import Client
+            client = Client()
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+            self.telemetry.instrument_providers(["vertexai"])
+            # Best-effort image interception for Vertex AI clients where applicable
+            try:
+                from .utils.universal_image_interceptor import patch_vertexai_client
+                _ = patch_vertexai_client
+            except Exception as e:
+                logger.debug(f"[OTel Vertex Handler] Image interception not applied: {e}")
+            logger.info("[OTel Vertex Handler] Instrumentation enabled")
+        except Exception as e:
+            logger.error(f"Failed to enable Vertex AI instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        logger.info("[OTel Vertex Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelCohereHandler(BaseProvider):
+    """Cohere handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "Cohere"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        return response
+
+    def override(self):
+        try:
+            from lucidicai.client import Client
+            client = Client()
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+            self.telemetry.instrument_providers(["cohere"])
+            logger.info("[OTel Cohere Handler] Instrumentation enabled")
+        except Exception as e:
+            logger.error(f"Failed to enable Cohere instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        logger.info("[OTel Cohere Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelGroqHandler(BaseProvider):
+    """Groq handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "Groq"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        return response
+
+    def override(self):
+        try:
+            from lucidicai.client import Client
+            client = Client()
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+            self.telemetry.instrument_providers(["groq"])
+            # Best-effort image interception for Groq (OpenAI-compatible)
+            try:
+                import groq  # noqa: F401
+                from .utils.universal_image_interceptor import UniversalImageInterceptor
+                # We cannot reliably patch class constructors here without instance; users calling Groq client
+                # will still have images captured via OpenLLMetry attributes; optional future improvement.
+                _ = UniversalImageInterceptor
+            except Exception as e:
+                logger.debug(f"[OTel Groq Handler] Image interception not applied: {e}")
+            logger.info("[OTel Groq Handler] Instrumentation enabled")
+        except Exception as e:
+            logger.error(f"Failed to enable Groq instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        logger.info("[OTel Groq Handler] Instrumentation will be disabled on shutdown")