lucidicai 1.2.15 → 1.2.17 (py3-none-any.whl)
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- lucidicai/__init__.py +111 -21
- lucidicai/client.py +22 -5
- lucidicai/decorators.py +357 -0
- lucidicai/event.py +2 -2
- lucidicai/image_upload.py +24 -1
- lucidicai/providers/anthropic_handler.py +0 -7
- lucidicai/providers/image_storage.py +45 -0
- lucidicai/providers/langchain.py +0 -78
- lucidicai/providers/lucidic_exporter.py +259 -0
- lucidicai/providers/lucidic_span_processor.py +648 -0
- lucidicai/providers/openai_agents_instrumentor.py +307 -0
- lucidicai/providers/openai_handler.py +1 -56
- lucidicai/providers/otel_handlers.py +266 -0
- lucidicai/providers/otel_init.py +197 -0
- lucidicai/providers/otel_provider.py +168 -0
- lucidicai/providers/pydantic_ai_handler.py +2 -19
- lucidicai/providers/text_storage.py +53 -0
- lucidicai/providers/universal_image_interceptor.py +276 -0
- lucidicai/session.py +17 -4
- lucidicai/step.py +4 -4
- lucidicai/streaming.py +2 -3
- lucidicai/telemetry/__init__.py +0 -0
- lucidicai/telemetry/base_provider.py +21 -0
- lucidicai/telemetry/lucidic_exporter.py +259 -0
- lucidicai/telemetry/lucidic_span_processor.py +665 -0
- lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
- lucidicai/telemetry/opentelemetry_converter.py +436 -0
- lucidicai/telemetry/otel_handlers.py +266 -0
- lucidicai/telemetry/otel_init.py +197 -0
- lucidicai/telemetry/otel_provider.py +168 -0
- lucidicai/telemetry/pydantic_ai_handler.py +600 -0
- lucidicai/telemetry/utils/__init__.py +0 -0
- lucidicai/telemetry/utils/image_storage.py +45 -0
- lucidicai/telemetry/utils/text_storage.py +53 -0
- lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
- {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/METADATA +1 -1
- lucidicai-1.2.17.dist-info/RECORD +49 -0
- lucidicai-1.2.15.dist-info/RECORD +0 -25
- {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/WHEEL +0 -0
- {lucidicai-1.2.15.dist-info → lucidicai-1.2.17.dist-info}/top_level.txt +0 -0
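
The headline change in 1.2.17 is the new `lucidicai/telemetry` package, which largely mirrors the OpenTelemetry modules added under `lucidicai/providers` (exporter, span processor, OTel handlers, and the Agents SDK instrumentor). To confirm which build is installed, a standard-library check is enough; a minimal sketch:

```python
from importlib.metadata import version

# Prints the installed wheel's version, e.g. "1.2.17" after upgrading.
print(version("lucidicai"))
```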
lucidicai/providers/openai_agents_instrumentor.py (new file, +307)

```diff
@@ -0,0 +1,307 @@
+"""OpenAI Agents SDK instrumentor that hooks into OpenAI API calls"""
+import logging
+from typing import Any, Dict, Optional, List
+from opentelemetry import trace
+from opentelemetry.trace import Status, StatusCode, SpanKind
+from contextlib import contextmanager
+import threading
+import json
+
+logger = logging.getLogger("Lucidic")
+
+# Thread-local storage for context
+_thread_local = threading.local()
+
+
+class OpenAIAgentsInstrumentor:
+    """instrumentor that captures OpenAI API calls within agent runs"""
+
+    def __init__(self, tracer_provider=None):
+        self._tracer_provider = tracer_provider or trace.get_tracer_provider()
+        self._tracer = self._tracer_provider.get_tracer(__name__)
+        self._is_instrumented = False
+        self._original_openai_create = None
+
+    def instrument(self):
+        """Enable instrumentation"""
+        if self._is_instrumented:
+            logger.warning("OpenAI Agents SDK already instrumented")
+            return
+
+        try:
+            # First, patch OpenAI to capture API calls
+            self._patch_openai()
+
+            # Then set up agents tracing
+            from agents import set_trace_processors
+            from agents.tracing.processors import TracingProcessor
+
+            processor = OpenAIAgentsTracingProcessor(self)
+            set_trace_processors([processor])
+
+            self._is_instrumented = True
+            logger.info("OpenAI Agents SDK instrumentation enabled")
+
+        except Exception as e:
+            logger.error(f"Failed to instrument OpenAI Agents SDK: {e}")
+            raise
+
+    def uninstrument(self):
+        """Disable instrumentation"""
+        if not self._is_instrumented:
+            return
+
+        try:
+            # Restore OpenAI
+            self._unpatch_openai()
+
+            # Restore default processor
+            from agents import set_trace_processors
+            from agents.tracing.processors import default_processor
+            set_trace_processors([default_processor])
+
+            self._is_instrumented = False
+            logger.info("OpenAI Agents SDK instrumentation disabled")
+
+        except Exception as e:
+            logger.error(f"Failed to uninstrument: {e}")
+
+    def _patch_openai(self):
+        """Patch OpenAI client to capture messages"""
+        try:
+            import openai
+
+            # Store original
+            self._original_openai_create = openai.chat.completions.create
+
+            def wrapped_create(*args, **kwargs):
+                # Capture the messages
+                messages = kwargs.get('messages', [])
+
+                # Store in thread local
+                if not hasattr(_thread_local, 'current_messages'):
+                    _thread_local.current_messages = []
+                _thread_local.current_messages = messages
+
+                # Call original
+                response = self._original_openai_create(*args, **kwargs)
+
+                # Store response
+                _thread_local.current_response = response
+
+                return response
+
+            # Replace
+            openai.chat.completions.create = wrapped_create
+            logger.debug("Patched OpenAI chat.completions.create")
+
+        except Exception as e:
+            logger.error(f"Failed to patch OpenAI: {e}")
+
+    def _unpatch_openai(self):
+        """Restore OpenAI client"""
+        if self._original_openai_create:
+            try:
+                import openai
+                openai.chat.completions.create = self._original_openai_create
+                logger.debug("Restored OpenAI chat.completions.create")
+            except:
+                pass
+
+
+class OpenAIAgentsTracingProcessor:
+    """processor that captures richer data"""
+
+    def __init__(self, instrumentor: OpenAIAgentsInstrumentor):
+        self.instrumentor = instrumentor
+        self.tracer = instrumentor._tracer
+        self._active_spans = {}
+        self._agent_context = {}  # Store agent context
+
+    def on_span_start(self, span_data: Any) -> None:
+        """Called when a span starts"""
+        try:
+            span_id = str(id(span_data))
+            actual_data = getattr(span_data, 'span_data', span_data)
+            data_type = actual_data.__class__.__name__
+
+            # Create span name
+            if hasattr(actual_data, 'name'):
+                span_name = f"openai.agents.{actual_data.name}"
+                agent_name = actual_data.name
+            else:
+                span_name = f"openai.agents.{data_type}"
+                agent_name = data_type
+
+            # For agent spans, store context
+            if data_type == "AgentSpanData":
+                self._agent_context[agent_name] = {
+                    'instructions': getattr(actual_data, 'instructions', None),
+                    'name': agent_name
+                }
+
+            # Create span
+            otel_span = self.tracer.start_span(
+                name=span_name,
+                kind=SpanKind.INTERNAL,
+                attributes={
+                    "gen_ai.system": "openai_agents",
+                    "gen_ai.operation.name": data_type.lower().replace("spandata", ""),
+                }
+            )
+
+            # Add agent name
+            if hasattr(actual_data, 'name'):
+                otel_span.set_attribute("gen_ai.agent.name", actual_data.name)
+
+            self._active_spans[span_id] = {
+                'span': otel_span,
+                'type': data_type,
+                'data': actual_data
+            }
+
+        except Exception as e:
+            logger.error(f"Error in on_span_start: {e}")
+
+    def on_span_end(self, span_data: Any) -> None:
+        """Called when a span ends"""
+        try:
+            span_id = str(id(span_data))
+
+            if span_id not in self._active_spans:
+                return
+
+            span_info = self._active_spans.pop(span_id)
+            otel_span = span_info['span']
+            data_type = span_info['type']
+            actual_data = getattr(span_data, 'span_data', span_data)
+
+            # Handle different span types
+            if data_type == "ResponseSpanData":
+                self._handle_response_span(otel_span, actual_data)
+            elif data_type == "FunctionSpanData":
+                self._handle_function_span(otel_span, actual_data)
+            elif data_type == "AgentSpanData":
+                self._handle_agent_span(otel_span, actual_data)
+
+            # Set status and end
+            otel_span.set_status(Status(StatusCode.OK))
+            otel_span.end()
+
+        except Exception as e:
+            logger.error(f"Error in on_span_end: {e}")
+
+    def _handle_response_span(self, otel_span: Any, span_data: Any) -> None:
+        """Handle response span - this is where we capture prompts and completions"""
+        try:
+            # Log what we're working with
+            logger.debug(f"Handling response span, span_data type: {type(span_data)}")
+
+            # First check span_data.input for user messages
+            prompt_index = 0
+
+            # Get instructions (system prompt) from response
+            if hasattr(span_data, 'response') and span_data.response:
+                resp = span_data.response
+                if hasattr(resp, 'instructions') and resp.instructions:
+                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", "system")
+                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", str(resp.instructions)[:2048])
+                    prompt_index += 1
+
+            # Get user messages from span_data.input
+            if hasattr(span_data, 'input') and span_data.input:
+                if isinstance(span_data.input, list):
+                    # Input is a list of messages
+                    for msg in span_data.input:
+                        if isinstance(msg, dict):
+                            role = msg.get('role', '')
+                            content = msg.get('content', '')
+                            otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", role)
+                            otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", str(content)[:2048])
+                            prompt_index += 1
+                elif isinstance(span_data.input, str):
+                    # Input is a string
+                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", "user")
+                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", str(span_data.input)[:2048])
+                    prompt_index += 1
+
+            # Get response from output
+            if hasattr(span_data, 'response') and span_data.response:
+                resp = span_data.response
+
+                # Look for the assistant's response in output
+                if hasattr(resp, 'output') and resp.output:
+                    for item in resp.output:
+                        if hasattr(item, 'type') and item.type == 'message':
+                            if hasattr(item, 'content'):
+                                content = item.content
+                                if isinstance(content, list):
+                                    # Extract text
+                                    texts = []
+                                    for c in content:
+                                        if hasattr(c, 'text'):
+                                            texts.append(c.text)
+                                    if texts:
+                                        content = " ".join(texts)
+
+                                otel_span.set_attribute("gen_ai.completion.0.role", "assistant")
+                                otel_span.set_attribute("gen_ai.completion.0.content", str(content)[:2048])
+                                break
+
+            # Note: Response extraction from thread local was removed since we already
+            # extract the completion from span_data.response.output above
+
+            # Set model and usage
+            if hasattr(span_data, 'response') and span_data.response:
+                resp = span_data.response
+                if hasattr(resp, 'model'):
+                    otel_span.set_attribute("gen_ai.response.model", resp.model)
+                    otel_span.set_attribute("gen_ai.request.model", resp.model)
+
+                if hasattr(resp, 'usage') and resp.usage:
+                    usage = resp.usage
+                    if hasattr(usage, 'input_tokens'):
+                        otel_span.set_attribute("gen_ai.usage.prompt_tokens", usage.input_tokens)
+                    if hasattr(usage, 'output_tokens'):
+                        otel_span.set_attribute("gen_ai.usage.completion_tokens", usage.output_tokens)
+                    if hasattr(usage, 'total_tokens'):
+                        otel_span.set_attribute("gen_ai.usage.total_tokens", usage.total_tokens)
+
+        except Exception as e:
+            logger.error(f"Error handling response span: {e}")
+
+    def _handle_function_span(self, otel_span: Any, span_data: Any) -> None:
+        """Handle function/tool spans"""
+        if hasattr(span_data, 'name'):
+            otel_span.set_attribute("gen_ai.tool.name", span_data.name)
+
+        if hasattr(span_data, 'input'):
+            otel_span.set_attribute("gen_ai.tool.parameters", json.dumps(span_data.input)[:500])
+
+        if hasattr(span_data, 'output'):
+            otel_span.set_attribute("gen_ai.tool.result", str(span_data.output)[:500])
+
+    def _handle_agent_span(self, otel_span: Any, span_data: Any) -> None:
+        """Handle agent spans"""
+        # Agent spans typically don't have much data at end
+        pass
+
+    def on_trace_start(self, trace_data: Any) -> None:
+        """Called when a trace starts"""
+        # Clear thread local
+        if hasattr(_thread_local, 'current_messages'):
+            del _thread_local.current_messages
+        if hasattr(_thread_local, 'current_response'):
+            del _thread_local.current_response
+
+    def on_trace_end(self, trace_data: Any) -> None:
+        """Called when a trace ends"""
+        pass
+
+    def force_flush(self, timeout_seconds: float = 30.0) -> bool:
+        """Force flush"""
+        return True
+
+    def shutdown(self) -> None:
+        """Shutdown"""
+        pass
```
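As the diff shows, `instrument()` does two things at once: it monkey-patches `openai.chat.completions.create` to stash messages and responses in thread-local storage, and it replaces the Agents SDK's trace processors with `OpenAIAgentsTracingProcessor`, which maps Agents SDK spans onto OTel spans carrying `gen_ai.*` attributes. A minimal usage sketch, assuming the `agents` SDK is installed and substituting a plain OTel SDK `TracerProvider` for the one Lucidic's telemetry stack would normally supply:

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter

from lucidicai.providers.openai_agents_instrumentor import OpenAIAgentsInstrumentor

# A throwaway provider that prints spans to stdout; in the SDK's own flow,
# LucidicTelemetry owns the provider instead.
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))

instrumentor = OpenAIAgentsInstrumentor(tracer_provider=provider)
instrumentor.instrument()    # patches OpenAI and installs the trace processor

# ... run agents here; spans arrive named "openai.agents.<agent name>" ...

instrumentor.uninstrument()  # restores openai.chat.completions.create
```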
lucidicai/providers/openai_handler.py

```diff
@@ -122,20 +122,7 @@ class OpenAIHandler(BaseProvider):
         if session is None:
             logger.info(f"[OpenAI Handler] No session, skipping tracking")
             return await original_method(*args, **kwargs)
-
-        # Auto-create step if no active step exists
-        if session.active_step is None:
-            logger.info(f"[OpenAI Handler] No active step, auto-creating step")
-            try:
-                step_id = session.create_step(
-                    state="Auto-created step for API call",
-                    action=f"Execute {method_name}",
-                    goal="Process API request"
-                )
-                logger.info(f"[OpenAI Handler] Created step: {step_id}")
-            except Exception as e:
-                logger.error(f"[OpenAI Handler] Failed to auto-create step: {e}")
-                return await original_method(*args, **kwargs)
+
 
         # Prepare kwargs
         self._prepare_streaming_kwargs(method_name, kwargs)
```
```diff
@@ -157,20 +144,6 @@ class OpenAIHandler(BaseProvider):
             logger.info(f"[OpenAI Handler] No session, skipping tracking")
             return original_method(*args, **kwargs)
 
-        # Auto-create step if no active step exists
-        if session.active_step is None:
-            logger.info(f"[OpenAI Handler] No active step, auto-creating step")
-            try:
-                step_id = session.create_step(
-                    state="Auto-created step for API call",
-                    action=f"Execute {method_name}",
-                    goal="Process API request"
-                )
-                logger.info(f"[OpenAI Handler] Created step: {step_id}")
-            except Exception as e:
-                logger.error(f"[OpenAI Handler] Failed to auto-create step: {e}")
-                return original_method(*args, **kwargs)
-
         # Prepare kwargs
         self._prepare_streaming_kwargs(method_name, kwargs)
 
```
```diff
@@ -394,20 +367,6 @@ class OpenAIHandler(BaseProvider):
             logger.info(f"[OpenAI Handler] No session, skipping tracking")
             return await original_method(*args, **kwargs)
 
-        # Auto-create step if no active step exists
-        if session.active_step is None:
-            logger.info(f"[OpenAI Handler] No active step, auto-creating step")
-            try:
-                step_id = session.create_step(
-                    state="Auto-created step for responses API call",
-                    action="Execute responses.create",
-                    goal="Process API request"
-                )
-                logger.info(f"[OpenAI Handler] Created step: {step_id}")
-            except Exception as e:
-                logger.error(f"[OpenAI Handler] Failed to auto-create step: {e}")
-                return await original_method(*args, **kwargs)
-
         # Check for agent context
         agent_name = self._get_agent_name_from_input(kwargs.get('input', []))
 
```
```diff
@@ -500,20 +459,6 @@ class OpenAIHandler(BaseProvider):
             logger.info(f"[OpenAI Handler] No session, skipping tracking")
             return original_method(*args, **kwargs)
 
-        # Auto-create step if no active step exists
-        if session.active_step is None:
-            logger.info(f"[OpenAI Handler] No active step, auto-creating step")
-            try:
-                step_id = session.create_step(
-                    state="Auto-created step for responses API call",
-                    action="Execute responses.create",
-                    goal="Process API request"
-                )
-                logger.info(f"[OpenAI Handler] Created step: {step_id}")
-            except Exception as e:
-                logger.error(f"[OpenAI Handler] Failed to auto-create step: {e}")
-                return original_method(*args, **kwargs)
-
         # Check for agent context
         agent_name = self._get_agent_name_from_input(kwargs.get('input', []))
 
```
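All four hunks above remove the same fallback: as of 1.2.17 the handler no longer auto-creates a step when `session.active_step` is `None`; it simply proceeds with the call. Code that relied on that fallback now has to open a step itself. A hypothetical helper, reusing the `session.create_step(state=..., action=..., goal=...)` signature visible in the removed lines:

```python
def tracked_completion(session, client, messages, model="gpt-4o"):
    """Open a step explicitly before the LLM call, since the 1.2.17
    handler no longer creates one on the caller's behalf."""
    if session.active_step is None:
        # Keyword arguments mirror the removed auto-creation code;
        # the string values here are illustrative, not prescribed.
        session.create_step(
            state="Preparing API call",
            action="Execute chat.completions.create",
            goal="Process API request",
        )
    return client.chat.completions.create(model=model, messages=messages)
```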
otel_handlers.py (new file, +266; per the file list above, same-sized copies land under both lucidicai/providers/ and lucidicai/telemetry/)

```diff
@@ -0,0 +1,266 @@
+"""OpenTelemetry-based handlers that maintain backward compatibility"""
+import logging
+from typing import Optional
+
+from .base_providers import BaseProvider
+from .otel_init import LucidicTelemetry
+
+logger = logging.getLogger("Lucidic")
+
+
+class OTelOpenAIHandler(BaseProvider):
+    """OpenAI handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "OpenAI"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        """Not needed with OpenTelemetry approach"""
+        return response
+
+    def override(self):
+        """Enable OpenAI instrumentation"""
+        try:
+            from lucidicai.client import Client
+            client = Client()
+
+            # Initialize telemetry if needed
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+
+            # Instrument OpenAI
+            self.telemetry.instrument_providers(["openai"])
+
+            # Also patch OpenAI client to intercept images
+            try:
+                import openai
+                from .universal_image_interceptor import UniversalImageInterceptor, patch_openai_client
+
+                # Create interceptor for OpenAI
+                interceptor = UniversalImageInterceptor.create_interceptor("openai")
+
+                # Patch the module-level create method
+                if hasattr(openai, 'ChatCompletion'):
+                    # Old API
+                    original = openai.ChatCompletion.create
+                    openai.ChatCompletion.create = interceptor(original)
+
+                # Also patch any client instances that might be created
+                original_client_init = openai.OpenAI.__init__
+                def patched_init(self, *args, **kwargs):
+                    original_client_init(self, *args, **kwargs)
+                    # Patch this instance
+                    patch_openai_client(self)
+
+                openai.OpenAI.__init__ = patched_init
+
+                # Also patch AsyncOpenAI
+                if hasattr(openai, 'AsyncOpenAI'):
+                    original_async_init = openai.AsyncOpenAI.__init__
+                    def patched_async_init(self, *args, **kwargs):
+                        original_async_init(self, *args, **kwargs)
+                        # Patch this instance
+                        patch_openai_client(self)
+
+                    openai.AsyncOpenAI.__init__ = patched_async_init
+
+            except Exception as e:
+                logger.warning(f"Could not patch OpenAI for image interception: {e}")
+
+            logger.info("[OTel OpenAI Handler] Instrumentation enabled")
+
+        except Exception as e:
+            logger.error(f"Failed to enable OpenAI instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        """Disable instrumentation"""
+        # Telemetry uninstrumentation is handled globally
+        logger.info("[OTel OpenAI Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelAnthropicHandler(BaseProvider):
+    """Anthropic handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "Anthropic"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        """Not needed with OpenTelemetry approach"""
+        return response
+
+    def override(self):
+        """Enable Anthropic instrumentation"""
+        try:
+            from lucidicai.client import Client
+            client = Client()
+
+            # Initialize telemetry if needed
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+
+            # Instrument Anthropic
+            self.telemetry.instrument_providers(["anthropic"])
+
+            # Also patch Anthropic client to intercept images
+            try:
+                import anthropic
+                from .universal_image_interceptor import UniversalImageInterceptor, patch_anthropic_client
+
+                # Create interceptors for Anthropic
+                interceptor = UniversalImageInterceptor.create_interceptor("anthropic")
+                async_interceptor = UniversalImageInterceptor.create_async_interceptor("anthropic")
+
+                # Patch any client instances that might be created
+                original_client_init = anthropic.Anthropic.__init__
+                def patched_init(self, *args, **kwargs):
+                    original_client_init(self, *args, **kwargs)
+                    # Patch this instance
+                    patch_anthropic_client(self)
+
+                anthropic.Anthropic.__init__ = patched_init
+
+                # Also patch async client
+                if hasattr(anthropic, 'AsyncAnthropic'):
+                    original_async_init = anthropic.AsyncAnthropic.__init__
+                    def patched_async_init(self, *args, **kwargs):
+                        original_async_init(self, *args, **kwargs)
+                        # Patch this instance
+                        patch_anthropic_client(self)
+
+                    anthropic.AsyncAnthropic.__init__ = patched_async_init
+
+            except Exception as e:
+                logger.warning(f"Could not patch Anthropic for image interception: {e}")
+
+            logger.info("[OTel Anthropic Handler] Instrumentation enabled")
+
+        except Exception as e:
+            logger.error(f"Failed to enable Anthropic instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        """Disable instrumentation"""
+        logger.info("[OTel Anthropic Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelLangChainHandler(BaseProvider):
+    """LangChain handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "LangChain"
+        self.telemetry = LucidicTelemetry()
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        """Not needed with OpenTelemetry approach"""
+        return response
+
+    def override(self):
+        """Enable LangChain instrumentation"""
+        try:
+            from lucidicai.client import Client
+            client = Client()
+
+            # Initialize telemetry if needed
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+
+            # Instrument LangChain
+            self.telemetry.instrument_providers(["langchain"])
+
+            logger.info("[OTel LangChain Handler] Instrumentation enabled")
+
+        except Exception as e:
+            logger.error(f"Failed to enable LangChain instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        """Disable instrumentation"""
+        logger.info("[OTel LangChain Handler] Instrumentation will be disabled on shutdown")
+
+
+class OTelPydanticAIHandler(BaseProvider):
+    """Pydantic AI handler - requires custom implementation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "PydanticAI"
+        self.telemetry = LucidicTelemetry()
+        self._original_methods = {}
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        """Handle Pydantic AI responses"""
+        return response
+
+    def override(self):
+        """Enable Pydantic AI instrumentation"""
+        try:
+            from lucidicai.client import Client
+            client = Client()
+
+            # Initialize telemetry if needed
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+
+            # For now, we'll use the original Pydantic AI handler
+            # until OpenLLMetry adds support
+            from .pydantic_ai_handler import PydanticAIHandler
+            self._fallback_handler = PydanticAIHandler()
+            self._fallback_handler.override()
+
+            logger.info("[OTel PydanticAI Handler] Using fallback handler until OpenLLMetry support is available")
+
+        except Exception as e:
+            logger.error(f"Failed to enable Pydantic AI instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        """Disable instrumentation"""
+        if hasattr(self, '_fallback_handler'):
+            self._fallback_handler.undo_override()
+        logger.info("[OTel PydanticAI Handler] Instrumentation disabled")
+
+
+class OTelOpenAIAgentsHandler(BaseProvider):
+    """OpenAI Agents handler using OpenTelemetry instrumentation"""
+
+    def __init__(self):
+        super().__init__()
+        self._provider_name = "OpenAI Agents"
+        self.telemetry = LucidicTelemetry()
+        self._is_instrumented = False
+
+    def handle_response(self, response, kwargs, session: Optional = None):
+        """Not needed with OpenTelemetry approach"""
+        return response
+
+    def override(self):
+        """Enable OpenAI Agents instrumentation"""
+        try:
+            from lucidicai.client import Client
+            client = Client()
+
+            # Initialize telemetry if needed
+            if not self.telemetry.is_initialized():
+                self.telemetry.initialize(agent_id=client.agent_id)
+
+            # Only instrument OpenAI Agents (it will handle OpenAI calls internally)
+            self.telemetry.instrument_providers(["openai_agents"])
+
+            self._is_instrumented = True
+
+            logger.info("[OTel OpenAI Agents Handler] Full instrumentation enabled")
+
+        except Exception as e:
+            logger.error(f"Failed to enable OpenAI Agents instrumentation: {e}")
+            raise
+
+    def undo_override(self):
+        """Disable instrumentation"""
+        self._is_instrumented = False
+        logger.info("[OTel OpenAI Agents Handler] Instrumentation will be disabled on shutdown")
```
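Every handler here follows the same lifecycle: `override()` initializes the shared `LucidicTelemetry` object with the client's `agent_id` if it is not already initialized, instruments the matching provider, and (for OpenAI and Anthropic) wraps client `__init__` so new instances get image interception; actual teardown is deferred to shutdown. A minimal sketch, assuming a Lucidic `Client` has already been configured and importing the providers/ copy of the module:

```python
from lucidicai.providers.otel_handlers import OTelOpenAIHandler

handler = OTelOpenAIHandler()
handler.override()       # initializes telemetry, instruments openai, patches clients

# ... OpenAI calls made from here on are traced ...

handler.undo_override()  # per the code above, real uninstrumentation happens at shutdown
```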