lucidicai 1.2.16__py3-none-any.whl → 1.2.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai/__init__.py +105 -30
- lucidicai/client.py +10 -4
- lucidicai/decorators.py +357 -0
- lucidicai/image_upload.py +24 -1
- lucidicai/providers/image_storage.py +45 -0
- lucidicai/providers/lucidic_exporter.py +259 -0
- lucidicai/providers/lucidic_span_processor.py +648 -0
- lucidicai/providers/openai_agents_instrumentor.py +307 -0
- lucidicai/providers/otel_handlers.py +266 -0
- lucidicai/providers/otel_init.py +197 -0
- lucidicai/providers/otel_provider.py +168 -0
- lucidicai/providers/pydantic_ai_handler.py +1 -1
- lucidicai/providers/text_storage.py +53 -0
- lucidicai/providers/universal_image_interceptor.py +276 -0
- lucidicai/session.py +9 -1
- lucidicai/telemetry/__init__.py +0 -0
- lucidicai/telemetry/base_provider.py +21 -0
- lucidicai/telemetry/lucidic_exporter.py +259 -0
- lucidicai/telemetry/lucidic_span_processor.py +665 -0
- lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
- lucidicai/telemetry/opentelemetry_converter.py +436 -0
- lucidicai/telemetry/otel_handlers.py +266 -0
- lucidicai/telemetry/otel_init.py +197 -0
- lucidicai/telemetry/otel_provider.py +168 -0
- lucidicai/telemetry/pydantic_ai_handler.py +600 -0
- lucidicai/telemetry/utils/__init__.py +0 -0
- lucidicai/telemetry/utils/image_storage.py +45 -0
- lucidicai/telemetry/utils/text_storage.py +53 -0
- lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
- {lucidicai-1.2.16.dist-info → lucidicai-1.2.18.dist-info}/METADATA +1 -1
- lucidicai-1.2.18.dist-info/RECORD +49 -0
- lucidicai-1.2.16.dist-info/RECORD +0 -25
- {lucidicai-1.2.16.dist-info → lucidicai-1.2.18.dist-info}/WHEEL +0 -0
- {lucidicai-1.2.16.dist-info → lucidicai-1.2.18.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,306 @@
|
|
|
1
|
+
"""OpenAI Agents SDK instrumentor that hooks into OpenAI API calls"""
import logging
from typing import Any, Dict, Optional, List
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode, SpanKind
import threading
import json

# Shared logger for the Lucidic SDK (same name used across the package).
logger = logging.getLogger("Lucidic")

# Thread-local storage for context
# Holds the most recent chat messages/response captured by the patched
# OpenAI client so the tracing processor can correlate them per-thread.
_thread_local = threading.local()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class OpenAIAgentsInstrumentor:
    """Instrumentor that captures OpenAI API calls made within agent runs.

    Patches ``openai.chat.completions.create`` so the request messages and
    response are stashed in thread-local storage, and installs a tracing
    processor into the OpenAI Agents SDK so agent spans are mirrored into
    OpenTelemetry.
    """

    def __init__(self, tracer_provider=None):
        """Create the instrumentor.

        Args:
            tracer_provider: Optional OpenTelemetry tracer provider; falls
                back to the globally configured provider when omitted.
        """
        self._tracer_provider = tracer_provider or trace.get_tracer_provider()
        self._tracer = self._tracer_provider.get_tracer(__name__)
        self._is_instrumented = False
        # Original openai.chat.completions.create, kept so we can restore it.
        self._original_openai_create = None

    def instrument(self):
        """Enable instrumentation (idempotent).

        Raises:
            Exception: re-raised if patching OpenAI or installing the agents
                trace processor fails.
        """
        if self._is_instrumented:
            logger.warning("OpenAI Agents SDK already instrumented")
            return

        try:
            # First, patch OpenAI to capture API calls.
            self._patch_openai()

            # Then route agents tracing through our processor.
            from agents import set_trace_processors

            processor = OpenAIAgentsTracingProcessor(self)
            set_trace_processors([processor])

            self._is_instrumented = True
            logger.info("OpenAI Agents SDK instrumentation enabled")

        except Exception as e:
            logger.error(f"Failed to instrument OpenAI Agents SDK: {e}")
            raise

    def uninstrument(self):
        """Disable instrumentation and restore defaults (idempotent)."""
        if not self._is_instrumented:
            return

        try:
            # Restore the original OpenAI entry point.
            self._unpatch_openai()

            # Restore the agents SDK default trace processor.
            from agents import set_trace_processors
            from agents.tracing.processors import default_processor
            set_trace_processors([default_processor])

            self._is_instrumented = False
            logger.info("OpenAI Agents SDK instrumentation disabled")

        except Exception as e:
            logger.error(f"Failed to uninstrument: {e}")

    def _patch_openai(self):
        """Patch ``openai.chat.completions.create`` to capture messages."""
        try:
            import functools
            import openai

            # Keep the original so _unpatch_openai can restore it.
            self._original_openai_create = openai.chat.completions.create

            @functools.wraps(self._original_openai_create)
            def wrapped_create(*args, **kwargs):
                # Capture the request messages for this thread.
                messages = kwargs.get('messages', [])
                _thread_local.current_messages = messages

                # Delegate to the real client.
                response = self._original_openai_create(*args, **kwargs)

                # Capture the response for this thread.
                _thread_local.current_response = response

                return response

            openai.chat.completions.create = wrapped_create
            logger.debug("Patched OpenAI chat.completions.create")

        except Exception as e:
            logger.error(f"Failed to patch OpenAI: {e}")

    def _unpatch_openai(self):
        """Restore the original ``openai.chat.completions.create``."""
        if self._original_openai_create:
            try:
                import openai
                openai.chat.completions.create = self._original_openai_create
                logger.debug("Restored OpenAI chat.completions.create")
            except Exception as e:
                # Best-effort cleanup; never propagate from a restore.
                logger.debug(f"Could not restore OpenAI create: {e}")
            finally:
                # Drop the stale reference so a second call is a no-op.
                self._original_openai_create = None
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
class OpenAIAgentsTracingProcessor:
    """Agents-SDK trace processor that mirrors agent spans into OpenTelemetry.

    Each SDK span is turned into an OTel span carrying ``gen_ai.*``
    semantic-convention attributes (prompts, completions, model, usage).
    """

    def __init__(self, instrumentor: "OpenAIAgentsInstrumentor"):
        self.instrumentor = instrumentor
        self.tracer = instrumentor._tracer
        # id(span_data) -> {'span': otel span, 'type': class name, 'data': payload}
        self._active_spans = {}
        # agent name -> stored context (instructions), kept for correlation
        self._agent_context = {}

    def on_span_start(self, span_data: Any) -> None:
        """Called when an agents-SDK span starts; opens a matching OTel span."""
        try:
            # Keyed by object identity: the SDK passes the same object to
            # on_span_end, so id() lets us pair start/end.
            span_id = str(id(span_data))
            # The SDK wraps the payload in .span_data; fall back to the object.
            actual_data = getattr(span_data, 'span_data', span_data)
            data_type = actual_data.__class__.__name__

            # Derive a readable span name.
            if hasattr(actual_data, 'name'):
                span_name = f"openai.agents.{actual_data.name}"
                agent_name = actual_data.name
            else:
                span_name = f"openai.agents.{data_type}"
                agent_name = data_type

            # For agent spans, remember the instructions (system prompt).
            if data_type == "AgentSpanData":
                self._agent_context[agent_name] = {
                    'instructions': getattr(actual_data, 'instructions', None),
                    'name': agent_name
                }

            otel_span = self.tracer.start_span(
                name=span_name,
                kind=SpanKind.INTERNAL,
                attributes={
                    "gen_ai.system": "openai_agents",
                    # e.g. "ResponseSpanData" -> "response"
                    "gen_ai.operation.name": data_type.lower().replace("spandata", ""),
                }
            )

            if hasattr(actual_data, 'name'):
                otel_span.set_attribute("gen_ai.agent.name", actual_data.name)

            self._active_spans[span_id] = {
                'span': otel_span,
                'type': data_type,
                'data': actual_data
            }

        except Exception as e:
            logger.error(f"Error in on_span_start: {e}")

    def on_span_end(self, span_data: Any) -> None:
        """Called when an agents-SDK span ends; enriches and ends the OTel span."""
        try:
            span_id = str(id(span_data))

            if span_id not in self._active_spans:
                return

            span_info = self._active_spans.pop(span_id)
            otel_span = span_info['span']
            data_type = span_info['type']
            actual_data = getattr(span_data, 'span_data', span_data)

            # Always end the span even if attribute extraction raises —
            # otherwise the OTel span would leak and never be exported.
            try:
                if data_type == "ResponseSpanData":
                    self._handle_response_span(otel_span, actual_data)
                elif data_type == "FunctionSpanData":
                    self._handle_function_span(otel_span, actual_data)
                elif data_type == "AgentSpanData":
                    self._handle_agent_span(otel_span, actual_data)
            finally:
                otel_span.set_status(Status(StatusCode.OK))
                otel_span.end()

        except Exception as e:
            logger.error(f"Error in on_span_end: {e}")

    def _handle_response_span(self, otel_span: Any, span_data: Any) -> None:
        """Handle a response span — capture prompts, completion, model, usage."""
        try:
            logger.debug(f"Handling response span, span_data type: {type(span_data)}")

            # gen_ai.prompt.N.* attributes are indexed in order of appearance.
            prompt_index = 0

            # System prompt: the response object's instructions, if present.
            if hasattr(span_data, 'response') and span_data.response:
                resp = span_data.response
                if hasattr(resp, 'instructions') and resp.instructions:
                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", "system")
                    # Truncate to keep attribute payloads bounded.
                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", str(resp.instructions)[:2048])
                    prompt_index += 1

            # User/history messages come from span_data.input.
            if hasattr(span_data, 'input') and span_data.input:
                if isinstance(span_data.input, list):
                    # Input is a list of message dicts.
                    for msg in span_data.input:
                        if isinstance(msg, dict):
                            role = msg.get('role', '')
                            content = msg.get('content', '')
                            otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", role)
                            otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", str(content)[:2048])
                            prompt_index += 1
                elif isinstance(span_data.input, str):
                    # A bare string is treated as a single user message.
                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", "user")
                    otel_span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", str(span_data.input)[:2048])
                    prompt_index += 1

            # The assistant's completion lives in response.output.
            if hasattr(span_data, 'response') and span_data.response:
                resp = span_data.response

                if hasattr(resp, 'output') and resp.output:
                    for item in resp.output:
                        if hasattr(item, 'type') and item.type == 'message':
                            if hasattr(item, 'content'):
                                content = item.content
                                if isinstance(content, list):
                                    # Flatten text parts into one string.
                                    texts = [c.text for c in content if hasattr(c, 'text')]
                                    if texts:
                                        content = " ".join(texts)

                                otel_span.set_attribute("gen_ai.completion.0.role", "assistant")
                                otel_span.set_attribute("gen_ai.completion.0.content", str(content)[:2048])
                                # Only the first message item is recorded.
                                break

            # Note: response extraction from thread-local was removed since we
            # already extract the completion from span_data.response.output above.

            # Model and token usage.
            if hasattr(span_data, 'response') and span_data.response:
                resp = span_data.response
                if hasattr(resp, 'model'):
                    otel_span.set_attribute("gen_ai.response.model", resp.model)
                    otel_span.set_attribute("gen_ai.request.model", resp.model)

                if hasattr(resp, 'usage') and resp.usage:
                    usage = resp.usage
                    if hasattr(usage, 'input_tokens'):
                        otel_span.set_attribute("gen_ai.usage.prompt_tokens", usage.input_tokens)
                    if hasattr(usage, 'output_tokens'):
                        otel_span.set_attribute("gen_ai.usage.completion_tokens", usage.output_tokens)
                    if hasattr(usage, 'total_tokens'):
                        otel_span.set_attribute("gen_ai.usage.total_tokens", usage.total_tokens)

        except Exception as e:
            logger.error(f"Error handling response span: {e}")

    def _handle_function_span(self, otel_span: Any, span_data: Any) -> None:
        """Handle function/tool spans — record name, parameters and result."""
        if hasattr(span_data, 'name'):
            otel_span.set_attribute("gen_ai.tool.name", span_data.name)

        if hasattr(span_data, 'input'):
            # default=str keeps non-JSON-serializable tool inputs from raising.
            otel_span.set_attribute(
                "gen_ai.tool.parameters",
                json.dumps(span_data.input, default=str)[:500]
            )

        if hasattr(span_data, 'output'):
            otel_span.set_attribute("gen_ai.tool.result", str(span_data.output)[:500])

    def _handle_agent_span(self, otel_span: Any, span_data: Any) -> None:
        """Handle agent spans (typically carry no extra data at end)."""
        pass

    def on_trace_start(self, trace_data: Any) -> None:
        """Called when a trace starts; clears this thread's captured state."""
        if hasattr(_thread_local, 'current_messages'):
            del _thread_local.current_messages
        if hasattr(_thread_local, 'current_response'):
            del _thread_local.current_response

    def on_trace_end(self, trace_data: Any) -> None:
        """Called when a trace ends (no-op)."""
        pass

    def force_flush(self, timeout_seconds: float = 30.0) -> bool:
        """Force flush; nothing is buffered here, so always succeeds."""
        return True

    def shutdown(self) -> None:
        """Shutdown (no resources to release)."""
        pass
|