openlit 1.34.29__py3-none-any.whl → 1.34.30__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/crewai/__init__.py +86 -24
- openlit/instrumentation/crewai/async_crewai.py +89 -0
- openlit/instrumentation/crewai/crewai.py +79 -131
- openlit/instrumentation/crewai/utils.py +512 -0
- openlit/instrumentation/litellm/utils.py +18 -9
- openlit/instrumentation/openai/utils.py +3 -1
- openlit/instrumentation/openai_agents/__init__.py +1 -1
- openlit/instrumentation/openai_agents/processor.py +388 -536
- openlit/semcov/__init__.py +30 -2
- {openlit-1.34.29.dist-info → openlit-1.34.30.dist-info}/METADATA +1 -1
- {openlit-1.34.29.dist-info → openlit-1.34.30.dist-info}/RECORD +13 -11
- {openlit-1.34.29.dist-info → openlit-1.34.30.dist-info}/LICENSE +0 -0
- {openlit-1.34.29.dist-info → openlit-1.34.30.dist-info}/WHEEL +0 -0
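Both the CrewAI and OpenAI Agents instrumentations touched in this release are activated through OpenLIT's top-level init call rather than imported directly. A minimal sketch of how an application would pick up the updated processors; the endpoint and names are placeholders, and the parameter names follow OpenLIT's documented init API:

import openlit

openlit.init(
    application_name="agent-demo",           # placeholder; attached to every span
    environment="staging",                   # placeholder deployment environment
    otlp_endpoint="http://localhost:4318",   # placeholder OTLP HTTP collector
)
# After init, CrewAI or OpenAI Agents code running in this process emits the
# spans defined in the diff below.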
@@ -2,19 +2,14 @@
 OpenLIT OpenAI Agents Instrumentation - Native TracingProcessor Implementation
 """
 
-import json
 import time
-from
-from typing import Any, Dict, Optional, TYPE_CHECKING
+from typing import Any, Dict, TYPE_CHECKING
 
-from opentelemetry import context as context_api
 from opentelemetry.trace import SpanKind, Status, StatusCode, set_span_in_context
-from opentelemetry.context import detach
 
 from openlit.__helpers import (
     common_framework_span_attributes,
     handle_exception,
-    record_framework_metrics,
     get_chat_model_cost
 )
 from openlit.semcov import SemanticConvention
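The only business-logic helper kept in the trimmed import list is get_chat_model_cost; the rewritten processor in the next hunk wraps it in _calculate_cost. A minimal sketch of the call as the new code uses it; the shape of the pricing_info dictionary is an assumption for illustration (openlit normally loads the real table at init time):

from openlit.__helpers import get_chat_model_cost

# Assumed layout, for illustration only: per-model prompt/completion prices.
pricing_info = {"chat": {"gpt-4o": {"promptPrice": 0.0000025, "completionPrice": 0.00001}}}

# Signature matches the diff: (model, pricing_info, prompt_tokens, completion_tokens) -> float
cost = get_chat_model_cost("gpt-4o", pricing_info, 1200, 350)
print(f"estimated cost: ${cost:.6f}")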
@@ -48,553 +43,410 @@ except ImportError:
 
 class OpenLITTracingProcessor(TracingProcessor):
     """
-
-    Provides superior business intelligence while maintaining perfect hierarchy
-    """
+    OpenAI Agents tracing processor that integrates with OpenLIT observability.
 
-
-
-
-
-            return
-
-        self._tracer = tracer
-        self._version = version
-        self._environment = environment
-        self._application_name = application_name
-        self._pricing_info = pricing_info
-        self._capture_message_content = capture_message_content
-        self._metrics = metrics
-        self._disable_metrics = disable_metrics
-        self._detailed_tracing = detailed_tracing
-
-        # Track spans for hierarchy
-        self._root_spans: Dict[str, Any] = {}
-        self._otel_spans: Dict[str, Any] = {}
-        self._tokens: Dict[str, object] = {}
-        self._span_start_times: Dict[str, float] = {}
-
-        # Track handoff context for better span naming
-        self._last_handoff_from: Optional[str] = None
-
-    def on_trace_start(self, trace: "Trace") -> None:
-        """Called when a trace is started - creates root workflow span"""
-        if not TRACING_AVAILABLE:
-            return
-
-        # Create root workflow span with {operation_type} {operation_name} format
-        workflow_name = getattr(trace, 'name', 'workflow')
-        span_name = f"agent {workflow_name}"  # Follow {operation_type} {operation_name} pattern
-
-        # Use tracer.start_span for TracingProcessor pattern with proper context
-        otel_span = self._tracer.start_span(
-            name=span_name,
-            kind=SpanKind.CLIENT
-        )
-
-        # Set common framework attributes for root span
-        self._set_common_attributes(otel_span, trace.trace_id)
-
-        # Set agent name for root span using semantic conventions
-        if hasattr(trace, 'name') and trace.name:
-            otel_span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, trace.name)
-
-        # Set default model for root span
-        otel_span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, "gpt-4o")
-
-        self._root_spans[trace.trace_id] = otel_span
-        self._span_start_times[trace.trace_id] = time.time()
-
-    def on_span_start(self, span: "Span[Any]") -> None:
-        """Called when a span is started - creates child spans with proper hierarchy"""
-        if not TRACING_AVAILABLE or not hasattr(span, 'started_at') or not span.started_at:
-            return
-
-        start_time = self._parse_timestamp(span.started_at)
-
-        # Determine parent span for proper hierarchy
-        parent_span = None
-        if span.parent_id and span.parent_id in self._otel_spans:
-            parent_span = self._otel_spans[span.parent_id]
-        elif span.trace_id in self._root_spans:
-            parent_span = self._root_spans[span.trace_id]
-
-        # Set context for parent-child relationship
-        context = set_span_in_context(parent_span) if parent_span else None
-
-        # Get semantic span name and operation type
-        span_name = self._get_span_name(span)
-        operation_type = self._get_operation_type(span.span_data)
-
-        # Create span with proper context
-        otel_span = self._tracer.start_span(
-            name=span_name,
-            context=context,
-            start_time=self._as_utc_nano(start_time),
-            kind=SpanKind.CLIENT
-        )
-
-        # Set common framework attributes for all spans
-        self._set_common_framework_attributes(otel_span, operation_type)
-
-        # Set span-specific attributes
-        self._set_span_attributes(otel_span, span)
-
-        # Track span and context
-        self._otel_spans[span.span_id] = otel_span
-        self._tokens[span.span_id] = context_api.attach(set_span_in_context(otel_span))
-        self._span_start_times[span.span_id] = time.time()
-
-    def on_span_end(self, span: "Span[Any]") -> None:
-        """Called when a span is finished - adds business intelligence and ends span"""
-        if not TRACING_AVAILABLE or span.span_id not in self._otel_spans:
-            return
-
-        otel_span = self._otel_spans[span.span_id]
+    This processor enhances OpenAI Agents' native tracing system with OpenLIT's
+    comprehensive observability features including business intelligence,
+    cost tracking, and performance metrics.
+    """
 
+    def __init__(self, tracer, version, environment, application_name,
+                 pricing_info, capture_message_content, metrics,
+                 disable_metrics, detailed_tracing, **kwargs):
+        """Initialize the OpenLIT tracing processor."""
+        super().__init__()
+
+        # Core configuration
+        self.tracer = tracer
+        self.version = version
+        self.environment = environment
+        self.application_name = application_name
+        self.pricing_info = pricing_info
+        self.capture_message_content = capture_message_content
+        self.metrics = metrics
+        self.disable_metrics = disable_metrics
+        self.detailed_tracing = detailed_tracing
+
+        # Internal tracking
+        self.active_spans = {}
+        self.span_stack = []
+
+    def start_trace(self, trace_id: str, name: str, **kwargs):
+        """
+        Start a new trace with OpenLIT enhancements.
+
+        Args:
+            trace_id: Unique trace identifier
+            name: Trace name
+            **kwargs: Additional trace metadata
+        """
         try:
-            #
-            self.
-
-            #
-
-
-
-
-
-
-
-
-
-
-
-
+            # Generate span name using OpenTelemetry conventions
+            span_name = self._get_span_name(name, **kwargs)
+
+            # Start root span with OpenLIT context
+            span = self.tracer.start_as_current_span(
+                span_name,
+                kind=SpanKind.CLIENT,
+                attributes={
+                    SemanticConvention.GEN_AI_SYSTEM: "openai_agents",
+                    SemanticConvention.GEN_AI_OPERATION:
+                        SemanticConvention.GEN_AI_OPERATION_TYPE_WORKFLOW,
+                    "trace.id": trace_id,
+                    "trace.name": name,
+                }
+            )
+
+            # Create scope for common attributes
+            scope = type("GenericScope", (), {})()
+            scope._span = span  # pylint: disable=protected-access
+            scope._start_time = time.time()  # pylint: disable=protected-access
+            scope._end_time = None  # pylint: disable=protected-access
+
+            # Apply common framework attributes
+            common_framework_span_attributes(
+                scope,
+                "openai_agents",
+                "api.openai.com",
+                443,
+                self.environment,
+                self.application_name,
+                self.version,
+                name
+            )
+
+            # Track active span
+            self.active_spans[trace_id] = span
+            self.span_stack.append(span)
+
+            return span
+
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            # Graceful degradation
+            handle_exception(None, e)
+            return None
 
-
-
-
-        # End span and cleanup
-        otel_span.end()
+    def end_trace(self, trace_id: str, **kwargs):
+        """
+        End an active trace.
 
-
-
-
-
+        Args:
+            trace_id: Trace identifier to end
+            **kwargs: Additional metadata
+        """
+        try:
+            span = self.active_spans.get(trace_id)
+            if span:
+                # Set final attributes and status
+                span.set_status(Status(StatusCode.OK))
+
+                # End span
+                span.end()
+
+                # Cleanup tracking
+                if trace_id in self.active_spans:
+                    del self.active_spans[trace_id]
+                if span in self.span_stack:
+                    self.span_stack.remove(span)
+
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            handle_exception(span if span else None, e)
+
+    def _get_span_name(self, operation_name: str, **metadata) -> str:
+        """
+        Generate OpenTelemetry-compliant span names.
+
+        Args:
+            operation_name: Base operation name
+            **metadata: Additional context for naming
+
+        Returns:
+            Formatted span name following semantic conventions
+        """
+        # Extract context for naming
+        agent_name = metadata.get('agent_name', '')
+        model_name = metadata.get('model_name', '')
+        tool_name = metadata.get('tool_name', '')
+        workflow_name = metadata.get('workflow_name', '')
+
+        # Apply OpenTelemetry semantic conventions for GenAI agents
+        if 'agent' in operation_name.lower():
+            if agent_name:
+                return f"invoke_agent {agent_name}"
+            return "invoke_agent"
+        if 'chat' in operation_name.lower():
+            if model_name:
+                return f"chat {model_name}"
+            return "chat response"
+        if 'tool' in operation_name.lower():
+            if tool_name:
+                return f"execute_tool {tool_name}"
+            return "execute_tool"
+        if 'handoff' in operation_name.lower():
+            target_agent = metadata.get('target_agent', 'unknown')
+            return f"invoke_agent {target_agent}"
+        if 'workflow' in operation_name.lower():
+            if workflow_name:
+                return f"workflow {workflow_name}"
+            return "workflow"
+
+        # Default case
+        return operation_name
+
+    def span_start(self, span_data, trace_id: str):
+        """
+        Handle span start events from OpenAI Agents.
+
+        Args:
+            span_data: Span data from agents framework
+            trace_id: Associated trace identifier
+        """
+        try:
+            # Extract span information
+            span_name = getattr(span_data, 'name', 'unknown_operation')
+            span_type = getattr(span_data, 'type', 'unknown')
+
+            # Generate enhanced span name
+            enhanced_name = self._get_span_name(
+                span_name,
+                agent_name=getattr(span_data, 'agent_name', None),
+                model_name=getattr(span_data, 'model_name', None),
+                tool_name=getattr(span_data, 'tool_name', None)
+            )
+
+            # Determine span operation type
+            operation_type = self._get_operation_type(span_type, span_name)
+
+            # Start span with proper context
+            parent_span = self.span_stack[-1] if self.span_stack else None
+            context = set_span_in_context(parent_span) if parent_span else None
+
+            span = self.tracer.start_as_current_span(
+                enhanced_name,
+                kind=SpanKind.CLIENT,
+                context=context,
+                attributes={
+                    SemanticConvention.GEN_AI_SYSTEM: "openai_agents",
+                    SemanticConvention.GEN_AI_OPERATION: operation_type,
+                    "span.type": span_type,
+                    "span.id": getattr(span_data, 'span_id', ''),
+                }
+            )
+
+            # Process specific span types
+            self._process_span_attributes(span, span_data, span_type)
+
+            # Track span
+            span_id = getattr(span_data, 'span_id', len(self.span_stack))
+            self.active_spans[f"{trace_id}:{span_id}"] = span
+            self.span_stack.append(span)
+
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            handle_exception(None, e)
+
+    def _get_operation_type(self, span_type: str, span_name: str) -> str:
+        """Get operation type based on span characteristics."""
+        type_mapping = {
+            'agent': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
+            'generation': SemanticConvention.GEN_AI_OPERATION_CHAT,
+            'function': SemanticConvention.GEN_AI_OPERATION_CHAT,
+            'tool': SemanticConvention.GEN_AI_OPERATION_CHAT,
+            'handoff': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
+        }
 
-
-
-
-
-        del self._span_start_times[span.span_id]
+        # Check span type first
+        for key, operation in type_mapping.items():
+            if key in span_type.lower():
+                return operation
 
-
-
-
-
+        # Check span name
+        for key, operation in type_mapping.items():
+            if key in span_name.lower():
+                return operation
 
-
+        return SemanticConvention.GEN_AI_OPERATION_CHAT
 
+    def _process_span_attributes(self, span, span_data, span_type: str):
+        """Process and set span attributes based on span type."""
        try:
-            #
-
-
-
-            handle_exception(root_span, e)
-        finally:
-            root_span.end()
-
-        # Cleanup
-        if trace.trace_id in self._root_spans:
-            del self._root_spans[trace.trace_id]
-        if trace.trace_id in self._span_start_times:
-            del self._span_start_times[trace.trace_id]
-
-    def _get_span_name(self, span: "Span[Any]") -> str:
-        """Get semantic span name using {operation_type} {operation_name} format"""
-        data = span.span_data
-        operation_type = self._get_operation_type(data)
-
-        # Extract operation name based on span type
-        operation_name = "unknown"
-
-        # Special handling for handoffs
-        if hasattr(data, '__class__') and data.__class__.__name__ == 'HandoffSpanData':
-            if hasattr(data, 'to_agent') and data.to_agent:
-                operation_name = f"to {data.to_agent}"
-            else:
-                operation_name = "handoff"
-
-        # Use agent name for agent spans
-        elif hasattr(data, '__class__') and data.__class__.__name__ == 'AgentSpanData':
-            # Try multiple possible attribute names for agent name
-            agent_name = None
-
-            for attr in ['agent_name', 'name', 'agent', 'agent_id']:
-                if hasattr(data, attr):
-                    agent_name = getattr(data, attr)
-                    if agent_name and isinstance(agent_name, str):
-                        break
-
-            # If still no agent name, try looking in context or other attributes
-            if not agent_name:
-                # Try context or other nested attributes
-                if hasattr(data, 'context') and hasattr(data.context, 'agent'):
-                    agent_name = getattr(data.context.agent, 'name', None)
-                elif hasattr(data, 'metadata') and hasattr(data.metadata, 'agent_name'):
-                    agent_name = data.metadata.agent_name
+            # Common attributes
+            if hasattr(span_data, 'agent_name'):
+                span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME,
+                                   span_data.agent_name)
 
-            if
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if hasattr(span_data, 'model_name'):
+                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
+                                   span_data.model_name)
+
+            # Agent-specific attributes
+            if span_type == 'agent':
+                self._process_agent_span(span, span_data)
+
+            # Generation-specific attributes
+            elif span_type == 'generation':
+                self._process_generation_span(span, span_data)
+
+            # Function/Tool-specific attributes
+            elif span_type in ['function', 'tool']:
+                self._process_function_span(span, span_data)
+
+            # Handoff-specific attributes
+            elif span_type == 'handoff':
+                self._process_handoff_span(span, span_data)
+
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            handle_exception(span, e)
+
+    def _process_agent_span(self, span, agent_span):
+        """Process agent span data (unused parameter)."""
+        # Agent-specific processing
+        if hasattr(agent_span, 'instructions'):
+            span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION,
+                               str(agent_span.instructions)[:500])
 
-        return mapping.get(class_name, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)
-
-    def _set_common_framework_attributes(self, span: Any, operation_type: str) -> None:
-        """Set common framework attributes using semantic conventions"""
-        # Create scope object for common_framework_span_attributes
-        scope = type("GenericScope", (), {})()
-        scope._span = span
-        scope._start_time = time.time()
-        scope._end_time = time.time()
-
-        # Use common framework attributes helper
-        # For framework operations, use localhost like other agent frameworks (AG2, Pydantic AI)
-        common_framework_span_attributes(
-            scope, SemanticConvention.GEN_AI_SYSTEM_OPENAI_AGENTS,
-            "localhost", 80, self._environment, self._application_name,
-            self._version, operation_type, None
-        )
-
-    def _set_common_attributes(self, span: Any, trace_id: str) -> None:
-        """Set common framework attributes for root spans"""
-        self._set_common_framework_attributes(span, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)
-
-    def _set_span_attributes(self, span: Any, agent_span: "Span[Any]") -> None:
-        """Set span-specific attributes based on span data using semantic conventions"""
-        data = agent_span.span_data
-
-        # Agent-specific attributes using semantic conventions
-        if hasattr(data, 'agent_name') and data.agent_name:
-            span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, data.agent_name)
-        elif hasattr(data, 'name') and data.name:
-            span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, data.name)
-
-        # Enhanced model information extraction
-        model = self._extract_model_info(data, agent_span)
-        if model:
-            span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, str(model))
-
-        # Enhanced input/output capture with MIME types (OpenLIT enhancement)
-        if self._capture_message_content:
-            self._capture_input_output(span, data)
-
-        # Enhanced token usage details (inspired by OpenInference)
-        self._capture_detailed_token_usage(span, data)
-
-        # Model invocation parameters as JSON (new feature from OpenInference)
-        self._capture_model_parameters(span, data)
-
-        # Tool/function information for tool calls
-        if hasattr(data, '__class__') and 'Function' in data.__class__.__name__:
-            if hasattr(data, 'function_name'):
-                span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, data.function_name)
-            if hasattr(data, 'arguments'):
-                span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(data.arguments))
-
-        # Enhanced handoff information extraction
-        if hasattr(data, '__class__') and 'Handoff' in data.__class__.__name__:
-            target_agent = self._extract_handoff_target(data, agent_span)
-            if target_agent:
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, target_agent)
-            else:
-                # Fallback for handoff spans without clear target
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, "agent handoff")
-
-        # Request/response IDs if available
-        if hasattr(data, 'request_id'):
-            span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, data.request_id)
-        elif hasattr(data, 'response_id'):
-            span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, data.response_id)
-
-    def _extract_model_info(self, data: Any, agent_span: "Span[Any]") -> Optional[str]:
-        """Extract model information from span data or agent configuration"""
-        # Try direct model attributes first
-        model_attrs = ['model', 'model_name', 'model_id', 'llm_model', 'openai_model']
-
-        model = self._check_model_attrs(data, model_attrs)
-        if model:
-            return model
-
-        # Try nested configuration objects
-        config_attrs = ['config', 'configuration', 'client_config', 'llm_config']
-        model = self._check_config_model_attrs(data, config_attrs, model_attrs)
-        if model:
-            return model
-
-        # Try looking in the agent span itself
         if hasattr(agent_span, 'model'):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Try nested objects
-        if hasattr(data, 'handoff_info'):
-            info = data.handoff_info
-            for attr in target_attrs + from_attrs:
-                if hasattr(info, attr):
-                    value = getattr(info, attr)
-                    if value and isinstance(value, str):
-                        prefix = "to" if attr in target_attrs else "from"
-                        return f"{prefix} {value}"
-
-        return None
-
-    def _capture_input_output(self, span: Any, data: Any) -> None:
-        """Capture input/output content with MIME type detection (OpenLIT enhancement)"""
+            span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
+                               agent_span.model)
+
+    def _process_generation_span(self, span, generation_span):
+        """Process generation span data."""
+        # Set generation-specific attributes
+        if hasattr(generation_span, 'prompt'):
+            span.set_attribute(SemanticConvention.GEN_AI_PROMPT,
+                               str(generation_span.prompt)[:1000])
+
+        if hasattr(generation_span, 'completion'):
+            span.set_attribute(SemanticConvention.GEN_AI_COMPLETION,
+                               str(generation_span.completion)[:1000])
+
+        if hasattr(generation_span, 'usage'):
+            usage = generation_span.usage
+            if hasattr(usage, 'prompt_tokens'):
+                span.set_attribute(SemanticConvention.GEN_AI_USAGE_PROMPT_TOKENS,
+                                   usage.prompt_tokens)
+            if hasattr(usage, 'completion_tokens'):
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_USAGE_COMPLETION_TOKENS,
+                    usage.completion_tokens
+                )
+
+    def _process_function_span(self, span, function_span):
+        """Process function/tool span data."""
+        if hasattr(function_span, 'function_name'):
+            span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME,
+                               function_span.function_name)
+
+        if hasattr(function_span, 'arguments'):
+            span.set_attribute("gen_ai.tool.arguments",
+                               str(function_span.arguments)[:500])
+
+        if hasattr(function_span, 'result'):
+            span.set_attribute("gen_ai.tool.result",
+                               str(function_span.result)[:500])
+
+    def _process_handoff_span(self, span, handoff_span):
+        """Process handoff span data."""
+        if hasattr(handoff_span, 'target_agent'):
+            span.set_attribute("gen_ai.handoff.target_agent",
+                               handoff_span.target_agent)
+
+        if hasattr(handoff_span, 'reason'):
+            span.set_attribute("gen_ai.handoff.reason",
+                               str(handoff_span.reason)[:200])
+
+    def span_end(self, span_data, trace_id: str):
+        """Handle span end events."""
        try:
-
-
-
-
-
-
-
+            span_id = getattr(span_data, 'span_id', '')
+            span_key = f"{trace_id}:{span_id}"
+
+            span = self.active_spans.get(span_key)
+            if span:
+                # Set final status
+                if hasattr(span_data, 'error') and span_data.error:
+                    span.set_status(Status(StatusCode.ERROR,
+                                           str(span_data.error)))
                else:
-                    span.
-
-
-
-                content = str(data.response)
-                span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, content)
-                # Set MIME type based on content structure
-                if content.startswith('{') or content.startswith('['):
-                    span.set_attribute("gen_ai.content.completion.mime_type", "application/json")
-                else:
-                    span.set_attribute("gen_ai.content.completion.mime_type", "text/plain")
+                    span.set_status(Status(StatusCode.OK))
+
+                # End span
+                span.end()
 
-
-
+                # Cleanup
+                if span_key in self.active_spans:
+                    del self.active_spans[span_key]
+                if span in self.span_stack:
+                    self.span_stack.remove(span)
 
-
-
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            handle_exception(span if 'span' in locals() else None, e)
+
+    def force_flush(self):
+        """Force flush all pending spans."""
        try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    def
-
+            # End any remaining spans
+            for span in list(self.active_spans.values()):
+                span.end()
+
+            self.active_spans.clear()
+            self.span_stack.clear()
+
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            handle_exception(None, e)
+
+    def shutdown(self):
+        """Shutdown the processor."""
+        self.force_flush()
+
+    def _extract_model_info(self, span_data) -> Dict[str, Any]:
+        """Extract model information from span data."""
+        model_info = {}
+
+        if hasattr(span_data, 'model'):
+            model_info['model'] = span_data.model
+        if hasattr(span_data, 'model_name'):
+            model_info['model'] = span_data.model_name
+
+        return model_info
+
+    def _calculate_cost(self, model: str, prompt_tokens: int,
+                        completion_tokens: int) -> float:
+        """Calculate cost based on token usage."""
        try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        except Exception:
-            pass  # Ignore export errors
-
-    def _process_span_completion(self, span: Any, agent_span: "Span[Any]") -> None:
-        """Process span completion with enhanced business intelligence"""
-        data = agent_span.span_data
-
-        # Process response data if available
-        self._process_response_data(span, data)
-
-        # Extract and set token usage for business intelligence
-        self._extract_token_usage(span, data)
-
-    def _extract_token_usage(self, span: Any, data: Any) -> None:
-        """Extract token usage and calculate costs (OpenLIT's business intelligence)"""
+            return get_chat_model_cost(
+                model, self.pricing_info, prompt_tokens, completion_tokens
+            )
+        except Exception:  # pylint: disable=broad-exception-caught
+            return 0.0
+
+    # Abstract method implementations required by OpenAI Agents framework
+    def on_trace_start(self, trace):
+        """Called when a trace starts - required by OpenAI Agents framework"""
+        try:
+            self.start_trace(getattr(trace, 'trace_id', 'unknown'),
+                             getattr(trace, 'name', 'workflow'))
+        except Exception:  # pylint: disable=broad-exception-caught
+            pass
+
+    def on_trace_end(self, trace):
+        """Called when a trace ends - required by OpenAI Agents framework"""
+        try:
+            self.end_trace(getattr(trace, 'trace_id', 'unknown'))
+        except Exception:  # pylint: disable=broad-exception-caught
+            pass
+
+    def on_span_start(self, span):
+        """Called when a span starts - required by OpenAI Agents framework"""
+        try:
+            trace_id = getattr(span, 'trace_id', 'unknown')
+            self.span_start(span, trace_id)
+        except Exception:  # pylint: disable=broad-exception-caught
+            pass
+
+    def on_span_end(self, span):
+        """Called when a span ends - required by OpenAI Agents framework"""
        try:
-
-
-
-
-            # Check direct usage attributes
-            if hasattr(data, 'usage'):
-                usage = data.usage
-                input_tokens = getattr(usage, 'input_tokens', 0) or getattr(usage, 'prompt_tokens', 0)
-                output_tokens = getattr(usage, 'output_tokens', 0) or getattr(usage, 'completion_tokens', 0)
-
-            # Check response object
-            elif hasattr(data, 'response') and hasattr(data.response, 'usage'):
-                usage = data.response.usage
-                input_tokens = getattr(usage, 'input_tokens', 0) or getattr(usage, 'prompt_tokens', 0)
-                output_tokens = getattr(usage, 'output_tokens', 0) or getattr(usage, 'completion_tokens', 0)
-
-            # Set token attributes
-            span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
-            span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
-            span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
-
-            # Calculate cost (OpenLIT's business intelligence advantage)
-            model = getattr(data, 'model', 'gpt-4o')
-            cost = get_chat_model_cost(model, self._pricing_info, input_tokens, output_tokens)
-            span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
-
-        except Exception:
-            pass  # Ignore errors in token usage extraction
-
-    def _process_response_data(self, span: Any, data: Any) -> None:
-        """Process response data with content capture"""
-        if self._capture_message_content:
-            self._capture_input_output(span, data)
-
-    def _process_trace_completion(self, span: Any, trace: "Trace") -> None:
-        """Process trace completion with business intelligence aggregation"""
-        # Add trace-level metadata
-        span.set_attribute(SemanticConvention.GEN_AI_OPERATION_NAME, "workflow")
-
-        # Calculate total duration
-        if trace.trace_id in self._span_start_times:
-            start_time = self._span_start_times[trace.trace_id]
-            duration = time.time() - start_time
-            span.set_attribute(SemanticConvention.GEN_AI_CLIENT_OPERATION_DURATION, duration)
-
-    def _parse_timestamp(self, timestamp: Any) -> float:
-        """Parse timestamp from various formats"""
-        if isinstance(timestamp, (int, float)):
-            return float(timestamp)
-        elif isinstance(timestamp, str):
-            try:
-                # Try parsing ISO format
-                dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
-                return dt.timestamp()
-            except ValueError:
-                return time.time()
-        else:
-            return time.time()
-
-    def _as_utc_nano(self, timestamp: float) -> int:
-        """Convert timestamp to UTC nanoseconds for OpenTelemetry"""
-        return int(timestamp * 1_000_000_000)
-
-    def force_flush(self) -> bool:
-        """Force flush any pending spans (required by TracingProcessor)"""
-        return True
-
-    def shutdown(self) -> bool:
-        """Shutdown the processor (required by TracingProcessor)"""
-        return True
+            trace_id = getattr(span, 'trace_id', 'unknown')
+            self.span_end(span, trace_id)
+        except Exception:  # pylint: disable=broad-exception-caught
+            pass