openlit 1.34.28__py3-none-any.whl → 1.34.29__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry; it is provided for informational purposes only.
- openlit/instrumentation/openai/utils.py +56 -23
- openlit/instrumentation/openai_agents/__init__.py +46 -26
- openlit/instrumentation/openai_agents/processor.py +600 -0
- openlit/semcov/__init__.py +1 -0
- {openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/METADATA +2 -1
- {openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/RECORD +8 -8
- openlit/instrumentation/openai_agents/openai_agents.py +0 -65
- {openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/LICENSE +0 -0
- {openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/WHEEL +0 -0

openlit/instrumentation/openai/utils.py
CHANGED

```diff
@@ -22,6 +22,14 @@ from openlit.__helpers import (
 )
 from openlit.semcov import SemanticConvention
 
+def handle_not_given(value, default=None):
+    """
+    Handle OpenAI's NotGiven values by converting them to appropriate defaults.
+    """
+    if hasattr(value, '__class__') and value.__class__.__name__ == 'NotGiven':
+        return default
+    return value
+
 def format_content(messages):
     """
     Format the messages into a string for span events.
```
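The helper exists because OpenAI's Python SDK represents omitted keyword arguments with a `NotGiven` sentinel (exported as `openai.NOT_GIVEN`), which is not a valid OpenTelemetry attribute value. A quick sketch of the behavior, assuming openlit 1.34.29 and the openai SDK are installed:

```python
# The sentinel is detected by class name, so openlit avoids a hard import
# of openai internals.
from openai import NOT_GIVEN
from openlit.instrumentation.openai.utils import handle_not_given

print(handle_not_given(NOT_GIVEN, 1.0))  # -> 1.0  (sentinel replaced by the default)
print(handle_not_given(0.2, 1.0))        # -> 0.2  (real values pass through)
print(handle_not_given(None, "mp3"))     # -> None (only NotGiven is remapped)
```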

```diff
@@ -248,9 +256,15 @@ def common_response_logic(scope, pricing_info, environment, application_name, …
         environment, application_name, is_stream, scope._tbt, scope._ttft, version)
 
     # Span Attributes for Request parameters specific to responses API
-    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temperature", 1.0))
-    scope._span.set_attribute(
-        …
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, handle_not_given(scope._kwargs.get("temperature"), 1.0))
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_TOP_P,
+        handle_not_given(scope._kwargs.get("top_p"), 1.0)
+    )
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
+        handle_not_given(scope._kwargs.get("max_output_tokens"), -1)
+    )
 
     # Reasoning parameters
     reasoning = scope._kwargs.get("reasoning", {})
```

```diff
@@ -427,20 +441,30 @@ def common_chat_logic(scope, pricing_info, environment, application_name, …
     cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
 
     # Common Span Attributes
-    common_span_attributes(
-        …
+    common_span_attributes(
+        scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+        SemanticConvention.GEN_AI_SYSTEM_OPENAI,
+        scope._server_address, scope._server_port, request_model,
+        scope._response_model, environment, application_name,
+        is_stream, scope._tbt, scope._ttft, version
+    )
 
     # Span Attributes for Request parameters
-    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, str(scope._kwargs.get("seed", "")))
-    scope._span.set_attribute(
-        …
-    scope._span.set_attribute(SemanticConvention.…
-    scope._span.set_attribute(
-        …
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, str(handle_not_given(scope._kwargs.get("seed"), "")))
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+        handle_not_given(scope._kwargs.get("frequency_penalty"), 0.0)
+    )
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, handle_not_given(scope._kwargs.get("max_tokens"), -1))
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
+        handle_not_given(scope._kwargs.get("presence_penalty"), 0.0)
+    )
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, handle_not_given(scope._kwargs.get("stop"), []))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, handle_not_given(scope._kwargs.get("temperature"), 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, handle_not_given(scope._kwargs.get("top_p"), 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, handle_not_given(scope._kwargs.get("user"), ""))
 
     # Span Attributes for Response parameters
     scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
```
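Note the subtlety this fixes: `scope._kwargs.get("temperature", 1.0)` already falls back when the key is absent, but when a caller explicitly forwards `NOT_GIVEN` (common in wrapper code), `get()` returns the sentinel itself and the span received a non-primitive attribute value. A hedged illustration, not openlit API:

```python
from openai import NOT_GIVEN
from openlit.instrumentation.openai.utils import handle_not_given

kwargs = {"temperature": NOT_GIVEN}         # a wrapper forwarding an "unset" value

old_value = kwargs.get("temperature", 1.0)  # -> NOT_GIVEN sentinel, not 1.0
new_value = handle_not_given(kwargs.get("temperature"), 1.0)  # -> 1.0
```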

```diff
@@ -571,8 +595,11 @@ def common_embedding_logic(scope, request_model, pricing_info, environment, …
         environment, application_name, False, scope._tbt, scope._ttft, version)
 
     # Span Attributes for Request parameters
-    scope._span.set_attribute(
-        …
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_ENCODING_FORMATS,
+        [handle_not_given(scope._kwargs.get("encoding_format"), "float")]
+    )
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, handle_not_given(scope._kwargs.get("user"), ""))
 
     # Span Attributes for Cost and Tokens
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
```

```diff
@@ -611,9 +638,12 @@ def common_image_logic(scope, request_model, pricing_info, environment, …
         environment, application_name, False, scope._tbt, scope._ttft, version)
 
     # Span Attributes for Request parameters
-    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IMAGE_SIZE, scope._kwargs.get("size", "1024x1024"))
-    scope._span.set_attribute(
-        …
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IMAGE_SIZE, handle_not_given(scope._kwargs.get("size"), "1024x1024"))
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_IMAGE_QUALITY,
+        handle_not_given(scope._kwargs.get("quality"), "standard")
+    )
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, handle_not_given(scope._kwargs.get("user"), ""))
 
     # Extract response data
     response_dict = scope._response_dict
```

```diff
@@ -709,9 +739,12 @@ def common_audio_logic(scope, request_model, pricing_info, environment, …
         environment, application_name, False, scope._tbt, scope._ttft, version)
 
     # Span Attributes for Request parameters
-    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_VOICE, scope._kwargs.get("voice", "alloy"))
-    scope._span.set_attribute(
-        …
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_VOICE, handle_not_given(scope._kwargs.get("voice"), "alloy"))
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_AUDIO_RESPONSE_FORMAT,
+        handle_not_given(scope._kwargs.get("response_format"), "mp3")
+    )
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_AUDIO_SPEED, handle_not_given(scope._kwargs.get("speed"), 1.0))
 
     # Span Attributes for Cost
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
```

openlit/instrumentation/openai_agents/__init__.py
CHANGED

```diff
@@ -1,42 +1,62 @@
-"""
+"""
+OpenLIT OpenAI Agents Instrumentation - Native Tracing Integration
+"""
 
 from typing import Collection
 import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
-from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.openai_agents.…
-    create_agent
-)
+from openlit.instrumentation.openai_agents.processor import OpenLITTracingProcessor
 
-_instruments = (…
+_instruments = ("openai-agents >= 0.0.3",)
 
 class OpenAIAgentsInstrumentor(BaseInstrumentor):
-    """
-    An instrumentor for openai-agents's client library.
-    """
+    """OpenLIT instrumentor for OpenAI Agents using native tracing system"""
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
-        …
-        environment = kwargs.get(…
-        …
-        disable_metrics = kwargs.get(…
-        …
+        version = importlib.metadata.version("openai-agents")
+        environment = kwargs.get("environment", "default")
+        application_name = kwargs.get("application_name", "default")
+        tracer = kwargs.get("tracer")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
+        metrics = kwargs.get("metrics_dict")
+        disable_metrics = kwargs.get("disable_metrics")
+        detailed_tracing = kwargs.get("detailed_tracing", False)
+
+        # Create our processor with OpenLIT enhancements
+        processor = OpenLITTracingProcessor(
+            tracer=tracer,
+            version=version,
+            environment=environment,
+            application_name=application_name,
+            pricing_info=pricing_info,
+            capture_message_content=capture_message_content,
+            metrics=metrics,
+            disable_metrics=disable_metrics,
+            detailed_tracing=detailed_tracing
         )
 
+        # Integrate with OpenAI Agents' native tracing system
+        try:
+            from agents import set_trace_processors
+            # Replace existing processors with our enhanced processor
+            set_trace_processors([processor])
+        except ImportError:
+            # Fallback: Add our processor to existing ones
+            try:
+                from agents import add_trace_processor
+                add_trace_processor(processor)
+            except ImportError:
+                pass  # Agents package may not have tracing
+
     def _uninstrument(self, **kwargs):
-        # …
+        # Clear our processors
+        try:
+            from agents import set_trace_processors
+            set_trace_processors([])
+        except ImportError:
+            pass
```
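For orientation, a minimal sketch of how this instrumentor is driven; the keyword names mirror what `_instrument()` reads above, while the entry point (normally `openlit.init()`) and the tracer setup are assumptions for illustration:

```python
# Hypothetical manual wiring; openlit.init() normally performs this step.
from opentelemetry import trace
from openlit.instrumentation.openai_agents import OpenAIAgentsInstrumentor

OpenAIAgentsInstrumentor().instrument(
    tracer=trace.get_tracer("openlit"),
    environment="production",
    application_name="support-bot",
    pricing_info={},               # pricing table, normally loaded by openlit
    capture_message_content=True,  # opt in to prompt/completion capture
    detailed_tracing=False,
)
# From here on, every trace and span the agents SDK emits is routed through
# OpenLITTracingProcessor via set_trace_processors([processor]).
```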

openlit/instrumentation/openai_agents/processor.py
ADDED (new file, +600 -0)

```python
"""
OpenLIT OpenAI Agents Instrumentation - Native TracingProcessor Implementation
"""

import json
import time
from datetime import datetime
from typing import Any, Dict, Optional, TYPE_CHECKING

from opentelemetry import context as context_api
from opentelemetry.trace import SpanKind, Status, StatusCode, set_span_in_context
from opentelemetry.context import detach

from openlit.__helpers import (
    common_framework_span_attributes,
    handle_exception,
    record_framework_metrics,
    get_chat_model_cost
)
from openlit.semcov import SemanticConvention

# Try to import agents framework components with fallback
try:
    from agents import TracingProcessor
    if TYPE_CHECKING:
        from agents import Trace, Span
    TRACING_AVAILABLE = True
except ImportError:
    # Create dummy class for when agents is not available
    class TracingProcessor:
        """Dummy TracingProcessor class for when agents is not available"""

        def force_flush(self):
            """Dummy force_flush method"""
            return None

        def shutdown(self):
            """Dummy shutdown method"""
            return None

    if TYPE_CHECKING:
        # Type hints only - these don't exist at runtime when agents unavailable
        Trace = Any
        Span = Any

    TRACING_AVAILABLE = False


class OpenLITTracingProcessor(TracingProcessor):
    """
    OpenLIT processor that integrates with OpenAI Agents' native tracing system
    Provides superior business intelligence while maintaining perfect hierarchy
    """

    def __init__(self, tracer: Any, version: str, environment: str,
                 application_name: str, pricing_info: dict, capture_message_content: bool,
                 metrics: Optional[Any], disable_metrics: bool, detailed_tracing: bool):
        if not TRACING_AVAILABLE:
            return

        self._tracer = tracer
        self._version = version
        self._environment = environment
        self._application_name = application_name
        self._pricing_info = pricing_info
        self._capture_message_content = capture_message_content
        self._metrics = metrics
        self._disable_metrics = disable_metrics
        self._detailed_tracing = detailed_tracing

        # Track spans for hierarchy
        self._root_spans: Dict[str, Any] = {}
        self._otel_spans: Dict[str, Any] = {}
        self._tokens: Dict[str, object] = {}
        self._span_start_times: Dict[str, float] = {}

        # Track handoff context for better span naming
        self._last_handoff_from: Optional[str] = None

    def on_trace_start(self, trace: "Trace") -> None:
        """Called when a trace is started - creates root workflow span"""
        if not TRACING_AVAILABLE:
            return

        # Create root workflow span with {operation_type} {operation_name} format
        workflow_name = getattr(trace, 'name', 'workflow')
        span_name = f"agent {workflow_name}"  # Follow {operation_type} {operation_name} pattern

        # Use tracer.start_span for TracingProcessor pattern with proper context
        otel_span = self._tracer.start_span(
            name=span_name,
            kind=SpanKind.CLIENT
        )

        # Set common framework attributes for root span
        self._set_common_attributes(otel_span, trace.trace_id)

        # Set agent name for root span using semantic conventions
        if hasattr(trace, 'name') and trace.name:
            otel_span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, trace.name)

        # Set default model for root span
        otel_span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, "gpt-4o")

        self._root_spans[trace.trace_id] = otel_span
        self._span_start_times[trace.trace_id] = time.time()

    def on_span_start(self, span: "Span[Any]") -> None:
        """Called when a span is started - creates child spans with proper hierarchy"""
        if not TRACING_AVAILABLE or not hasattr(span, 'started_at') or not span.started_at:
            return

        start_time = self._parse_timestamp(span.started_at)

        # Determine parent span for proper hierarchy
        parent_span = None
        if span.parent_id and span.parent_id in self._otel_spans:
            parent_span = self._otel_spans[span.parent_id]
        elif span.trace_id in self._root_spans:
            parent_span = self._root_spans[span.trace_id]

        # Set context for parent-child relationship
        context = set_span_in_context(parent_span) if parent_span else None

        # Get semantic span name and operation type
        span_name = self._get_span_name(span)
        operation_type = self._get_operation_type(span.span_data)

        # Create span with proper context
        otel_span = self._tracer.start_span(
            name=span_name,
            context=context,
            start_time=self._as_utc_nano(start_time),
            kind=SpanKind.CLIENT
        )

        # Set common framework attributes for all spans
        self._set_common_framework_attributes(otel_span, operation_type)

        # Set span-specific attributes
        self._set_span_attributes(otel_span, span)

        # Track span and context
        self._otel_spans[span.span_id] = otel_span
        self._tokens[span.span_id] = context_api.attach(set_span_in_context(otel_span))
        self._span_start_times[span.span_id] = time.time()

    def on_span_end(self, span: "Span[Any]") -> None:
        """Called when a span is finished - adds business intelligence and ends span"""
        if not TRACING_AVAILABLE or span.span_id not in self._otel_spans:
            return

        otel_span = self._otel_spans[span.span_id]

        try:
            # Add response data and business intelligence
            self._process_span_completion(otel_span, span)

            # Set successful status
            otel_span.set_status(Status(StatusCode.OK))

            # Record metrics if enabled
            if not self._disable_metrics and self._metrics and span.span_id in self._span_start_times:
                start_time = self._span_start_times[span.span_id]
                end_time = time.time()
                operation_type = self._get_operation_type(span.span_data)
                record_framework_metrics(
                    self._metrics, operation_type, SemanticConvention.GEN_AI_SYSTEM_OPENAI_AGENTS,
                    "localhost", 80, self._environment, self._application_name,
                    start_time, end_time
                )

        except Exception as e:
            handle_exception(otel_span, e)
        finally:
            # End span and cleanup
            otel_span.end()

            # Cleanup context
            if span.span_id in self._tokens:
                detach(self._tokens[span.span_id])
                del self._tokens[span.span_id]

            # Cleanup tracking
            if span.span_id in self._otel_spans:
                del self._otel_spans[span.span_id]
            if span.span_id in self._span_start_times:
                del self._span_start_times[span.span_id]

    def on_trace_end(self, trace: "Trace") -> None:
        """Called when a trace is finished - ends root span with business intelligence"""
        if not TRACING_AVAILABLE or trace.trace_id not in self._root_spans:
            return

        root_span = self._root_spans[trace.trace_id]

        try:
            # Add trace-level business intelligence
            self._process_trace_completion(root_span, trace)
            root_span.set_status(Status(StatusCode.OK))
        except Exception as e:
            handle_exception(root_span, e)
        finally:
            root_span.end()

            # Cleanup
            if trace.trace_id in self._root_spans:
                del self._root_spans[trace.trace_id]
            if trace.trace_id in self._span_start_times:
                del self._span_start_times[trace.trace_id]

    def _get_span_name(self, span: "Span[Any]") -> str:
        """Get semantic span name using {operation_type} {operation_name} format"""
        data = span.span_data
        operation_type = self._get_operation_type(data)

        # Extract operation name based on span type
        operation_name = "unknown"

        # Special handling for handoffs
        if hasattr(data, '__class__') and data.__class__.__name__ == 'HandoffSpanData':
            if hasattr(data, 'to_agent') and data.to_agent:
                operation_name = f"to {data.to_agent}"
            else:
                operation_name = "handoff"

        # Use agent name for agent spans
        elif hasattr(data, '__class__') and data.__class__.__name__ == 'AgentSpanData':
            # Try multiple possible attribute names for agent name
            agent_name = None

            for attr in ['agent_name', 'name', 'agent', 'agent_id']:
                if hasattr(data, attr):
                    agent_name = getattr(data, attr)
                    if agent_name and isinstance(agent_name, str):
                        break

            # If still no agent name, try looking in context or other attributes
            if not agent_name:
                # Try context or other nested attributes
                if hasattr(data, 'context') and hasattr(data.context, 'agent'):
                    agent_name = getattr(data.context.agent, 'name', None)
                elif hasattr(data, 'metadata') and hasattr(data.metadata, 'agent_name'):
                    agent_name = data.metadata.agent_name

            if agent_name:
                operation_name = agent_name
            else:
                # If no agent name found, use a more descriptive fallback
                operation_name = "execution"

        # Use name if available for other spans
        elif hasattr(data, 'name') and isinstance(data.name, str):
            operation_name = data.name

        # Fallback to type-based names
        else:
            operation_name = getattr(data, 'type', 'operation')

        # Return formatted name: {operation_type} {operation_name}
        return f"{operation_type} {operation_name}"

    def _get_operation_type(self, data: Any) -> str:
        """Map span data to operation types"""
        class_name = data.__class__.__name__ if hasattr(data, '__class__') else str(type(data))

        mapping = {
            'AgentSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
            'GenerationSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
            'FunctionSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_TOOLS,
            'HandoffSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
            'ResponseSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
        }

        return mapping.get(class_name, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)

    def _set_common_framework_attributes(self, span: Any, operation_type: str) -> None:
        """Set common framework attributes using semantic conventions"""
        # Create scope object for common_framework_span_attributes
        scope = type("GenericScope", (), {})()
        scope._span = span
        scope._start_time = time.time()
        scope._end_time = time.time()

        # Use common framework attributes helper
        # For framework operations, use localhost like other agent frameworks (AG2, Pydantic AI)
        common_framework_span_attributes(
            scope, SemanticConvention.GEN_AI_SYSTEM_OPENAI_AGENTS,
            "localhost", 80, self._environment, self._application_name,
            self._version, operation_type, None
        )

    def _set_common_attributes(self, span: Any, trace_id: str) -> None:
        """Set common framework attributes for root spans"""
        self._set_common_framework_attributes(span, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)

    def _set_span_attributes(self, span: Any, agent_span: "Span[Any]") -> None:
        """Set span-specific attributes based on span data using semantic conventions"""
        data = agent_span.span_data

        # Agent-specific attributes using semantic conventions
        if hasattr(data, 'agent_name') and data.agent_name:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, data.agent_name)
        elif hasattr(data, 'name') and data.name:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, data.name)

        # Enhanced model information extraction
        model = self._extract_model_info(data, agent_span)
        if model:
            span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, str(model))

        # Enhanced input/output capture with MIME types (OpenLIT enhancement)
        if self._capture_message_content:
            self._capture_input_output(span, data)

        # Enhanced token usage details (inspired by OpenInference)
        self._capture_detailed_token_usage(span, data)

        # Model invocation parameters as JSON (new feature from OpenInference)
        self._capture_model_parameters(span, data)

        # Tool/function information for tool calls
        if hasattr(data, '__class__') and 'Function' in data.__class__.__name__:
            if hasattr(data, 'function_name'):
                span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, data.function_name)
            if hasattr(data, 'arguments'):
                span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(data.arguments))

        # Enhanced handoff information extraction
        if hasattr(data, '__class__') and 'Handoff' in data.__class__.__name__:
            target_agent = self._extract_handoff_target(data, agent_span)
            if target_agent:
                span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, target_agent)
            else:
                # Fallback for handoff spans without clear target
                span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, "agent handoff")

        # Request/response IDs if available
        if hasattr(data, 'request_id'):
            span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, data.request_id)
        elif hasattr(data, 'response_id'):
            span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, data.response_id)

    def _extract_model_info(self, data: Any, agent_span: "Span[Any]") -> Optional[str]:
        """Extract model information from span data or agent configuration"""
        # Try direct model attributes first
        model_attrs = ['model', 'model_name', 'model_id', 'llm_model', 'openai_model']

        model = self._check_model_attrs(data, model_attrs)
        if model:
            return model

        # Try nested configuration objects
        config_attrs = ['config', 'configuration', 'client_config', 'llm_config']
        model = self._check_config_model_attrs(data, config_attrs, model_attrs)
        if model:
            return model

        # Try looking in the agent span itself
        if hasattr(agent_span, 'model'):
            return str(agent_span.model)

        # Try agent_config if available
        if hasattr(agent_span, 'agent_config'):
            model = self._check_model_attrs(agent_span.agent_config, model_attrs)
            if model:
                return model

        # Default fallback
        return "gpt-4o"

    def _check_model_attrs(self, obj: Any, model_attrs: list) -> Optional[str]:
        """Helper method to check model attributes on an object"""
        for attr in model_attrs:
            if not hasattr(obj, attr):
                continue
            model_value = getattr(obj, attr)
            if model_value and isinstance(model_value, str):
                return model_value
        return None

    def _check_config_model_attrs(self, data: Any, config_attrs: list, model_attrs: list) -> Optional[str]:
        """Helper method to check model attributes in nested configuration objects"""
        for config_attr in config_attrs:
            if not hasattr(data, config_attr):
                continue
            config = getattr(data, config_attr)
            if not config:
                continue
            model = self._check_model_attrs(config, model_attrs)
            if model:
                return model
        return None

    def _extract_handoff_target(self, data: Any, agent_span: "Span[Any]") -> Optional[str]:
        """Extract handoff target information with enhanced logic"""
        # Try direct target attributes
        target_attrs = ['to_agent', 'target_agent', 'destination_agent', 'next_agent']
        for attr in target_attrs:
            if hasattr(data, attr):
                target = getattr(data, attr)
                if target and isinstance(target, str):
                    return f"to {target}"

        # Try from_agent for better handoff description
        from_attrs = ['from_agent', 'source_agent', 'previous_agent']
        for attr in from_attrs:
            if hasattr(data, attr):
                source = getattr(data, attr)
                if source and isinstance(source, str):
                    return f"from {source}"

        # Try nested objects
        if hasattr(data, 'handoff_info'):
            info = data.handoff_info
            for attr in target_attrs + from_attrs:
                if hasattr(info, attr):
                    value = getattr(info, attr)
                    if value and isinstance(value, str):
                        prefix = "to" if attr in target_attrs else "from"
                        return f"{prefix} {value}"

        return None

    def _capture_input_output(self, span: Any, data: Any) -> None:
        """Capture input/output content with MIME type detection (OpenLIT enhancement)"""
        try:
            # Capture input content
            if hasattr(data, 'input') and data.input is not None:
                content = str(data.input)
                span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, content)
                # Set MIME type based on content structure
                if content.startswith('{') or content.startswith('['):
                    span.set_attribute("gen_ai.content.prompt.mime_type", "application/json")
                else:
                    span.set_attribute("gen_ai.content.prompt.mime_type", "text/plain")

            # Capture output/response content
            if hasattr(data, 'response') and data.response is not None:
                content = str(data.response)
                span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, content)
                # Set MIME type based on content structure
                if content.startswith('{') or content.startswith('['):
                    span.set_attribute("gen_ai.content.completion.mime_type", "application/json")
                else:
                    span.set_attribute("gen_ai.content.completion.mime_type", "text/plain")

        except Exception:
            pass  # Ignore export errors

    def _capture_detailed_token_usage(self, span: Any, data: Any) -> None:
        """Capture detailed token usage information (inspired by OpenInference)"""
        try:
            if hasattr(data, 'usage'):
                usage = data.usage

                # Standard token usage
                if hasattr(usage, 'input_tokens'):
                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
                if hasattr(usage, 'output_tokens'):
                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)

                # Enhanced token details (when available)
                if hasattr(usage, 'input_tokens_details'):
                    details = usage.input_tokens_details
                    if hasattr(details, 'cached_tokens'):
                        span.set_attribute("gen_ai.usage.input_tokens.cached", details.cached_tokens)
                    if hasattr(details, 'reasoning_tokens'):
                        span.set_attribute("gen_ai.usage.input_tokens.reasoning", details.reasoning_tokens)

                if hasattr(usage, 'output_tokens_details'):
                    details = usage.output_tokens_details
                    if hasattr(details, 'reasoning_tokens'):
                        span.set_attribute("gen_ai.usage.output_tokens.reasoning", details.reasoning_tokens)

        except Exception:
            pass  # Ignore export errors

    def _capture_model_parameters(self, span: Any, data: Any) -> None:
        """Capture model invocation parameters as JSON (new feature from OpenInference)"""
        try:
            # Look for model configuration parameters
            params = {}

            # Common parameter attributes
            param_attrs = ['temperature', 'max_tokens', 'top_p', 'frequency_penalty', 'presence_penalty']
            for attr in param_attrs:
                if hasattr(data, attr):
                    params[attr] = getattr(data, attr)

            # Try nested config objects
            if hasattr(data, 'config'):
                config = data.config
                for attr in param_attrs:
                    if hasattr(config, attr):
                        params[attr] = getattr(config, attr)

            # Try response object if available
            if hasattr(data, 'response') and hasattr(data.response, 'model_dump'):
                try:
                    response_dict = data.response.model_dump()
                    if response_dict and isinstance(response_dict, dict):
                        # Extract model parameters from response
                        if 'model' in response_dict:
                            params['model'] = response_dict['model']
                        if 'usage' in response_dict:
                            params['usage'] = response_dict['usage']
                except Exception:
                    pass

            # Set as JSON if we found any parameters
            if params:
                span.set_attribute("gen_ai.request.parameters", json.dumps(params))

        except Exception:
            pass  # Ignore export errors

    def _process_span_completion(self, span: Any, agent_span: "Span[Any]") -> None:
        """Process span completion with enhanced business intelligence"""
        data = agent_span.span_data

        # Process response data if available
        self._process_response_data(span, data)

        # Extract and set token usage for business intelligence
        self._extract_token_usage(span, data)

    def _extract_token_usage(self, span: Any, data: Any) -> None:
        """Extract token usage and calculate costs (OpenLIT's business intelligence)"""
        try:
            # Try to extract token usage from various possible locations
            input_tokens = 0
            output_tokens = 0

            # Check direct usage attributes
            if hasattr(data, 'usage'):
                usage = data.usage
                input_tokens = getattr(usage, 'input_tokens', 0) or getattr(usage, 'prompt_tokens', 0)
                output_tokens = getattr(usage, 'output_tokens', 0) or getattr(usage, 'completion_tokens', 0)

            # Check response object
            elif hasattr(data, 'response') and hasattr(data.response, 'usage'):
                usage = data.response.usage
                input_tokens = getattr(usage, 'input_tokens', 0) or getattr(usage, 'prompt_tokens', 0)
                output_tokens = getattr(usage, 'output_tokens', 0) or getattr(usage, 'completion_tokens', 0)

            # Set token attributes
            span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
            span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
            span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)

            # Calculate cost (OpenLIT's business intelligence advantage)
            model = getattr(data, 'model', 'gpt-4o')
            cost = get_chat_model_cost(model, self._pricing_info, input_tokens, output_tokens)
            span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)

        except Exception:
            pass  # Ignore errors in token usage extraction

    def _process_response_data(self, span: Any, data: Any) -> None:
        """Process response data with content capture"""
        if self._capture_message_content:
            self._capture_input_output(span, data)

    def _process_trace_completion(self, span: Any, trace: "Trace") -> None:
        """Process trace completion with business intelligence aggregation"""
        # Add trace-level metadata
        span.set_attribute(SemanticConvention.GEN_AI_OPERATION_NAME, "workflow")

        # Calculate total duration
        if trace.trace_id in self._span_start_times:
            start_time = self._span_start_times[trace.trace_id]
            duration = time.time() - start_time
            span.set_attribute(SemanticConvention.GEN_AI_CLIENT_OPERATION_DURATION, duration)

    def _parse_timestamp(self, timestamp: Any) -> float:
        """Parse timestamp from various formats"""
        if isinstance(timestamp, (int, float)):
            return float(timestamp)
        elif isinstance(timestamp, str):
            try:
                # Try parsing ISO format
                dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
                return dt.timestamp()
            except ValueError:
                return time.time()
        else:
            return time.time()

    def _as_utc_nano(self, timestamp: float) -> int:
        """Convert timestamp to UTC nanoseconds for OpenTelemetry"""
        return int(timestamp * 1_000_000_000)

    def force_flush(self) -> bool:
        """Force flush any pending spans (required by TracingProcessor)"""
        return True

    def shutdown(self) -> bool:
        """Shutdown the processor (required by TracingProcessor)"""
        return True
```
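Taken together, the callbacks above mirror each agents-SDK span as an OTel span named `{operation_type} {operation_name}`. A sketch of the tree a simple triage-and-handoff run would produce, assuming the operation-type constants render as `agent` and `chat`:

```python
# Illustrative only - expected span hierarchy for a two-agent handoff run:
#
#   agent Customer Support          <- root span from on_trace_start (trace name)
#   +-- agent Triage                <- AgentSpanData
#   |   +-- chat response           <- ResponseSpanData (tokens + cost attached)
#   |   +-- agent to Billing        <- HandoffSpanData via _extract_handoff_target
#   +-- agent Billing               <- AgentSpanData after the handoff
#
# Name assembly, as in _get_span_name():
operation_type = "agent"        # AgentSpanData -> GEN_AI_OPERATION_TYPE_AGENT
operation_name = "to Billing"   # HandoffSpanData with to_agent="Billing"
assert f"{operation_type} {operation_name}" == "agent to Billing"
```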
openlit/semcov/__init__.py
CHANGED

```diff
@@ -135,6 +135,7 @@ class SemanticConvention:
     GEN_AI_SYSTEM_FIRECRAWL = "firecrawl"
     GEN_AI_SYSTEM_LETTA = "letta"
     GEN_AI_SYSTEM_TOGETHER = "together"
+    GEN_AI_SYSTEM_OPENAI_AGENTS = "openai_agents"
     GEN_AI_SYSTEM_PYDANTIC_AI = "pydantic_ai"
 
     # GenAI Framework Component Attributes (Standard)
```
{openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.28
+Version: 1.34.29
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -18,6 +18,7 @@ Requires-Dist: anthropic (>=0.42.0,<1.0.0)
 Requires-Dist: boto3 (>=1.34.0,<2.0.0)
 Requires-Dist: botocore (>=1.34.0,<2.0.0)
 Requires-Dist: openai (>=1.1.1,<2.0.0)
+Requires-Dist: openai-agents (>=0.0.3)
 Requires-Dist: opentelemetry-api (>=1.30.0,<2.0.0)
 Requires-Dist: opentelemetry-exporter-otlp (>=1.30.0,<2.0.0)
 Requires-Dist: opentelemetry-instrumentation (>=0.52b0,<1.0.0)
```
{openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/RECORD
CHANGED

```diff
@@ -119,9 +119,9 @@ openlit/instrumentation/ollama/utils.py,sha256=TIE3_ur2U-iyCclna7TzwjDIFC9PZjRnZ…
 openlit/instrumentation/openai/__init__.py,sha256=4RWRhrRa589jiwvFf8_fLBW6UB5Btrd17mcDKv5VhJk,5546
 openlit/instrumentation/openai/async_openai.py,sha256=QvEEKZnZYl9Vf-wsX1voTMMZed1eNhRI9aUT8CtFJi0,18003
 openlit/instrumentation/openai/openai.py,sha256=34_FqOwSroNOm_mmLzZb8Y7xtr5StwnUyRQmHP6HHJc,17698
-openlit/instrumentation/openai/utils.py,sha256=…
-openlit/instrumentation/openai_agents/__init__.py,sha256=…
-openlit/instrumentation/openai_agents/…
+openlit/instrumentation/openai/utils.py,sha256=um8ReamuvmgpKDd6Xr32PGjeWejD8M8e7yzL5P_soBM,36340
+openlit/instrumentation/openai_agents/__init__.py,sha256=0CkxeGyUJcucDegogDtw_lKsA0drQogzwtgyBVmasIE,2351
+openlit/instrumentation/openai_agents/processor.py,sha256=7hHM8OuIwLwfCfEBZQlqaHKXUdTkiiZz23JppwsnXxw,25860
 openlit/instrumentation/phidata/__init__.py,sha256=tqls5-UI6FzbjxYgq_qqAfALhWJm8dHn2NtgqiQA4f8,1557
 openlit/instrumentation/phidata/phidata.py,sha256=ohrxs6i0Oik75P2BrjNGbK71tdZg94ZMmaXixrXwV5M,4834
 openlit/instrumentation/pinecone/__init__.py,sha256=-3wD35oCnwjwBQV3-gZs2XgpZ2wT9jmiMGjalpF9BhI,3683
@@ -159,8 +159,8 @@ openlit/instrumentation/vllm/vllm.py,sha256=VzazF2f4LLwjZDO_G8lIN_d622oSJM0fIO9w…
 openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=GM2PDloBGRhBTkHHkYaqmOwIAQkY124ZhW4sEqW1Fgk,7086
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
-openlit/semcov/__init__.py,sha256=…
-openlit-1.34.28.dist-info/…
-openlit-1.34.28.dist-info/…
-openlit-1.34.28.dist-info/…
-openlit-1.34.28.dist-info/…
+openlit/semcov/__init__.py,sha256=V1nmtUWyXVydBzCRUYknwpVlPxThZIP64VKiedd9e8A,18707
+openlit-1.34.29.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.34.29.dist-info/METADATA,sha256=MTEXtKUMyUJ6AjDFY7MQMx_r75J38RaTSj7uWdG0G9E,23509
+openlit-1.34.29.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+openlit-1.34.29.dist-info/RECORD,,
```
openlit/instrumentation/openai_agents/openai_agents.py
REMOVED (+0 -65, replaced by processor.py)

```python
"""
Module for monitoring AG2 API calls.
"""

import logging
from opentelemetry.trace import SpanKind, Status, StatusCode
from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
from openlit.__helpers import (
    handle_exception,
)
from openlit.semcov import SemanticConvention

# Initialize logger for logging potential issues and operations
logger = logging.getLogger(__name__)

def set_span_attributes(span, version, operation_name, environment,
    application_name, server_address, server_port, request_model):
    """
    Set common attributes for the span.
    """

    # Set Span attributes (OTel Semconv)
    span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
    span.set_attribute(SemanticConvention.GEN_AI_OPERATION, operation_name)
    span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_AG2)
    span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
    span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)

    # Set Span attributes (Extras)
    span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
    span.set_attribute(SERVICE_NAME, application_name)
    span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)

def create_agent(version, environment, application_name,
    tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for GenAI function call
    """
    def wrapper(wrapped, instance, args, kwargs):
        server_address, server_port = '127.0.0.1', 80

        agent_name = kwargs.get('name', 'openai_agent')
        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT} {agent_name}'

        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            try:
                response = wrapped(*args, **kwargs)

                set_span_attributes(span, version, SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
                    environment, application_name, server_address, server_port, kwargs.get('model', 'gpt-4o'))
                span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, agent_name)

                span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, kwargs.get('instructions', ''))

                span.set_status(Status(StatusCode.OK))

                return response

            except Exception as e:
                handle_exception(span, e)
                logger.error('Error in trace creation: %s', e)
                return response

    return wrapper
```
{openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/LICENSE
File without changes

{openlit-1.34.28.dist-info → openlit-1.34.29.dist-info}/WHEEL
File without changes