openlit 1.34.32__py3-none-any.whl → 1.34.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/pydantic_ai/__init__.py +88 -0
- openlit/instrumentation/pydantic_ai/async_pydantic_ai.py +38 -0
- openlit/instrumentation/pydantic_ai/pydantic_ai.py +99 -10
- openlit/instrumentation/pydantic_ai/utils.py +834 -81
- openlit/semcov/__init__.py +49 -0
- {openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/METADATA +1 -1
- {openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/RECORD +9 -8
- {openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/LICENSE +0 -0
- {openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/WHEEL +0 -0
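The hunks below all belong to openlit's Pydantic AI auto-instrumentation. For orientation, here is a minimal usage sketch showing how that instrumentation is typically enabled; `openlit.init()` is openlit's documented entry point, but the endpoint URL, model string, and result attribute access are illustrative and depend on your environment and pydantic-ai version:

```python
# Hedged usage sketch: enable openlit auto-instrumentation, then run a
# pydantic-ai agent so the spans added in this release are emitted.
# Assumes `pip install openlit pydantic-ai` plus provider credentials.
import openlit
from pydantic_ai import Agent

openlit.init(otlp_endpoint="http://127.0.0.1:4318")  # endpoint is illustrative

agent = Agent("openai:gpt-4o", system_prompt="Be concise.")
result = agent.run_sync("What is OpenTelemetry?")
print(result.output)  # `.output` on recent pydantic-ai; older releases use `.data`
```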
openlit/instrumentation/pydantic_ai/__init__.py
CHANGED
@@ -8,6 +8,12 @@ from wrapt import wrap_function_wrapper
 from openlit.instrumentation.pydantic_ai.pydantic_ai import (
     agent_create,
     agent_run,
+    graph_execution,
+    user_prompt_processing,
+    model_request_processing,
+    tool_calls_processing,
+)
+from openlit.instrumentation.pydantic_ai.async_pydantic_ai import (
     async_agent_run,
 )
 
@@ -77,6 +83,88 @@ class PydanticAIInstrumentor(BaseInstrumentor):
             ),
         )
 
+        # Enhanced instrumentation for richer span hierarchy
+        # These wrap internal Pydantic AI graph execution components
+        try:
+            # Agent.iter() - Graph execution iterator
+            wrap_function_wrapper(
+                "pydantic_ai.agent",
+                "Agent.iter",
+                graph_execution(
+                    version,
+                    environment,
+                    application_name,
+                    tracer,
+                    pricing_info,
+                    capture_message_content,
+                    metrics,
+                    disable_metrics,
+                ),
+            )
+        except Exception:
+            # If Agent.iter doesn't exist, skip this instrumentation
+            pass
+
+        try:
+            # UserPromptNode.run() - User prompt processing
+            wrap_function_wrapper(
+                "pydantic_ai._agent_graph",
+                "UserPromptNode.run",
+                user_prompt_processing(
+                    version,
+                    environment,
+                    application_name,
+                    tracer,
+                    pricing_info,
+                    capture_message_content,
+                    metrics,
+                    disable_metrics,
+                ),
+            )
+        except Exception:
+            # If UserPromptNode.run doesn't exist, skip this instrumentation
+            pass
+
+        try:
+            # ModelRequestNode.run() - Model request processing
+            wrap_function_wrapper(
+                "pydantic_ai._agent_graph",
+                "ModelRequestNode.run",
+                model_request_processing(
+                    version,
+                    environment,
+                    application_name,
+                    tracer,
+                    pricing_info,
+                    capture_message_content,
+                    metrics,
+                    disable_metrics,
+                ),
+            )
+        except Exception:
+            # If ModelRequestNode.run doesn't exist, skip this instrumentation
+            pass
+
+        try:
+            # CallToolsNode.run() - Tool calls processing
+            wrap_function_wrapper(
+                "pydantic_ai._agent_graph",
+                "CallToolsNode.run",
+                tool_calls_processing(
+                    version,
+                    environment,
+                    application_name,
+                    tracer,
+                    pricing_info,
+                    capture_message_content,
+                    metrics,
+                    disable_metrics,
+                ),
+            )
+        except Exception:
+            # If CallToolsNode.run doesn't exist, skip this instrumentation
+            pass
+
     def _uninstrument(self, **kwargs):
         # Proper uninstrumentation logic to revert patched methods
         pass
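All four new hooks share one defensive pattern: a factory builds a wrapper closure over the configuration, hands it to wrapt, and the registration is wrapped in try/except so the hook is skipped silently when the installed pydantic-ai version lacks the target attribute. A standalone sketch of that pattern (`make_wrapper` is a stand-in for `graph_execution()` and friends, not openlit code):

```python
# Sketch of the defensive wrapt pattern used above; runnable on its own.
from wrapt import wrap_function_wrapper


def make_wrapper(label):
    # The factory closes over configuration so the wrapper itself keeps
    # wrapt's fixed (wrapped, instance, args, kwargs) signature.
    def wrapper(wrapped, instance, args, kwargs):
        print(f"[{label}] entering {wrapped.__qualname__}")
        return wrapped(*args, **kwargs)

    return wrapper


try:
    # Mirrors the real target; fails harmlessly if pydantic-ai is absent
    # or its internals have moved.
    wrap_function_wrapper("pydantic_ai.agent", "Agent.iter", make_wrapper("demo"))
except Exception:
    pass
```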
openlit/instrumentation/pydantic_ai/async_pydantic_ai.py
ADDED
@@ -0,0 +1,38 @@
+"""
+Module for monitoring async Pydantic AI API calls.
+"""
+
+from openlit.instrumentation.pydantic_ai.utils import (
+    common_agent_run_async,
+)
+
+
+def async_agent_run(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
+    """
+    Generates a telemetry wrapper for async GenAI function call
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        return await common_agent_run_async(
+            wrapped,
+            instance,
+            args,
+            kwargs,
+            tracer,
+            version,
+            environment,
+            application_name,
+            capture_message_content,
+            pricing_info=pricing_info,
+        )
+
+    return wrapper
openlit/instrumentation/pydantic_ai/pydantic_ai.py
CHANGED
@@ -5,6 +5,10 @@ Module for monitoring Pydantic AI API calls.
 from openlit.instrumentation.pydantic_ai.utils import (
     common_agent_run,
     common_agent_create,
+    common_graph_execution,
+    common_user_prompt_processing,
+    common_model_request_processing,
+    common_tool_calls_processing,
 )
 
 
@@ -23,7 +27,6 @@ def agent_create(
     """
 
     def wrapper(wrapped, instance, args, kwargs):
-        response = wrapped(*args, **kwargs)
         return common_agent_create(
             wrapped,
             instance,
@@ -34,7 +37,6 @@ def agent_create(
             environment,
             application_name,
             capture_message_content,
-            response=response,
         )
 
     return wrapper
@@ -55,7 +57,6 @@ def agent_run(
     """
 
     def wrapper(wrapped, instance, args, kwargs):
-        response = wrapped(*args, **kwargs)
         return common_agent_run(
             wrapped,
             instance,
@@ -66,13 +67,13 @@ def agent_run(
             environment,
             application_name,
             capture_message_content,
-
+            pricing_info=pricing_info,
         )
 
     return wrapper
 
 
-def async_agent_run(
+def graph_execution(
     version,
     environment,
     application_name,
@@ -83,12 +84,101 @@ def async_agent_run(
     disable_metrics,
 ):
     """
-    Generates a telemetry wrapper for async GenAI function call
+    Generates a telemetry wrapper for Pydantic AI graph execution
     """
 
-
-
-
+    def wrapper(wrapped, instance, args, kwargs):
+        return common_graph_execution(
+            wrapped,
+            instance,
+            args,
+            kwargs,
+            tracer,
+            version,
+            environment,
+            application_name,
+            capture_message_content,
+        )
+
+    return wrapper
+
+
+def user_prompt_processing(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
+    """
+    Generates a telemetry wrapper for Pydantic AI user prompt processing
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        return common_user_prompt_processing(
+            wrapped,
+            instance,
+            args,
+            kwargs,
+            tracer,
+            version,
+            environment,
+            application_name,
+            capture_message_content,
+        )
+
+    return wrapper
+
+
+def model_request_processing(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
+    """
+    Generates a telemetry wrapper for Pydantic AI model request processing
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        return common_model_request_processing(
+            wrapped,
+            instance,
+            args,
+            kwargs,
+            tracer,
+            version,
+            environment,
+            application_name,
+            capture_message_content,
+        )
+
+    return wrapper
+
+
+def tool_calls_processing(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
+    """
+    Generates a telemetry wrapper for Pydantic AI tool calls processing
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        return common_tool_calls_processing(
             wrapped,
             instance,
             args,
@@ -98,7 +188,6 @@ def async_agent_run(
             environment,
             application_name,
             capture_message_content,
-            response=response,
         )
 
     return wrapper
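The notable behavioral change in this file: `agent_create` and `agent_run` no longer call `wrapped(*args, **kwargs)` eagerly and pass the finished `response` into the common helper; execution now happens inside the helpers (see `execute_with_error_handling` in utils.py below), so the call runs within the span and its latency and exceptions are recorded on it. A minimal sketch of the new shape (names other than the OTel API are illustrative):

```python
# Sketch: executing inside the span lets the span time the call and
# capture exceptions; pre-executing before the span (the old shape)
# could not. Tracer setup is elided; names are illustrative.
from opentelemetry import trace

tracer = trace.get_tracer("demo")


def instrumented(wrapped, *args, **kwargs):
    with tracer.start_as_current_span("invoke_agent demo") as span:
        try:
            return wrapped(*args, **kwargs)  # timed by the enclosing span
        except Exception as exc:
            span.record_exception(exc)  # failure is visible on the span
            raise


instrumented(lambda: "ok")
```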
openlit/instrumentation/pydantic_ai/utils.py
CHANGED
@@ -1,51 +1,471 @@
 """
-Pydantic AI OpenTelemetry instrumentation utility functions
+Optimized Pydantic AI OpenTelemetry instrumentation utility functions
+This version reduces code duplication and improves performance while maintaining all data.
 """
 
 import logging
+import json
+from typing import Dict, Any, Optional, List, Tuple
 from opentelemetry.sdk.resources import (
     SERVICE_NAME,
     TELEMETRY_SDK_NAME,
     DEPLOYMENT_ENVIRONMENT,
 )
 from opentelemetry.trace import Status, StatusCode, SpanKind
+from opentelemetry import context as context_api
 from openlit.__helpers import handle_exception
 from openlit.semcov import SemanticConvention
 
+# Try to import enhanced helpers for business intelligence
+try:
+    from openlit.__helpers import get_chat_model_cost
+
+    ENHANCED_HELPERS_AVAILABLE = True
+except ImportError:
+    ENHANCED_HELPERS_AVAILABLE = False
+
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)
 
+# Constants for common node names to avoid hardcoding
+INTERNAL_NODE_NAMES = {"tool_calls_node", "model_request_node", "user_prompt_node"}
+
+
+class PydanticAIInstrumentationContext:
+    """
+    Context object to hold common instrumentation data and reduce repeated extraction.
+    """
+
+    def __init__(self, instance, args, kwargs, version, environment, application_name):
+        self.instance = instance
+        self.args = args
+        self.kwargs = kwargs
+        self.version = version
+        self.environment = environment
+        self.application_name = application_name
+
+        # Pre-extract common data to avoid repeated parsing
+        self._agent_name = None
+        self._model_name = None
+        self._server_info = None
+        self._messages = None
+        self._tools = None
+        self._model_params = None
+
+    @property
+    def agent_name(self) -> str:
+        """Get agent name with caching."""
+        if self._agent_name is None:
+            self._agent_name = getattr(self.instance, "name", None) or "pydantic_agent"
+        return self._agent_name
+
+    @property
+    def model_name(self) -> str:
+        """Get model name with caching."""
+        if self._model_name is None:
+            if hasattr(self.instance, "model") and hasattr(
+                self.instance.model, "model_name"
+            ):
+                self._model_name = str(self.instance.model.model_name)
+            else:
+                self._model_name = "unknown"
+        return self._model_name
+
+    @property
+    def server_info(self) -> Tuple[str, int]:
+        """Get server address and port with caching."""
+        if self._server_info is None:
+            # Determine server based on model
+            if "openai" in self.model_name.lower():
+                self._server_info = ("api.openai.com", 443)
+            else:
+                self._server_info = ("127.0.0.1", 80)
+        return self._server_info
+
+    @property
+    def messages(self) -> List[Dict]:
+        """Get extracted messages with caching."""
+        if self._messages is None:
+            self._messages = self._extract_messages()
+        return self._messages
+
+    @property
+    def tools(self) -> List:
+        """Get extracted tools with caching."""
+        if self._tools is None:
+            self._tools = self._extract_tools()
+        return self._tools
+
+    @property
+    def model_params(self) -> Dict[str, Any]:
+        """Get model parameters with caching."""
+        if self._model_params is None:
+            self._model_params = self._extract_model_parameters()
+        return self._model_params
+
+    def _extract_messages(self) -> List[Dict]:
+        """Extract messages from context."""
+        messages = []
+        try:
+            # Extract user message from args
+            if self.args and len(self.args) > 0:
+                user_message = self.args[0]
+                if isinstance(user_message, str):
+                    messages.append({"role": "user", "content": user_message})
+
+            # Extract system prompt if available
+            if (
+                hasattr(self.instance, "_system_prompts")
+                and self.instance._system_prompts
+            ):
+                system_prompt = str(self.instance._system_prompts)
+                if system_prompt:
+                    messages.insert(0, {"role": "system", "content": system_prompt})
+
+            # Extract additional context from kwargs
+            if "message_history" in self.kwargs:
+                history = self.kwargs["message_history"]
+                if isinstance(history, list):
+                    messages.extend(history)
+        except Exception as e:
+            logger.debug("Failed to extract messages: %s", e)
+
+        return messages
+
+    def _extract_tools(self) -> List:
+        """Extract tool definitions from instance."""
+        tools = []
+        try:
+            if hasattr(self.instance, "_tools") and self.instance._tools:
+                tools = self.instance._tools
+        except Exception as e:
+            logger.debug("Failed to extract tools: %s", e)
+        return tools
+
+    def _extract_model_parameters(self) -> Dict[str, Any]:
+        """Extract model parameters from instance."""
+        parameters = {}
+        try:
+            if hasattr(self.instance, "model"):
+                model = self.instance.model
+                param_names = [
+                    "temperature",
+                    "top_p",
+                    "max_tokens",
+                    "frequency_penalty",
+                    "presence_penalty",
+                    "stop",
+                    "seed",
+                    "top_k",
+                ]
+
+                for param in param_names:
+                    if hasattr(model, param):
+                        value = getattr(model, param)
+                        if value is not None:
+                            parameters[param] = value
+        except Exception as e:
+            logger.debug("Failed to extract model parameters: %s", e)
+
+        return parameters
+
 
 def set_span_attributes(
     span,
-
-
-
-
-
-    server_port,
-    request_model,
-    agent_name,
+    operation_name: str,
+    ctx: PydanticAIInstrumentationContext,
+    agent_name: Optional[str] = None,
+    lifecycle_phase: Optional[str] = None,
+    additional_attrs: Optional[Dict[str, Any]] = None,
 ):
     """
-
+    Optimized function to set common OpenTelemetry span attributes.
+
+    Args:
+        span: OpenTelemetry span object
+        operation_name: The operation name for the span
+        ctx: PydanticAIInstrumentationContext with cached data
+        agent_name: Optional agent name (uses ctx.agent_name if not provided)
+        lifecycle_phase: Optional lifecycle phase
+        additional_attrs: Optional additional attributes to set
     """
 
-    # Set
+    # Set core attributes
     span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
     span.set_attribute(SemanticConvention.GEN_AI_OPERATION, operation_name)
     span.set_attribute(
         SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_PYDANTIC_AI
     )
-
+
+    # Set agent name if meaningful
+    final_agent_name = agent_name or ctx.agent_name
+    if final_agent_name and final_agent_name not in INTERNAL_NODE_NAMES:
+        span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, final_agent_name)
+
+    # Set server info
+    server_address, server_port = ctx.server_info
     span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
     span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
-    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
 
-    # Set
-    span.set_attribute(
-
-
+    # Set model info
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, ctx.model_name)
+
+    # Set environment attributes
+    span.set_attribute(DEPLOYMENT_ENVIRONMENT, ctx.environment)
+    span.set_attribute(SERVICE_NAME, ctx.application_name)
+    span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, ctx.version)
+
+    # Set lifecycle phase if provided
+    if lifecycle_phase:
+        span.set_attribute(
+            SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE, lifecycle_phase
+        )
+
+    # Set additional attributes
+    if additional_attrs:
+        for key, value in additional_attrs.items():
+            span.set_attribute(key, value)
+
+
+def add_message_tracking(span, messages: List[Dict], message_type: str = "input"):
+    """
+    Optimized message tracking function.
+    """
+    if not messages:
+        return
+
+    try:
+        # Convert to standard format
+        formatted_messages = []
+        for message in messages:
+            formatted_message = {
+                "role": message.get("role", "user"),
+                "content": message.get("content", ""),
+            }
+            if "tool_calls" in message:
+                formatted_message["tool_calls"] = message["tool_calls"]
+            formatted_messages.append(formatted_message)
+
+        # Set message attributes
+        if message_type == "input":
+            span.set_attribute(
+                SemanticConvention.GEN_AI_CONTENT_PROMPT, json.dumps(formatted_messages)
+            )
+        else:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_OUTPUT_MESSAGES,
+                json.dumps(formatted_messages),
+            )
+
+        # Add metadata
+        if formatted_messages:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_MESSAGE_ROLE,
+                formatted_messages[0].get("role", "user"),
+            )
+            total_length = sum(
+                len(str(msg.get("content", ""))) for msg in formatted_messages
+            )
+            span.set_attribute("gen_ai.message.total_length", total_length)
+
+    except Exception as e:
+        logger.debug("Failed to add message tracking: %s", e)
+
+
+def add_tool_tracking(span, tools: List):
+    """
+    Optimized tool tracking function.
+    """
+    if not tools:
+        return
+
+    try:
+        formatted_tools = []
+        for tool in tools:
+            if hasattr(tool, "name"):
+                formatted_tool = {
+                    "name": tool.name,
+                    "description": getattr(tool, "description", ""),
+                }
+                if hasattr(tool, "json_schema"):
+                    formatted_tool["schema"] = tool.json_schema
+            else:
+                formatted_tool = {
+                    "name": tool.get("name", ""),
+                    "description": tool.get("description", ""),
+                }
+                if "schema" in tool:
+                    formatted_tool["schema"] = tool["schema"]
+            formatted_tools.append(formatted_tool)
+
+        span.set_attribute(
+            SemanticConvention.GEN_AI_AGENT_TOOLS, json.dumps(formatted_tools)
+        )
+
+    except Exception as e:
+        logger.debug("Failed to add tool tracking: %s", e)
+
+
+def execute_with_error_handling(
+    span, wrapped, args, kwargs, capture_completion: bool = False
+):
+    """
+    Execute wrapped function with standardized error handling.
+    """
+    try:
+        response = wrapped(*args, **kwargs)
+
+        # Add completion content if requested
+        if capture_completion and hasattr(response, "data"):
+            span.set_attribute(
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION, str(response.data)
+            )
+
+        span.set_status(Status(StatusCode.OK))
+        return response
+
+    except Exception as e:
+        handle_exception(span, e)
+        logger.error("Error in instrumentation: %s", e)
+        raise
+
+
+# Context extraction utilities for internal nodes
+def extract_context_info(args, kwargs) -> Dict[str, Any]:
+    """
+    Extract context information from internal node arguments.
+    This reduces code duplication across node instrumentation functions.
+    """
+    info = {
+        "model_info": "",
+        "agent_name": "",
+        "user_input": "",
+        "tool_info": "",
+        "tool_count": 0,
+        "message_count": 0,
+    }
+
+    try:
+        if args and len(args) > 0:
+            context = args[0]
+
+            # Extract model info
+            if hasattr(context, "deps") and hasattr(context.deps, "model"):
+                model = context.deps.model
+                if hasattr(model, "model_name"):
+                    info["model_info"] = str(model.model_name)
+
+            # Extract agent name
+            if hasattr(context, "deps") and hasattr(context.deps, "agent"):
+                agent = context.deps.agent
+                if hasattr(agent, "name") and agent.name:
+                    info["agent_name"] = str(agent.name)
+            elif hasattr(context, "agent") and hasattr(context.agent, "name"):
+                info["agent_name"] = str(context.agent.name)
+
+            # Extract user input
+            if hasattr(context, "user_input"):
+                info["user_input"] = str(context.user_input)[:50]
+
+            # Extract tool information
+            if hasattr(context, "tool_calls") and context.tool_calls:
+                info["tool_count"] = len(context.tool_calls)
+                if context.tool_calls:
+                    info["tool_info"] = getattr(
+                        context.tool_calls[0], "function", {}
+                    ).get("name", "")
+
+            # Extract message count
+            if hasattr(context, "messages") and context.messages:
+                info["message_count"] = len(context.messages)
+
+    except Exception as e:
+        logger.debug("Failed to extract context info: %s", e)
+
+    return info
+
+
+def add_business_intelligence_attributes(
+    span, model_name: str, response, pricing_info, capture_message_content: bool
+):
+    """
+    Optimized business intelligence attributes function.
+    """
+    try:
+        # Extract usage information
+        usage_info = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
+
+        if hasattr(response, "usage"):
+            usage_obj = response.usage
+            usage_info["input_tokens"] = (
+                getattr(usage_obj, "input_tokens", 0)
+                or getattr(usage_obj, "request_tokens", 0)
+                or getattr(usage_obj, "prompt_tokens", 0)
+                or 0
+            )
+            usage_info["output_tokens"] = (
+                getattr(usage_obj, "output_tokens", 0)
+                or getattr(usage_obj, "response_tokens", 0)
+                or getattr(usage_obj, "completion_tokens", 0)
+                or 0
+            )
+            usage_info["total_tokens"] = (
+                usage_info["input_tokens"] + usage_info["output_tokens"]
+            )
+
+        # Set usage attributes
+        if usage_info["input_tokens"] > 0:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, usage_info["input_tokens"]
+            )
+        if usage_info["output_tokens"] > 0:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS,
+                usage_info["output_tokens"],
+            )
+        if usage_info["total_tokens"] > 0:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS, usage_info["total_tokens"]
+            )
+
+        # Calculate cost
+        if (
+            ENHANCED_HELPERS_AVAILABLE
+            and pricing_info
+            and usage_info["input_tokens"] > 0
+        ):
+            try:
+                cost = get_chat_model_cost(
+                    model_name,
+                    pricing_info,
+                    usage_info["input_tokens"],
+                    usage_info["output_tokens"],
+                )
+                if cost > 0:
+                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+            except Exception as e:
+                logger.debug("Failed to calculate cost: %s", e)
+
+        # Add performance metrics
+        if hasattr(response, "duration") and response.duration:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_CLIENT_OPERATION_DURATION, response.duration
+            )
+            if usage_info["total_tokens"] > 0:
+                tokens_per_second = usage_info["total_tokens"] / response.duration
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_PERFORMANCE_TOKENS_PER_SECOND,
+                    tokens_per_second,
+                )
+
+        # Enhanced content capture
+        if capture_message_content and hasattr(response, "output") and response.output:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION, str(response.output)
+            )
+
+    except Exception as e:
+        logger.debug("Failed to add business intelligence attributes: %s", e)
 
 
 def common_agent_run(
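`PydanticAIInstrumentationContext` centralizes attribute extraction behind lazily cached properties, so agent name, model name, messages, tools, and parameters are each parsed at most once per instrumented call even when several helpers consult them. A standalone illustration of the pattern (stdlib `functools.cached_property` is the equivalent shortcut):

```python
# Standalone sketch of the lazy caching used by the context class above.
class CachedContext:
    def __init__(self, instance):
        self.instance = instance
        self._agent_name = None  # sentinel: not extracted yet

    @property
    def agent_name(self) -> str:
        # First access pays the extraction cost; later accesses reuse it.
        if self._agent_name is None:
            self._agent_name = getattr(self.instance, "name", None) or "pydantic_agent"
        return self._agent_name


class FakeAgent:
    name = "billing-agent"


ctx = CachedContext(FakeAgent())
assert ctx.agent_name == "billing-agent"  # extracted once, then cached
```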
@@ -58,54 +478,135 @@ def common_agent_run(
     environment,
     application_name,
     capture_message_content,
-
+    pricing_info=None,
 ):
     """
-
+    Optimized agent run function using context caching and standardized patterns.
     """
+    # Suppression check
+    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
 
-
-
-
-    span_name = (
-        f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {agent_name}"
+    # Create cached context
+    ctx = PydanticAIInstrumentationContext(
+        instance, args, kwargs, version, environment, application_name
     )
 
+    # Determine span name
+    operation_type = SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT
+    span_name = f"{operation_type} {ctx.agent_name}"
+
     with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
-
-
-
-
-
-
-
-
-
-
-
-
-
-            SemanticConvention.GEN_AI_AGENT_DESCRIPTION,
-            str(instance._system_prompts),
-        )
-        span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, request_model)
-
-        if capture_message_content:
-            span.add_event(
-                name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
-                attributes={
-                    SemanticConvention.GEN_AI_CONTENT_COMPLETION: response.output,
-                },
-            )
+        # Set common attributes
+        set_span_attributes(
+            span=span,
+            operation_name=SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
+            ctx=ctx,
+            lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_EXECUTE,
+            additional_attrs={
+                SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
+                    getattr(instance, "_system_prompts", "")
+                ),
+                SemanticConvention.GEN_AI_RESPONSE_MODEL: ctx.model_name,
+            },
+        )
 
-
+        # Add message tracking if enabled
+        if capture_message_content and ctx.messages:
+            add_message_tracking(span, ctx.messages, "input")
 
-
+        # Add tool tracking if tools exist
+        if ctx.tools:
+            add_tool_tracking(span, ctx.tools)
 
-
-
-
-
+        # Add model parameters if available
+        if ctx.model_params:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_REQUEST_PARAMETERS,
+                json.dumps(ctx.model_params),
+            )
+
+        # Execute with error handling
+        response = execute_with_error_handling(
+            span, wrapped, args, kwargs, capture_completion=False
+        )
+
+        # Add business intelligence
+        add_business_intelligence_attributes(
+            span, ctx.model_name, response, pricing_info, capture_message_content
+        )
+
+        return response
+
+
+async def common_agent_run_async(
+    wrapped,
+    instance,
+    args,
+    kwargs,
+    tracer,
+    version,
+    environment,
+    application_name,
+    capture_message_content,
+    pricing_info=None,
+):
+    """
+    Optimized async agent run function using context caching and standardized patterns.
+    """
+    # Suppression check
+    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
+        return await wrapped(*args, **kwargs)
+
+    # Create cached context
+    ctx = PydanticAIInstrumentationContext(
+        instance, args, kwargs, version, environment, application_name
+    )
+
+    # Determine span name
+    operation_type = SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK
+    span_name = f"{operation_type} {ctx.agent_name}"
+
+    with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+        # Set common attributes
+        set_span_attributes(
+            span=span,
+            operation_name=SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
+            ctx=ctx,
+            lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_EXECUTE,
+            additional_attrs={
+                SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
+                    getattr(instance, "_system_prompts", "")
+                ),
+                SemanticConvention.GEN_AI_RESPONSE_MODEL: ctx.model_name,
+            },
+        )
+
+        # Add message tracking if enabled
+        if capture_message_content and ctx.messages:
+            add_message_tracking(span, ctx.messages, "input")
+
+        # Add tool tracking if tools exist
+        if ctx.tools:
+            add_tool_tracking(span, ctx.tools)
+
+        # Add model parameters if available
+        if ctx.model_params:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_REQUEST_PARAMETERS,
+                json.dumps(ctx.model_params),
+            )
+
+        # Execute async function
+        response = await wrapped(*args, **kwargs)
+
+        # Add business intelligence
+        add_business_intelligence_attributes(
+            span, ctx.model_name, response, pricing_info, capture_message_content
+        )
+
+        span.set_status(Status(StatusCode.OK))
+        return response
 
 
 def common_agent_create(
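Every new common handler opens with the OpenTelemetry suppression-key guard, the standard way instrumentation layers avoid tracing their own nested calls (`_SUPPRESS_INSTRUMENTATION_KEY` is a private opentelemetry-python constant, used here exactly as the diff does). A self-contained sketch:

```python
# Sketch of the suppression guard; runnable with opentelemetry-api installed.
from opentelemetry import context as context_api
from opentelemetry.context import attach, detach


def guarded_call(wrapped, *args, **kwargs):
    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)  # pass through untraced
    # ...otherwise open a span and record telemetry around the call...
    return wrapped(*args, **kwargs)


# A caller can suppress instrumentation for a region of code:
token = attach(
    context_api.set_value(context_api._SUPPRESS_INSTRUMENTATION_KEY, True)
)
try:
    guarded_call(print, "this call is not traced")
finally:
    detach(token)
```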
@@ -118,43 +619,295 @@ def common_agent_create(
     environment,
     application_name,
     capture_message_content,
-    response,
 ):
     """
-
+    Optimized agent creation function using context caching and standardized patterns.
     """
+    # Suppression check
+    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
 
-
+    # Create minimal context for agent creation
     agent_name = kwargs.get("name", "pydantic_agent")
-
+    request_model = (
+        args[0] if args else kwargs.get("model", "google-gla:gemini-1.5-flash")
+    )
+
+    # Create a minimal context object for creation
+    class CreateContext:
+        """Minimal context for agent creation instrumentation."""
+
+        def __init__(self):
+            self.agent_name = agent_name
+            self.model_name = request_model
+            self.server_info = ("127.0.0.1", 80)
+            self.environment = environment
+            self.application_name = application_name
+            self.version = version
+            self.messages = []
+            self.tools = kwargs.get("tools", [])
+            self.model_params = {}
+
+        def get_context_info(self):
+            """Get context information for instrumentation."""
+            return {
+                "agent_name": self.agent_name,
+                "model_name": self.model_name,
+                "tools_count": len(self.tools),
+            }
+
+        def has_tools(self):
+            """Check if agent has tools configured."""
+            return len(self.tools) > 0
+
+    ctx = CreateContext()
+
+    with tracer.start_as_current_span(
+        f"create_agent {agent_name}", kind=SpanKind.CLIENT
+    ) as span:
+        # Set common attributes
+        set_span_attributes(
+            span=span,
+            operation_name=SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
+            ctx=ctx,
+            lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_CREATE,
+            additional_attrs={
+                SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
+                    kwargs.get("system_prompt", "")
+                ),
+                SemanticConvention.GEN_AI_RESPONSE_MODEL: request_model,
+            },
+        )
+
+        # Add tools if any are provided during creation
+        if ctx.tools:
+            add_tool_tracking(span, ctx.tools)
+
+        # Execute with error handling
+        return execute_with_error_handling(
+            span, wrapped, args, kwargs, capture_completion=False
+        )
+
+
+def common_graph_execution(
+    wrapped,
+    instance,
+    args,
+    kwargs,
+    tracer,
+    version,
+    environment,
+    application_name,
+    capture_message_content,
+):
+    """
+    Handle telemetry for Pydantic AI graph execution operations.
+    This wraps the Agent.iter() method to track graph execution.
+    """
+
+    # CRITICAL: Suppression check to prevent double instrumentation
+    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
+
+    # Create cached context for agent-based operations
+    ctx = PydanticAIInstrumentationContext(
+        instance, args, kwargs, version, environment, application_name
+    )
+
+    operation_type = SemanticConvention.GEN_AI_OPERATION_TYPE_GRAPH_EXECUTION
+    span_name = f"{operation_type} {ctx.agent_name}"
 
     with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
-
-
-
+        # Set common attributes
+        set_span_attributes(
+            span=span,
+            operation_name=operation_type,
+            ctx=ctx,
+            lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_GRAPH_EXECUTION,
+            additional_attrs={
+                SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
+                    getattr(instance, "_system_prompts", "")
+                ),
+            },
+        )
+
+        # Add model parameters if available
+        if ctx.model_params:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_REQUEST_PARAMETERS,
+                json.dumps(ctx.model_params),
+            )
+
+        # Execute with error handling
+        return execute_with_error_handling(
+            span, wrapped, args, kwargs, capture_completion=False
+        )
+
+
+def common_internal_node(
+    wrapped,
+    instance,
+    args,
+    kwargs,
+    tracer,
+    version,
+    environment,
+    application_name,
+    capture_message_content,
+    operation_type,
+    lifecycle_phase,
+    node_type="internal",
+):
+    """
+    Optimized generic function for internal node instrumentation.
+    This consolidates common logic for all internal node types.
+    """
+    # Suppression check
+    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
+
+    # Extract context info efficiently
+    context_info = extract_context_info(args, kwargs)
+
+    # Determine span name
+    if context_info["model_info"]:
+        span_name = f"{operation_type} {context_info['model_info']}"
+    elif context_info["agent_name"]:
+        span_name = f"{operation_type} {context_info['agent_name']}"
+    elif context_info["tool_info"]:
+        span_name = f"{operation_type} {context_info['tool_info']}"
+    else:
+        span_name = f"{operation_type} {node_type}"
+
+    with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+        # Set basic attributes
+        span.set_attribute(SemanticConvention.GEN_AI_OPERATION, operation_type)
+        span.set_attribute(
+            SemanticConvention.GEN_AI_SYSTEM,
+            SemanticConvention.GEN_AI_SYSTEM_PYDANTIC_AI,
+        )
+        span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+        span.set_attribute(
+            SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE, lifecycle_phase
+        )
+
+        # Set server info
+        if operation_type == SemanticConvention.GEN_AI_OPERATION_TYPE_MODEL_REQUEST:
+            span.set_attribute(SemanticConvention.SERVER_ADDRESS, "api.openai.com")
+            span.set_attribute(SemanticConvention.SERVER_PORT, 443)
+        else:
+            span.set_attribute(SemanticConvention.SERVER_ADDRESS, "127.0.0.1")
+            span.set_attribute(SemanticConvention.SERVER_PORT, 80)
+
+        # Set extracted context attributes
+        if context_info["model_info"]:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_REQUEST_MODEL, context_info["model_info"]
             )
-
-
-
-            SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
-            environment,
-            application_name,
-            server_address,
-            server_port,
-            request_model,
-            agent_name,
+        if context_info["agent_name"]:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_AGENT_NAME, context_info["agent_name"]
             )
+        if context_info["user_input"]:
             span.set_attribute(
-            SemanticConvention.
-
+                SemanticConvention.GEN_AI_CONTENT_PROMPT, context_info["user_input"]
+            )
+        if context_info["tool_info"]:
+            span.set_attribute(
+                SemanticConvention.GEN_AI_TOOL_NAME, context_info["tool_info"]
             )
-        span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, request_model)
 
-
+        # Execute with error handling
+        return execute_with_error_handling(
+            span, wrapped, args, kwargs, capture_completion=False
+        )
 
-        return response
 
-
-
-
-
+def common_user_prompt_processing(
+    wrapped,
+    instance,
+    args,
+    kwargs,
+    tracer,
+    version,
+    environment,
+    application_name,
+    capture_message_content,
+):
+    """
+    Optimized user prompt processing function using generic internal node handler.
+    """
+    return common_internal_node(
+        wrapped,
+        instance,
+        args,
+        kwargs,
+        tracer,
+        version,
+        environment,
+        application_name,
+        capture_message_content,
+        operation_type=SemanticConvention.GEN_AI_OPERATION_TYPE_USER_PROMPT_PROCESSING,
+        lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_USER_PROMPT_PROCESSING,
+        node_type="user_input",
+    )
+
+
+def common_model_request_processing(
+    wrapped,
+    instance,
+    args,
+    kwargs,
+    tracer,
+    version,
+    environment,
+    application_name,
+    capture_message_content,
+):
+    """
+    Optimized model request processing function using generic internal node handler.
+    """
+    return common_internal_node(
+        wrapped,
+        instance,
+        args,
+        kwargs,
+        tracer,
+        version,
+        environment,
+        application_name,
+        capture_message_content,
+        operation_type=SemanticConvention.GEN_AI_OPERATION_TYPE_MODEL_REQUEST,
+        lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_MODEL_REQUEST,
+        node_type="llm",
+    )
+
+
+def common_tool_calls_processing(
+    wrapped,
+    instance,
+    args,
+    kwargs,
+    tracer,
+    version,
+    environment,
+    application_name,
+    capture_message_content,
+):
+    """
+    Optimized tool calls processing function using generic internal node handler.
+    """
+    return common_internal_node(
+        wrapped,
+        instance,
+        args,
+        kwargs,
+        tracer,
+        version,
+        environment,
+        application_name,
+        capture_message_content,
+        operation_type=SemanticConvention.GEN_AI_OPERATION_TYPE_TOOLS,
+        lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_TOOL_EXECUTION,
+        node_type="tools",
+    )
openlit/semcov/__init__.py
CHANGED
@@ -80,6 +80,19 @@ class SemanticConvention:
     GEN_AI_OPERATION_TYPE_AGENT = "invoke_agent"
     GEN_AI_OPERATION_TYPE_CREATE_AGENT = "create_agent"
     GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK = "execute_task"
+    GEN_AI_OPERATION_TYPE_GRAPH_EXECUTION = "graph_execution"
+    GEN_AI_OPERATION_TYPE_USER_PROMPT_PROCESSING = "user_prompt_processing"
+    GEN_AI_OPERATION_TYPE_MODEL_REQUEST = "model_request"
+    GEN_AI_OPERATION_TYPE_TOOL_COORDINATION = "tool_coordination"
+
+    # Model Request Types
+    GEN_AI_MODEL_REQUEST_TYPE_INITIAL = "initial"
+    GEN_AI_MODEL_REQUEST_TYPE_TOOL_RESPONSE = "tool_response"
+    GEN_AI_MODEL_REQUEST_TYPE_CHAT = "chat"
+
+    # Tool Processing Types
+    GEN_AI_TOOL_PROCESSING_TYPE_EXECUTION = "execution"
+    GEN_AI_TOOL_PROCESSING_TYPE_COORDINATION = "coordination"
     GEN_AI_OPERATION_TYPE_RETRIEVE = "retrieve"
 
     # GenAI Output Types (OTel Semconv)
@@ -508,3 +521,39 @@ class SemanticConvention:
     GEN_AI_AGENT_SENDER = "gen_ai.agent.sender"
     GEN_AI_AGENT_MESSAGE_TYPE = "gen_ai.agent.message_type"
     GEN_AI_AGENT_REPLY_MODE = "gen_ai.agent.reply_mode"
+
+    # === ENHANCED SEMANTIC CONVENTIONS FOR COMPREHENSIVE INSTRUMENTATION ===
+
+    # Message structure attributes (reuse existing prompt for input, add output messages)
+    # Note: For input messages, we reuse GEN_AI_CONTENT_PROMPT for consistency
+    GEN_AI_OUTPUT_MESSAGES = "gen_ai.output_messages"
+    GEN_AI_MESSAGE_ROLE = "gen_ai.message.role"
+    GEN_AI_MESSAGE_CONTENT = "gen_ai.message.content"
+
+    # Tool result tracking (extending existing tool attributes)
+    GEN_AI_TOOL_RESULT = "gen_ai.tool.result"
+    GEN_AI_TOOL_SCHEMA = "gen_ai.tool.schema"
+
+    # Model invocation parameters (for comprehensive model tracking)
+    GEN_AI_REQUEST_PARAMETERS = "gen_ai.request.parameters"
+
+    # Session and conversation tracking
+    GEN_AI_SESSION_ID = "gen_ai.session.id"
+    GEN_AI_USER_ID = "gen_ai.user.id"
+
+    # Agent lifecycle phases
+    GEN_AI_AGENT_LIFECYCLE_PHASE = "gen_ai.agent.lifecycle.phase"
+    GEN_AI_AGENT_LIFECYCLE_PHASE_CREATE = "create"
+    GEN_AI_AGENT_LIFECYCLE_PHASE_EXECUTE = "execute"
+    GEN_AI_AGENT_LIFECYCLE_PHASE_GRAPH_EXECUTION = "graph_execution"
+    GEN_AI_AGENT_LIFECYCLE_PHASE_USER_PROMPT_PROCESSING = "user_prompt_processing"
+    GEN_AI_AGENT_LIFECYCLE_PHASE_MODEL_REQUEST = "model_request"
+    GEN_AI_AGENT_LIFECYCLE_PHASE_TOOL_EXECUTION = "tool_execution"
+
+    # Performance metrics (extending existing cost tracking)
+    GEN_AI_PERFORMANCE_TOKENS_PER_SECOND = "gen_ai.performance.tokens_per_second"
+    # Note: For latency/duration, we reuse existing GEN_AI_CLIENT_OPERATION_DURATION
+
+    # Tool execution metadata
+    GEN_AI_TOOL_EXECUTION_DURATION = "gen_ai.tool.execution.duration"
+    GEN_AI_TOOL_EXECUTION_SUCCESS = "gen_ai.tool.execution.success"
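These constants are plain string keys and values; spans carry them like any other OTel attribute. A hedged sketch of how the new lifecycle attributes land on a span (manual span for illustration, assuming openlit is installed; without a configured tracer provider this runs as a no-op):

```python
from opentelemetry import trace
from openlit.semcov import SemanticConvention

tracer = trace.get_tracer("demo")
with tracer.start_as_current_span("invoke_agent my_agent") as span:
    span.set_attribute(
        SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE,
        SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_EXECUTE,
    )
    span.set_attribute(SemanticConvention.GEN_AI_PERFORMANCE_TOKENS_PER_SECOND, 42.0)
```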
{openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.32
+Version: 1.34.33
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/RECORD
CHANGED
@@ -131,9 +131,10 @@ openlit/instrumentation/pinecone/utils.py,sha256=6FouF3XlQn4G0VHPv5pxoe9G7u5SLEs
 openlit/instrumentation/premai/__init__.py,sha256=6s6lW4g_E-STrFJA6LoiQg82O_g3D01ddkPA2OX5USk,1971
 openlit/instrumentation/premai/premai.py,sha256=zCRrl8hoBDKQh9dlB4NbBi6xgi3BBQBx7rWccAb5Nbs,6602
 openlit/instrumentation/premai/utils.py,sha256=UpYne7CgOXTWoq2kEAxXXELZYW1HumdYxBqd4hBlbNA,12772
-openlit/instrumentation/pydantic_ai/__init__.py,sha256=
-openlit/instrumentation/pydantic_ai/pydantic_ai.py,sha256=
-openlit/instrumentation/pydantic_ai/utils.py,sha256=
+openlit/instrumentation/pydantic_ai/__init__.py,sha256=G3ZIewyw1pz0dZoDwO09zKiD_ZHdsdASjh_W6XOsU6g,5311
+openlit/instrumentation/pydantic_ai/async_pydantic_ai.py,sha256=A-HM_3IjB5j5jIv2U0vyNMPrL4z84MBtLn4YRoSsa3Q,782
+openlit/instrumentation/pydantic_ai/pydantic_ai.py,sha256=TTA4ou_8ngJ_isIauw9VwMrEYtB4rwVuFm8KB07YCT8,3886
+openlit/instrumentation/pydantic_ai/utils.py,sha256=FbderCr3ED9S4Qj11Up9HjDZHk0eVM-vsfbdsvAMsR0,29934
 openlit/instrumentation/qdrant/__init__.py,sha256=qBD9kWikrQozgkcYDPgsladlac5VpvtGBwulYqwQQfI,3293
 openlit/instrumentation/qdrant/async_qdrant.py,sha256=kj-q9de8JDGZ6Fw5hlWIcly0SxvFD5M2a6K9PHMuejc,2553
 openlit/instrumentation/qdrant/qdrant.py,sha256=zjaVRpwnvTPJ78mzsM1Dy3Vk-_3rtOI_hKMM9-enWkg,2517
@@ -159,8 +160,8 @@ openlit/instrumentation/vllm/vllm.py,sha256=zdzKUkQYmpFlOQ8rObzRiVZEyHOaJFxUagwC
 openlit/otel/events.py,sha256=iOyKIYHA-QYq5bnHVTV_JKeGC5Tsi1icc5nOa3Km_fA,3825
 openlit/otel/metrics.py,sha256=ipH2NB65yOG7yGB32vqmMQ5HjWSeKAk3q4hzGRXRBOs,7238
 openlit/otel/tracing.py,sha256=6KZc-Yubq-S7wEPshUiMNFkw8XN5WvrqIovWaq3gsKw,3301
-openlit/semcov/__init__.py,sha256=
-openlit-1.34.32.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-openlit-1.34.32.dist-info/METADATA,sha256=
-openlit-1.34.32.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-openlit-1.34.32.dist-info/RECORD,,
+openlit/semcov/__init__.py,sha256=5uZzmgqaVvxo9Nhdy1mHhY-DoMvpU0JxtFf2huyzlnE,25307
+openlit-1.34.33.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.34.33.dist-info/METADATA,sha256=ZxLzDhsj6vxtDbWM7Q9WJbt03IfFl_WEWoJqYP9z0yA,23552
+openlit-1.34.33.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+openlit-1.34.33.dist-info/RECORD,,
{openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/LICENSE
File without changes
{openlit-1.34.32.dist-info → openlit-1.34.33.dist-info}/WHEEL
File without changes