langtrace-python-sdk 3.8.3__py3-none-any.whl → 3.8.5__py3-none-any.whl
This diff shows the changes between publicly available package versions as they were published to their respective public registries. It is provided for informational purposes only.
- langtrace_python_sdk/instrumentation/__init__.py +2 -0
- langtrace_python_sdk/instrumentation/agno/instrumentation.py +6 -3
- langtrace_python_sdk/instrumentation/agno/patch.py +277 -229
- langtrace_python_sdk/instrumentation/openai_agents/__init__.py +5 -0
- langtrace_python_sdk/instrumentation/openai_agents/instrumentation.py +52 -0
- langtrace_python_sdk/instrumentation/openai_agents/patch.py +533 -0
- langtrace_python_sdk/instrumentation/phidata/instrumentation.py +6 -3
- langtrace_python_sdk/langtrace.py +4 -3
- langtrace_python_sdk/utils/llm.py +151 -1
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-3.8.3.dist-info → langtrace_python_sdk-3.8.5.dist-info}/METADATA +3 -2
- {langtrace_python_sdk-3.8.3.dist-info → langtrace_python_sdk-3.8.5.dist-info}/RECORD +15 -12
- {langtrace_python_sdk-3.8.3.dist-info → langtrace_python_sdk-3.8.5.dist-info}/WHEEL +0 -0
- {langtrace_python_sdk-3.8.3.dist-info → langtrace_python_sdk-3.8.5.dist-info}/entry_points.txt +0 -0
- {langtrace_python_sdk-3.8.3.dist-info → langtrace_python_sdk-3.8.5.dist-info}/licenses/LICENSE +0 -0
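The headline change in this release is a new OpenAI Agents instrumentation, exported from langtrace_python_sdk.instrumentation alongside the existing instrumentors (see the __init__.py hunks below). As a minimal, hypothetical usage sketch (the class name comes from this diff; the instrument() call is the standard OpenTelemetry BaseInstrumentor API rather than anything shown here), the new instrumentor could be driven manually like so:

    # Hypothetical sketch, not part of the diff. instrument() is inherited from
    # the OpenTelemetry BaseInstrumentor base class used throughout this SDK;
    # langtrace.init() would normally register exported instrumentors for you.
    from langtrace_python_sdk.instrumentation import OpenAIAgentsInstrumentation

    instrumentor = OpenAIAgentsInstrumentation()
    instrumentor.instrument()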
langtrace_python_sdk/instrumentation/__init__.py
@@ -24,6 +24,7 @@ from .milvus import MilvusInstrumentation
 from .mistral import MistralInstrumentation
 from .ollama import OllamaInstrumentor
 from .openai import OpenAIInstrumentation
+from .openai_agents import OpenAIAgentsInstrumentation
 from .phidata import PhiDataInstrumentation
 from .pinecone import PineconeInstrumentation
 from .pymongo import PyMongoInstrumentation
@@ -64,4 +65,5 @@ __all__ = [
     "PhiDataInstrumentation",
     "AgnoInstrumentation",
     "CleanLabInstrumentation",
+    "OpenAIAgentsInstrumentation",
 ]
langtrace_python_sdk/instrumentation/agno/instrumentation.py
@@ -1,5 +1,5 @@
 """
-Copyright (c)
+Copyright (c) 2025 Scale3 Labs

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,13 +14,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

+from typing import Collection
+
+from importlib_metadata import version as v
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper as _W
-
-from importlib_metadata import version as v
+
 from .patch import patch_agent, patch_memory

+
 class AgnoInstrumentation(BaseInstrumentor):
     def instrumentation_dependencies(self) -> Collection[str]:
         return ["agno >= 1.1.4"]
langtrace_python_sdk/instrumentation/agno/patch.py
@@ -1,10 +1,28 @@
+"""
+Copyright (c) 2025 Scale3 Labs
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
 import json
+import time
+from typing import Any
+
 from importlib_metadata import version as v
 from langtrace.trace_attributes import FrameworkSpanAttributes
 from opentelemetry import baggage
 from opentelemetry.trace import Span, SpanKind, Tracer
 from opentelemetry.trace.status import Status, StatusCode
-from typing import Dict, Any, Optional

 from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
 from langtrace_python_sdk.constants.instrumentation.common import (
@@ -15,50 +33,12 @@ from langtrace_python_sdk.utils import set_span_attribute
 from langtrace_python_sdk.utils.llm import get_span_name, set_span_attributes
 from langtrace_python_sdk.utils.misc import serialize_args, serialize_kwargs

-def _safe_serialize(obj):
-    """Safely serialize objects that might not be JSON serializable"""
-    if hasattr(obj, 'to_dict'):
-        return obj.to_dict()
-    elif hasattr(obj, '__dict__'):
-        return {k: _safe_serialize(v) for k, v in obj.__dict__.items() if not k.startswith('_')}
-    elif isinstance(obj, dict):
-        return {k: _safe_serialize(v) for k, v in obj.items()}
-    elif isinstance(obj, (list, tuple)):
-        return [_safe_serialize(i) for i in obj]
-    return str(obj)
-
-def _safe_json_dumps(obj):
-    """Safely dump an object to JSON, handling non-serializable types"""
-    try:
-        return json.dumps(obj)
-    except (TypeError, ValueError):
-        return json.dumps(_safe_serialize(obj))

-def _extract_metrics(metrics):
-    """Helper function to extract and format metrics"""
-    if not metrics:
-        return {}
-
-    if hasattr(metrics, 'to_dict'):
-        metrics = metrics.to_dict()
-    elif hasattr(metrics, '__dict__'):
-        metrics = {k: v for k, v in metrics.__dict__.items() if not k.startswith('_')}
-
-    formatted_metrics = {}
-
-    for key in ['time', 'time_to_first_token', 'input_tokens', 'output_tokens',
-                'prompt_tokens', 'completion_tokens', 'total_tokens',
-                'prompt_tokens_details', 'completion_tokens_details', 'tool_call_times']:
-        if key in metrics:
-            formatted_metrics[key] = metrics[key]
-
-    return formatted_metrics
-
-
-def patch_memory(operation_name, version, tracer: Tracer):
+def patch_agent(operation_name, version, tracer: Tracer):
     def traced_method(wrapped, instance, args, kwargs):
-        service_provider = SERVICE_PROVIDERS
+        service_provider = SERVICE_PROVIDERS.get("AGNO", "agno")
         extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
         span_attributes = {
             "langtrace.sdk.name": "langtrace-python-sdk",
             "langtrace.service.name": service_provider,
@@ -68,20 +48,12 @@ def patch_memory(operation_name, version, tracer: Tracer):
             **(extra_attributes if extra_attributes is not None else {}),
         }

-        span_attributes.update({
-            "agno.memory.type": type(instance).__name__,
-            "agno.memory.create_session_summary": str(instance.create_session_summary),
-            "agno.memory.create_user_memories": str(instance.create_user_memories),
-            "agno.memory.retrieval": str(instance.retrieval)
-        })
-
         inputs = {}
         if len(args) > 0:
             inputs["args"] = serialize_args(*args)
         if len(kwargs) > 0:
             inputs["kwargs"] = serialize_kwargs(**kwargs)
-        span_attributes["agno.
-
+        span_attributes["agno.agent.inputs"] = json.dumps(inputs)
         attributes = FrameworkSpanAttributes(**span_attributes)

         with tracer.start_as_current_span(
@@ -89,30 +61,110 @@
         ) as span:
             try:
                 set_span_attributes(span, attributes)
+                AgnoSpanAttributes(span=span, instance=instance)
+
                 result = wrapped(*args, **kwargs)
-
-                if result is not None:
-                    set_span_attribute(span, "agno.memory.output", str(result))
-
-                if instance.summary is not None:
-                    set_span_attribute(span, "agno.memory.summary", str(instance.summary))
-                if instance.memories is not None:
-                    set_span_attribute(span, "agno.memory.memories_count", str(len(instance.memories)))

                 span.set_status(Status(StatusCode.OK))
-                return result

+                if operation_name in ["Agent._run", "Agent._arun", "Agent.run", "Agent.arun", "Agent.print_response"]:
+                    try:
+                        if hasattr(instance, "run_response") and instance.run_response:
+                            if hasattr(instance.run_response, "run_id") and instance.run_response.run_id:
+                                set_span_attribute(span, "agno.agent.run_id", instance.run_response.run_id)
+
+                            if hasattr(instance.run_response, "created_at") and instance.run_response.created_at:
+                                set_span_attribute(span, "agno.agent.timestamp", instance.run_response.created_at)
+
+                            if hasattr(instance.run_response, "content") and instance.run_response.content:
+                                content = str(instance.run_response.content)
+                                set_span_attribute(span, "agno.agent.response_content", content)
+
+                            # Capture any tools that were used
+                            if hasattr(instance.run_response, "tools") and instance.run_response.tools:
+                                tools = instance.run_response.tools
+                                tool_summary = []
+                                for tool in tools:
+                                    if 'tool_name' in tool:
+                                        tool_summary.append(tool['tool_name'])
+                                    elif 'function' in tool and 'name' in tool['function']:
+                                        tool_summary.append(tool['function']['name'])
+                                set_span_attribute(span, "agno.agent.tools_used", json.dumps(tool_summary))
+
+                            if hasattr(instance.run_response, "metrics") and instance.run_response.metrics:
+                                metrics = instance.run_response.metrics
+                                for metric_name, metric_values in metrics.items():
+                                    if isinstance(metric_values, list):
+
+                                        if all(isinstance(v, (int, float)) for v in metric_values):
+                                            set_span_attribute(
+                                                span,
+                                                f"agno.agent.metrics.{metric_name}",
+                                                sum(metric_values) / len(metric_values) if metric_values else 0
+                                            )
+                                        elif len(metric_values) > 0:
+                                            set_span_attribute(
+                                                span,
+                                                f"agno.agent.metrics.{metric_name}",
+                                                str(metric_values[-1])
+                                            )
+                                    else:
+                                        set_span_attribute(
+                                            span,
+                                            f"agno.agent.metrics.{metric_name}",
+                                            str(metric_values)
+                                        )
+
+                                if 'input_tokens' in metrics:
+                                    if isinstance(metrics['input_tokens'], list) and metrics['input_tokens']:
+                                        set_span_attribute(span, "agno.agent.token_usage.input",
+                                                           sum(metrics['input_tokens']))
+                                    else:
+                                        set_span_attribute(span, "agno.agent.token_usage.input",
+                                                           metrics['input_tokens'])
+
+                                if 'output_tokens' in metrics:
+                                    if isinstance(metrics['output_tokens'], list) and metrics['output_tokens']:
+                                        set_span_attribute(span, "agno.agent.token_usage.output",
+                                                           sum(metrics['output_tokens']))
+                                    else:
+                                        set_span_attribute(span, "agno.agent.token_usage.output",
+                                                           metrics['output_tokens'])
+
+                                if 'total_tokens' in metrics:
+                                    if isinstance(metrics['total_tokens'], list) and metrics['total_tokens']:
+                                        set_span_attribute(span, "agno.agent.token_usage.total",
+                                                           sum(metrics['total_tokens']))
+                                    else:
+                                        set_span_attribute(span, "agno.agent.token_usage.total",
+                                                           metrics['total_tokens'])
+                    except Exception as err:
+                        set_span_attribute(span, "agno.agent.run_response_error", str(err))
+
+                return result
+
             except Exception as err:
                 span.record_exception(err)
                 span.set_status(Status(StatusCode.ERROR, str(err)))
                 raise
-
+
     return traced_method

-
+
+def patch_memory(operation_name, version, tracer: Tracer):
+    """
+    Apply instrumentation patches to AgentMemory class methods.
+
+    Args:
+        operation_name: The name of the operation
+        version: The version of Agno
+        tracer: The OpenTelemetry tracer
+    """
     def traced_method(wrapped, instance, args, kwargs):
-        service_provider = SERVICE_PROVIDERS
+        service_provider = SERVICE_PROVIDERS.get("AGNO", "agno")
         extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
+        # Collect basic span attributes
         span_attributes = {
             "langtrace.sdk.name": "langtrace-python-sdk",
             "langtrace.service.name": service_provider,
@@ -122,194 +174,190 @@ def patch_agent(operation_name, version, tracer: Tracer):
             **(extra_attributes if extra_attributes is not None else {}),
         }

+        # Collect inputs
+        inputs = {}
+        if len(args) > 0:
+            inputs["args"] = serialize_args(*args)
+        if len(kwargs) > 0:
+            inputs["kwargs"] = serialize_kwargs(**kwargs)
+
+        span_attributes["agno.memory.inputs"] = json.dumps(inputs)
+
+        if hasattr(instance, "messages"):
+            span_attributes["agno.memory.messages_count_before"] = len(instance.messages)
+        if hasattr(instance, "runs"):
+            span_attributes["agno.memory.runs_count_before"] = len(instance.runs)
+        if hasattr(instance, "memories") and instance.memories:
+            span_attributes["agno.memory.memories_count_before"] = len(instance.memories)
+
         attributes = FrameworkSpanAttributes(**span_attributes)

         with tracer.start_as_current_span(
             get_span_name(operation_name), kind=SpanKind.CLIENT
         ) as span:
+            start_time = time.time()
             try:
+                # Set attributes
                 set_span_attributes(span, attributes)
-
-
+
+                # Execute the wrapped method
                 result = wrapped(*args, **kwargs)
-
-                if not is_streaming and not operation_name.startswith('Agent._'):
-                    if hasattr(result, 'to_dict'):
-                        _process_response(span, result)
-                    return result
-
-                # Handle streaming (generator) case
-                return _process_generator(span, result)

+                # Add memory stats after operation
+                if hasattr(instance, "messages"):
+                    set_span_attribute(span, "agno.memory.messages_count_after", len(instance.messages))
+                if hasattr(instance, "runs"):
+                    set_span_attribute(span, "agno.memory.runs_count_after", len(instance.runs))
+                if hasattr(instance, "memories") and instance.memories:
+                    set_span_attribute(span, "agno.memory.memories_count_after", len(instance.memories))
+
+                # Record execution time
+                set_span_attribute(span, "agno.memory.execution_time_ms", int((time.time() - start_time) * 1000))
+
+                # Record success status
+                span.set_status(Status(StatusCode.OK))
+
+                # Add result if relevant
+                if result is not None:
+                    set_span_attribute(span, "agno.memory.result", str(result))
+
+                return result
+
             except Exception as err:
+                # Record the exception
                 span.record_exception(err)
                 span.set_status(Status(StatusCode.ERROR, str(err)))
                 raise
-
-    # Helper function to process a generator
-    def _process_generator(span, result_generator):
-        accumulated_content = ""
-        current_tool_call = None
-        response_metadata = None
-        seen_tool_calls = set()
-
-        try:
-            for response in result_generator:
-                if not hasattr(response, 'to_dict'):
-                    yield response
-                    continue
-
-                _process_response(span, response,
-                                  accumulated_content=accumulated_content,
-                                  current_tool_call=current_tool_call,
-                                  response_metadata=response_metadata,
-                                  seen_tool_calls=seen_tool_calls)
-
-                if response.content:
-                    accumulated_content += response.content
-
-                yield response
-
-        except Exception as err:
-            span.record_exception(err)
-            span.set_status(Status(StatusCode.ERROR, str(err)))
-            raise
-        finally:
-            span.set_status(Status(StatusCode.OK))
-            if len(seen_tool_calls) > 0:
-                span.set_attribute("agno.agent.total_tool_calls", len(seen_tool_calls))

-    def _process_response(span, response, accumulated_content="", current_tool_call=None,
-                          response_metadata=None, seen_tool_calls=set()):
-        if not response_metadata:
-            response_metadata = {
-                "run_id": response.run_id,
-                "agent_id": response.agent_id,
-                "session_id": response.session_id,
-                "model": response.model,
-                "content_type": response.content_type,
-            }
-            for key, value in response_metadata.items():
-                if value is not None:
-                    set_span_attribute(span, f"agno.agent.{key}", str(value))
-
-        if response.content:
-            if accumulated_content:
-                accumulated_content += response.content
-            else:
-                accumulated_content = response.content
-            set_span_attribute(span, "agno.agent.response", accumulated_content)
-
-        if response.messages:
-            for msg in response.messages:
-                if msg.tool_calls:
-                    for tool_call in msg.tool_calls:
-                        tool_id = tool_call.get('id')
-                        if tool_id and tool_id not in seen_tool_calls:
-                            seen_tool_calls.add(tool_id)
-                            tool_info = {
-                                'id': tool_id,
-                                'name': tool_call.get('function', {}).get('name'),
-                                'arguments': tool_call.get('function', {}).get('arguments'),
-                                'start_time': msg.created_at,
-                            }
-                            current_tool_call = tool_info
-                            set_span_attribute(span, f"agno.agent.tool_call.{tool_id}", _safe_json_dumps(tool_info))
-
-                if msg.metrics:
-                    metrics = _extract_metrics(msg.metrics)
-                    role_prefix = f"agno.agent.metrics.{msg.role}"
-                    for key, value in metrics.items():
-                        set_span_attribute(span, f"{role_prefix}.{key}", str(value))
-
-        if response.tools:
-            for tool in response.tools:
-                tool_id = tool.get('tool_call_id')
-                if tool_id and current_tool_call and current_tool_call['id'] == tool_id:
-                    tool_result = {
-                        **current_tool_call,
-                        'result': tool.get('content'),
-                        'error': tool.get('tool_call_error'),
-                        'end_time': tool.get('created_at'),
-                        'metrics': tool.get('metrics'),
-                    }
-                    set_span_attribute(span, f"agno.agent.tool_call.{tool_id}", _safe_json_dumps(tool_result))
-                    current_tool_call = None
-
-        if response.metrics:
-            metrics = _extract_metrics(response.metrics)
-            for key, value in metrics.items():
-                set_span_attribute(span, f"agno.agent.metrics.{key}", str(value))
-
-        if len(seen_tool_calls) > 0:
-            span.set_attribute("agno.agent.total_tool_calls", len(seen_tool_calls))
-
     return traced_method

-class AgnoSpanAttributes:
-    span: Span
-    agent_data: dict

-
+class AgnoSpanAttributes:
+    """
+    Helper class to extract and set Agno Agent attributes on spans.
+    """
+
+    def __init__(self, span: Span, instance: Any) -> None:
+        """
+        Initialize with a span and Agno instance.
+
+        Args:
+            span: OpenTelemetry span to update
+            instance: Agno Agent instance
+        """
         self.span = span
         self.instance = instance
-        self.agent_data = {
-
-            "model": {},
-            "tools": [],
-        }
-
+        self.agent_data = {}
+
         self.run()
-
-    def run(self):
-
-
-
-            "name": self.instance.name,
-            "markdown": self.instance.markdown,
-            "reasoning": self.instance.reasoning,
-            "add_references": self.instance.add_references,
-            "show_tool_calls": self.instance.show_tool_calls,
-            "stream": self.instance.stream,
-            "stream_intermediate_steps": self.instance.stream_intermediate_steps,
-        }
+
+    def run(self) -> None:
+        """Process the instance attributes and add them to the span."""
+        # Collect basic agent attributes
+        self.collect_agent_attributes()

-
+        # Add attributes to span
+        for key, value in self.agent_data.items():
             if value is not None:
-                set_span_attribute(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        if self.instance
-
+                set_span_attribute(
+                    self.span,
+                    f"agno.agent.{key}",
+                    str(value) if not isinstance(value, (int, float, bool)) else value
+                )
+
+    def collect_agent_attributes(self) -> None:
+        """Collect important attributes from the Agent instance."""
+        # Extract basic agent information
+        if hasattr(self.instance, "agent_id"):
+            self.agent_data["id"] = self.instance.agent_id
+
+        if hasattr(self.instance, "name"):
+            self.agent_data["name"] = self.instance.name
+
+        if hasattr(self.instance, "session_id"):
+            self.agent_data["session_id"] = self.instance.session_id
+
+        if hasattr(self.instance, "user_id"):
+            self.agent_data["user_id"] = self.instance.user_id
+
+        if hasattr(self.instance, "run_id"):
+            self.agent_data["run_id"] = self.instance.run_id
+
+        # Extract model information
+        if hasattr(self.instance, "model") and self.instance.model:
+            model = self.instance.model
+            model_info = {}
+
+            if hasattr(model, "id"):
+                model_info["id"] = model.id
+
+            if hasattr(model, "name"):
+                model_info["name"] = model.name
+
+            if hasattr(model, "provider"):
+                model_info["provider"] = model.provider
+
+            # Add temperature if available
+            if hasattr(model, "temperature") and model.temperature is not None:
+                model_info["temperature"] = model.temperature
+
+            # Add max_tokens if available
+            if hasattr(model, "max_tokens") and model.max_tokens is not None:
+                model_info["max_tokens"] = model.max_tokens
+
+            self.agent_data["model"] = json.dumps(model_info)
+
+        # Extract tool information
+        if hasattr(self.instance, "tools") and self.instance.tools:
+            tool_info = []
             for tool in self.instance.tools:
+                tool_data = {}
+
+                # Handle different types of tools
                 if hasattr(tool, "name"):
-
+                    tool_data["name"] = tool.name
+
+                    # Handle DuckDuckGoTools and similar toolkits
+                    if hasattr(tool, "functions") and isinstance(tool.functions, dict):
+                        tool_data["functions"] = list(tool.functions.keys())
+
                 elif hasattr(tool, "__name__"):
-
-
-
-
-
-            "
-
-
-
-
-
-
-
+                    tool_data["name"] = tool.__name__
+                else:
+                    tool_data["name"] = str(tool)
+
+                # Add functions if available
+                if not "functions" in tool_data and hasattr(tool, "functions"):
+                    if callable(getattr(tool, "functions")):
+                        try:
+                            tool_functions = tool.functions()
+                            if isinstance(tool_functions, list):
+                                tool_data["functions"] = [f.__name__ if hasattr(f, "__name__") else str(f)
+                                                          for f in tool_functions]
+                        except:
+                            pass
+
+                tool_info.append(tool_data)
+
+            self.agent_data["tools"] = json.dumps(tool_info)
+
+        # Extract reasoning settings
+        if hasattr(self.instance, "reasoning") and self.instance.reasoning:
+            self.agent_data["reasoning_enabled"] = True
+
+        if hasattr(self.instance, "reasoning_model") and self.instance.reasoning_model:
+            self.agent_data["reasoning_model"] = str(self.instance.reasoning_model.id)
+
+        if hasattr(self.instance, "reasoning_min_steps"):
+            self.agent_data["reasoning_min_steps"] = self.instance.reasoning_min_steps
+
+        if hasattr(self.instance, "reasoning_max_steps"):
+            self.agent_data["reasoning_max_steps"] = self.instance.reasoning_max_steps
+
+        # Extract knowledge settings
+        if hasattr(self.instance, "knowledge") and self.instance.knowledge:
+            self.agent_data["knowledge_enabled"] = True
+
+        # Extract streaming settings
+        if hasattr(self.instance, "stream"):
+            self.agent_data["stream"] = self.instance.stream