langtrace-python-sdk 3.8.4__py3-none-any.whl → 3.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langtrace_python_sdk/instrumentation/agno/patch.py +277 -229
- langtrace_python_sdk/utils/llm.py +151 -1
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/METADATA +1 -1
- {langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/RECORD +8 -8
- {langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/WHEEL +0 -0
- {langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/entry_points.txt +0 -0
- {langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/licenses/LICENSE +0 -0
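If you want to confirm which of these two wheels is active in an environment, a minimal check (assuming the 3.8.5 wheel has been installed, e.g. via pip) is to read the version module that this release bumps:

from langtrace_python_sdk.version import __version__

print(__version__)  # expected to print "3.8.5" after the upgrade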
langtrace_python_sdk/instrumentation/agno/patch.py
CHANGED
@@ -1,10 +1,28 @@
+"""
+Copyright (c) 2025 Scale3 Labs
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
 import json
+import time
+from typing import Any
+
 from importlib_metadata import version as v
 from langtrace.trace_attributes import FrameworkSpanAttributes
 from opentelemetry import baggage
 from opentelemetry.trace import Span, SpanKind, Tracer
 from opentelemetry.trace.status import Status, StatusCode
-from typing import Dict, Any, Optional

 from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
 from langtrace_python_sdk.constants.instrumentation.common import (
@@ -15,50 +33,12 @@ from langtrace_python_sdk.utils import set_span_attribute
 from langtrace_python_sdk.utils.llm import get_span_name, set_span_attributes
 from langtrace_python_sdk.utils.misc import serialize_args, serialize_kwargs

-def _safe_serialize(obj):
-    """Safely serialize objects that might not be JSON serializable"""
-    if hasattr(obj, 'to_dict'):
-        return obj.to_dict()
-    elif hasattr(obj, '__dict__'):
-        return {k: _safe_serialize(v) for k, v in obj.__dict__.items() if not k.startswith('_')}
-    elif isinstance(obj, dict):
-        return {k: _safe_serialize(v) for k, v in obj.items()}
-    elif isinstance(obj, (list, tuple)):
-        return [_safe_serialize(i) for i in obj]
-    return str(obj)
-
-def _safe_json_dumps(obj):
-    """Safely dump an object to JSON, handling non-serializable types"""
-    try:
-        return json.dumps(obj)
-    except (TypeError, ValueError):
-        return json.dumps(_safe_serialize(obj))

-def _extract_metrics(metrics):
-    """Helper function to extract and format metrics"""
-    if not metrics:
-        return {}
-
-    if hasattr(metrics, 'to_dict'):
-        metrics = metrics.to_dict()
-    elif hasattr(metrics, '__dict__'):
-        metrics = {k: v for k, v in metrics.__dict__.items() if not k.startswith('_')}
-
-    formatted_metrics = {}
-
-    for key in ['time', 'time_to_first_token', 'input_tokens', 'output_tokens',
-                'prompt_tokens', 'completion_tokens', 'total_tokens',
-                'prompt_tokens_details', 'completion_tokens_details', 'tool_call_times']:
-        if key in metrics:
-            formatted_metrics[key] = metrics[key]
-
-    return formatted_metrics
-
-
-def patch_memory(operation_name, version, tracer: Tracer):
+def patch_agent(operation_name, version, tracer: Tracer):
     def traced_method(wrapped, instance, args, kwargs):
-        service_provider = SERVICE_PROVIDERS
+        service_provider = SERVICE_PROVIDERS.get("AGNO", "agno")
         extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
         span_attributes = {
             "langtrace.sdk.name": "langtrace-python-sdk",
             "langtrace.service.name": service_provider,
@@ -68,20 +48,12 @@ def patch_memory(operation_name, version, tracer: Tracer):
             **(extra_attributes if extra_attributes is not None else {}),
         }

-        span_attributes.update({
-            "agno.memory.type": type(instance).__name__,
-            "agno.memory.create_session_summary": str(instance.create_session_summary),
-            "agno.memory.create_user_memories": str(instance.create_user_memories),
-            "agno.memory.retrieval": str(instance.retrieval)
-        })
-
         inputs = {}
         if len(args) > 0:
             inputs["args"] = serialize_args(*args)
         if len(kwargs) > 0:
             inputs["kwargs"] = serialize_kwargs(**kwargs)
-        span_attributes["agno.
-
+        span_attributes["agno.agent.inputs"] = json.dumps(inputs)
         attributes = FrameworkSpanAttributes(**span_attributes)

         with tracer.start_as_current_span(
@@ -89,30 +61,110 @@
         ) as span:
             try:
                 set_span_attributes(span, attributes)
+                AgnoSpanAttributes(span=span, instance=instance)
+
                 result = wrapped(*args, **kwargs)
-
-                if result is not None:
-                    set_span_attribute(span, "agno.memory.output", str(result))
-
-                if instance.summary is not None:
-                    set_span_attribute(span, "agno.memory.summary", str(instance.summary))
-                if instance.memories is not None:
-                    set_span_attribute(span, "agno.memory.memories_count", str(len(instance.memories)))

                 span.set_status(Status(StatusCode.OK))
-                return result

+                if operation_name in ["Agent._run", "Agent._arun", "Agent.run", "Agent.arun", "Agent.print_response"]:
+                    try:
+                        if hasattr(instance, "run_response") and instance.run_response:
+                            if hasattr(instance.run_response, "run_id") and instance.run_response.run_id:
+                                set_span_attribute(span, "agno.agent.run_id", instance.run_response.run_id)
+
+                            if hasattr(instance.run_response, "created_at") and instance.run_response.created_at:
+                                set_span_attribute(span, "agno.agent.timestamp", instance.run_response.created_at)
+
+                            if hasattr(instance.run_response, "content") and instance.run_response.content:
+                                content = str(instance.run_response.content)
+                                set_span_attribute(span, "agno.agent.response_content", content)
+
+                            # Capture any tools that were used
+                            if hasattr(instance.run_response, "tools") and instance.run_response.tools:
+                                tools = instance.run_response.tools
+                                tool_summary = []
+                                for tool in tools:
+                                    if 'tool_name' in tool:
+                                        tool_summary.append(tool['tool_name'])
+                                    elif 'function' in tool and 'name' in tool['function']:
+                                        tool_summary.append(tool['function']['name'])
+                                set_span_attribute(span, "agno.agent.tools_used", json.dumps(tool_summary))
+
+                            if hasattr(instance.run_response, "metrics") and instance.run_response.metrics:
+                                metrics = instance.run_response.metrics
+                                for metric_name, metric_values in metrics.items():
+                                    if isinstance(metric_values, list):
+
+                                        if all(isinstance(v, (int, float)) for v in metric_values):
+                                            set_span_attribute(
+                                                span,
+                                                f"agno.agent.metrics.{metric_name}",
+                                                sum(metric_values) / len(metric_values) if metric_values else 0
+                                            )
+                                        elif len(metric_values) > 0:
+                                            set_span_attribute(
+                                                span,
+                                                f"agno.agent.metrics.{metric_name}",
+                                                str(metric_values[-1])
+                                            )
+                                    else:
+                                        set_span_attribute(
+                                            span,
+                                            f"agno.agent.metrics.{metric_name}",
+                                            str(metric_values)
+                                        )
+
+                                if 'input_tokens' in metrics:
+                                    if isinstance(metrics['input_tokens'], list) and metrics['input_tokens']:
+                                        set_span_attribute(span, "agno.agent.token_usage.input",
+                                                           sum(metrics['input_tokens']))
+                                    else:
+                                        set_span_attribute(span, "agno.agent.token_usage.input",
+                                                           metrics['input_tokens'])
+
+                                if 'output_tokens' in metrics:
+                                    if isinstance(metrics['output_tokens'], list) and metrics['output_tokens']:
+                                        set_span_attribute(span, "agno.agent.token_usage.output",
+                                                           sum(metrics['output_tokens']))
+                                    else:
+                                        set_span_attribute(span, "agno.agent.token_usage.output",
+                                                           metrics['output_tokens'])
+
+                                if 'total_tokens' in metrics:
+                                    if isinstance(metrics['total_tokens'], list) and metrics['total_tokens']:
+                                        set_span_attribute(span, "agno.agent.token_usage.total",
+                                                           sum(metrics['total_tokens']))
+                                    else:
+                                        set_span_attribute(span, "agno.agent.token_usage.total",
+                                                           metrics['total_tokens'])
+                    except Exception as err:
+                        set_span_attribute(span, "agno.agent.run_response_error", str(err))
+
+                return result
+
             except Exception as err:
                 span.record_exception(err)
                 span.set_status(Status(StatusCode.ERROR, str(err)))
                 raise
-
+
         return traced_method

-
+
+def patch_memory(operation_name, version, tracer: Tracer):
+    """
+    Apply instrumentation patches to AgentMemory class methods.
+
+    Args:
+        operation_name: The name of the operation
+        version: The version of Agno
+        tracer: The OpenTelemetry tracer
+    """
     def traced_method(wrapped, instance, args, kwargs):
-        service_provider = SERVICE_PROVIDERS
+        service_provider = SERVICE_PROVIDERS.get("AGNO", "agno")
         extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
+        # Collect basic span attributes
         span_attributes = {
             "langtrace.sdk.name": "langtrace-python-sdk",
             "langtrace.service.name": service_provider,
@@ -122,194 +174,190 @@ def patch_agent(operation_name, version, tracer: Tracer):
             **(extra_attributes if extra_attributes is not None else {}),
         }

+        # Collect inputs
+        inputs = {}
+        if len(args) > 0:
+            inputs["args"] = serialize_args(*args)
+        if len(kwargs) > 0:
+            inputs["kwargs"] = serialize_kwargs(**kwargs)
+
+        span_attributes["agno.memory.inputs"] = json.dumps(inputs)
+
+        if hasattr(instance, "messages"):
+            span_attributes["agno.memory.messages_count_before"] = len(instance.messages)
+        if hasattr(instance, "runs"):
+            span_attributes["agno.memory.runs_count_before"] = len(instance.runs)
+        if hasattr(instance, "memories") and instance.memories:
+            span_attributes["agno.memory.memories_count_before"] = len(instance.memories)
+
         attributes = FrameworkSpanAttributes(**span_attributes)

         with tracer.start_as_current_span(
             get_span_name(operation_name), kind=SpanKind.CLIENT
         ) as span:
+            start_time = time.time()
             try:
+                # Set attributes
                 set_span_attributes(span, attributes)
-
-
+
+                # Execute the wrapped method
                 result = wrapped(*args, **kwargs)
-
-                if not is_streaming and not operation_name.startswith('Agent._'):
-                    if hasattr(result, 'to_dict'):
-                        _process_response(span, result)
-                    return result
-
-                # Handle streaming (generator) case
-                return _process_generator(span, result)

+                # Add memory stats after operation
+                if hasattr(instance, "messages"):
+                    set_span_attribute(span, "agno.memory.messages_count_after", len(instance.messages))
+                if hasattr(instance, "runs"):
+                    set_span_attribute(span, "agno.memory.runs_count_after", len(instance.runs))
+                if hasattr(instance, "memories") and instance.memories:
+                    set_span_attribute(span, "agno.memory.memories_count_after", len(instance.memories))
+
+                # Record execution time
+                set_span_attribute(span, "agno.memory.execution_time_ms", int((time.time() - start_time) * 1000))
+
+                # Record success status
+                span.set_status(Status(StatusCode.OK))
+
+                # Add result if relevant
+                if result is not None:
+                    set_span_attribute(span, "agno.memory.result", str(result))
+
+                return result
+
             except Exception as err:
+                # Record the exception
                 span.record_exception(err)
                 span.set_status(Status(StatusCode.ERROR, str(err)))
                 raise
-
-    # Helper function to process a generator
-    def _process_generator(span, result_generator):
-        accumulated_content = ""
-        current_tool_call = None
-        response_metadata = None
-        seen_tool_calls = set()
-
-        try:
-            for response in result_generator:
-                if not hasattr(response, 'to_dict'):
-                    yield response
-                    continue
-
-                _process_response(span, response,
-                                  accumulated_content=accumulated_content,
-                                  current_tool_call=current_tool_call,
-                                  response_metadata=response_metadata,
-                                  seen_tool_calls=seen_tool_calls)
-
-                if response.content:
-                    accumulated_content += response.content
-
-                yield response
-
-        except Exception as err:
-            span.record_exception(err)
-            span.set_status(Status(StatusCode.ERROR, str(err)))
-            raise
-        finally:
-            span.set_status(Status(StatusCode.OK))
-            if len(seen_tool_calls) > 0:
-                span.set_attribute("agno.agent.total_tool_calls", len(seen_tool_calls))

-    def _process_response(span, response, accumulated_content="", current_tool_call=None,
-                          response_metadata=None, seen_tool_calls=set()):
-        if not response_metadata:
-            response_metadata = {
-                "run_id": response.run_id,
-                "agent_id": response.agent_id,
-                "session_id": response.session_id,
-                "model": response.model,
-                "content_type": response.content_type,
-            }
-            for key, value in response_metadata.items():
-                if value is not None:
-                    set_span_attribute(span, f"agno.agent.{key}", str(value))
-
-        if response.content:
-            if accumulated_content:
-                accumulated_content += response.content
-            else:
-                accumulated_content = response.content
-            set_span_attribute(span, "agno.agent.response", accumulated_content)
-
-        if response.messages:
-            for msg in response.messages:
-                if msg.tool_calls:
-                    for tool_call in msg.tool_calls:
-                        tool_id = tool_call.get('id')
-                        if tool_id and tool_id not in seen_tool_calls:
-                            seen_tool_calls.add(tool_id)
-                            tool_info = {
-                                'id': tool_id,
-                                'name': tool_call.get('function', {}).get('name'),
-                                'arguments': tool_call.get('function', {}).get('arguments'),
-                                'start_time': msg.created_at,
-                            }
-                            current_tool_call = tool_info
-                            set_span_attribute(span, f"agno.agent.tool_call.{tool_id}", _safe_json_dumps(tool_info))
-
-                if msg.metrics:
-                    metrics = _extract_metrics(msg.metrics)
-                    role_prefix = f"agno.agent.metrics.{msg.role}"
-                    for key, value in metrics.items():
-                        set_span_attribute(span, f"{role_prefix}.{key}", str(value))
-
-        if response.tools:
-            for tool in response.tools:
-                tool_id = tool.get('tool_call_id')
-                if tool_id and current_tool_call and current_tool_call['id'] == tool_id:
-                    tool_result = {
-                        **current_tool_call,
-                        'result': tool.get('content'),
-                        'error': tool.get('tool_call_error'),
-                        'end_time': tool.get('created_at'),
-                        'metrics': tool.get('metrics'),
-                    }
-                    set_span_attribute(span, f"agno.agent.tool_call.{tool_id}", _safe_json_dumps(tool_result))
-                    current_tool_call = None
-
-        if response.metrics:
-            metrics = _extract_metrics(response.metrics)
-            for key, value in metrics.items():
-                set_span_attribute(span, f"agno.agent.metrics.{key}", str(value))
-
-        if len(seen_tool_calls) > 0:
-            span.set_attribute("agno.agent.total_tool_calls", len(seen_tool_calls))
-
     return traced_method

-class AgnoSpanAttributes:
-    span: Span
-    agent_data: dict

-
+class AgnoSpanAttributes:
+    """
+    Helper class to extract and set Agno Agent attributes on spans.
+    """
+
+    def __init__(self, span: Span, instance: Any) -> None:
+        """
+        Initialize with a span and Agno instance.
+
+        Args:
+            span: OpenTelemetry span to update
+            instance: Agno Agent instance
+        """
         self.span = span
         self.instance = instance
-        self.agent_data = {
-
-            "model": {},
-            "tools": [],
-        }
-
+        self.agent_data = {}
+
         self.run()
-
-    def run(self):
-
-
-
-            "name": self.instance.name,
-            "markdown": self.instance.markdown,
-            "reasoning": self.instance.reasoning,
-            "add_references": self.instance.add_references,
-            "show_tool_calls": self.instance.show_tool_calls,
-            "stream": self.instance.stream,
-            "stream_intermediate_steps": self.instance.stream_intermediate_steps,
-        }
+
+    def run(self) -> None:
+        """Process the instance attributes and add them to the span."""
+        # Collect basic agent attributes
+        self.collect_agent_attributes()

-
+        # Add attributes to span
+        for key, value in self.agent_data.items():
             if value is not None:
-                set_span_attribute(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                if self.instance
-
+                set_span_attribute(
+                    self.span,
+                    f"agno.agent.{key}",
+                    str(value) if not isinstance(value, (int, float, bool)) else value
+                )
+
+    def collect_agent_attributes(self) -> None:
+        """Collect important attributes from the Agent instance."""
+        # Extract basic agent information
+        if hasattr(self.instance, "agent_id"):
+            self.agent_data["id"] = self.instance.agent_id
+
+        if hasattr(self.instance, "name"):
+            self.agent_data["name"] = self.instance.name
+
+        if hasattr(self.instance, "session_id"):
+            self.agent_data["session_id"] = self.instance.session_id
+
+        if hasattr(self.instance, "user_id"):
+            self.agent_data["user_id"] = self.instance.user_id
+
+        if hasattr(self.instance, "run_id"):
+            self.agent_data["run_id"] = self.instance.run_id
+
+        # Extract model information
+        if hasattr(self.instance, "model") and self.instance.model:
+            model = self.instance.model
+            model_info = {}
+
+            if hasattr(model, "id"):
+                model_info["id"] = model.id
+
+            if hasattr(model, "name"):
+                model_info["name"] = model.name
+
+            if hasattr(model, "provider"):
+                model_info["provider"] = model.provider
+
+            # Add temperature if available
+            if hasattr(model, "temperature") and model.temperature is not None:
+                model_info["temperature"] = model.temperature
+
+            # Add max_tokens if available
+            if hasattr(model, "max_tokens") and model.max_tokens is not None:
+                model_info["max_tokens"] = model.max_tokens
+
+            self.agent_data["model"] = json.dumps(model_info)
+
+        # Extract tool information
+        if hasattr(self.instance, "tools") and self.instance.tools:
+            tool_info = []
             for tool in self.instance.tools:
+                tool_data = {}
+
+                # Handle different types of tools
                 if hasattr(tool, "name"):
-
+                    tool_data["name"] = tool.name
+
+                    # Handle DuckDuckGoTools and similar toolkits
+                    if hasattr(tool, "functions") and isinstance(tool.functions, dict):
+                        tool_data["functions"] = list(tool.functions.keys())
+
                 elif hasattr(tool, "__name__"):
-
-
-
-
-
-                    "
-
-
-
-
-
-
-
+                    tool_data["name"] = tool.__name__
+                else:
+                    tool_data["name"] = str(tool)
+
+                # Add functions if available
+                if not "functions" in tool_data and hasattr(tool, "functions"):
+                    if callable(getattr(tool, "functions")):
+                        try:
+                            tool_functions = tool.functions()
+                            if isinstance(tool_functions, list):
+                                tool_data["functions"] = [f.__name__ if hasattr(f, "__name__") else str(f)
+                                                          for f in tool_functions]
+                        except:
+                            pass
+
+                tool_info.append(tool_data)
+
+            self.agent_data["tools"] = json.dumps(tool_info)
+
+        # Extract reasoning settings
+        if hasattr(self.instance, "reasoning") and self.instance.reasoning:
+            self.agent_data["reasoning_enabled"] = True
+
+        if hasattr(self.instance, "reasoning_model") and self.instance.reasoning_model:
+            self.agent_data["reasoning_model"] = str(self.instance.reasoning_model.id)
+
+        if hasattr(self.instance, "reasoning_min_steps"):
+            self.agent_data["reasoning_min_steps"] = self.instance.reasoning_min_steps
+
+        if hasattr(self.instance, "reasoning_max_steps"):
+            self.agent_data["reasoning_max_steps"] = self.instance.reasoning_max_steps
+
+        # Extract knowledge settings
+        if hasattr(self.instance, "knowledge") and self.instance.knowledge:
+            self.agent_data["knowledge_enabled"] = True
+
+        # Extract streaming settings
+        if hasattr(self.instance, "stream"):
+            self.agent_data["stream"] = self.instance.stream
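The metrics handling added to patch_agent above flattens Agno's per-run metrics, which may arrive as lists, into scalar "agno.agent.metrics.*" span attributes: lists of numbers are averaged, other non-empty lists keep only their last element as a string, and anything else is stringified. A condensed sketch of that rule, using a hypothetical helper name that is not part of the SDK:

def flatten_metric(value):
    # Mirrors the aggregation applied to instance.run_response.metrics in patch_agent above.
    if isinstance(value, list):
        if all(isinstance(v, (int, float)) for v in value):
            return sum(value) / len(value) if value else 0  # average numeric samples
        elif len(value) > 0:
            return str(value[-1])  # keep the most recent non-numeric sample
    return str(value)  # non-list values are stringified

# Illustrative inputs:
# flatten_metric([10, 20, 30]) -> 20.0
# flatten_metric(["draft", "final"]) -> "final"
# flatten_metric(1.5) -> "1.5"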
langtrace_python_sdk/utils/llm.py
CHANGED
@@ -94,6 +94,139 @@ def calculate_price_from_usage(model, usage):
     return 0


+def convert_mistral_messages_to_serializable(mistral_messages):
+    serializable_messages = []
+
+    try:
+        for message in mistral_messages:
+            serializable_message = {"role": message.role}
+
+            # Handle content
+            if hasattr(message, "content"):
+                serializable_message["content"] = message.content
+
+            # Handle tool_calls
+            if hasattr(message, "tool_calls") and message.tool_calls is not None:
+                serializable_tool_calls = []
+
+                for tool_call in message.tool_calls:
+                    serializable_tool_call = {}
+
+                    # Handle id, type, and index
+                    if hasattr(tool_call, "id"):
+                        serializable_tool_call["id"] = tool_call.id
+                    if hasattr(tool_call, "type"):
+                        serializable_tool_call["type"] = tool_call.type
+                    if hasattr(tool_call, "index"):
+                        serializable_tool_call["index"] = tool_call.index
+
+                    # Handle function
+                    if hasattr(tool_call, "function"):
+                        function_call = tool_call.function
+                        serializable_function = {}
+
+                        if hasattr(function_call, "name"):
+                            serializable_function["name"] = function_call.name
+                        if hasattr(function_call, "arguments"):
+                            serializable_function["arguments"] = function_call.arguments
+
+                        serializable_tool_call["function"] = serializable_function
+
+                    serializable_tool_calls.append(serializable_tool_call)
+
+                serializable_message["tool_calls"] = serializable_tool_calls
+
+            # Handle tool_call_id for tool messages
+            if hasattr(message, "tool_call_id"):
+                serializable_message["tool_call_id"] = message.tool_call_id
+
+            serializable_messages.append(serializable_message)
+    except Exception as e:
+        pass
+
+    return serializable_messages
+
+
+def convert_gemini_messages_to_serializable(formatted_messages, system_message=None):
+    """
+    Converts Gemini-formatted messages back to a JSON serializable format.
+
+    Args:
+        formatted_messages: The formatted messages from Gemini.
+        system_message (str, optional): System message content.
+
+    Returns:
+        List[dict]: JSON serializable list of message dictionaries.
+    """
+    serializable_messages = []
+
+    try:
+        # Add system message if present
+        if system_message:
+            serializable_messages.append({
+                "role": "system",
+                "content": system_message
+            })
+
+        for message_item in formatted_messages:
+            # Handle the case where the item is a dict with 'role' and 'content' keys
+            if isinstance(message_item, dict) and 'role' in message_item and 'content' in message_item:
+                role = message_item['role']
+                content_value = message_item['content']
+
+                # Initialize our serializable message
+                serializable_message = {"role": role}
+
+                # If content is a list of Content objects
+                if isinstance(content_value, list) and len(content_value) > 0:
+                    for content_obj in content_value:
+                        # Process each Content object
+                        if hasattr(content_obj, 'parts') and hasattr(content_obj, 'role'):
+                            parts = content_obj.parts
+
+                            # Extract text from parts
+                            text_parts = []
+                            for part in parts:
+                                if hasattr(part, 'text') and part.text:
+                                    text_parts.append(part.text)
+
+                            if text_parts:
+                                serializable_message["content"] = " ".join(text_parts)
+
+                            # Here you can add additional processing for other part types
+                            # like function_call, function_response, inline_data, etc.
+                            # Similar to the previous implementation
+
+                # If content is a string or already a primitive type
+                elif isinstance(content_value, (str, int, float, bool)) or content_value is None:
+                    serializable_message["content"] = content_value
+
+                # Add the processed message to our list
+                serializable_messages.append(serializable_message)
+
+            # Handle the case where the item is a Content object directly
+            elif hasattr(message_item, 'role') and hasattr(message_item, 'parts'):
+                # This is the case from the previous implementation
+                # Process a Content object directly
+                serializable_message = {"role": message_item.role}
+
+                parts = message_item.parts
+                text_parts = []
+
+                for part in parts:
+                    if hasattr(part, 'text') and part.text:
+                        text_parts.append(part.text)
+
+                if text_parts:
+                    serializable_message["content"] = " ".join(text_parts)
+
+                serializable_messages.append(serializable_message)
+    except Exception as e:
+        pass
+
+    return serializable_messages
+
+
 def get_langtrace_attributes(version, service_provider, vendor_type="llm"):
     return {
         SpanAttributes.LANGTRACE_SDK_NAME: LANGTRACE_SDK_NAME,
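A small usage sketch of the converters added above (the input here is an illustrative plain-dict message list, not a real Gemini payload; in practice the inputs come from the provider client libraries):

from langtrace_python_sdk.utils.llm import convert_gemini_messages_to_serializable

# Plain dicts with primitive content pass through unchanged; a system message is prepended when given.
messages = [{"role": "user", "content": "Hello"}]
print(convert_gemini_messages_to_serializable(messages, system_message="You are helpful."))
# [{'role': 'system', 'content': 'You are helpful.'}, {'role': 'user', 'content': 'Hello'}]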
@@ -120,6 +253,23 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
         or kwargs.get("top_k", None)
         or kwargs.get("top_n", None)
     )
+
+    try:
+        prompts = json.dumps(prompts) if prompts else None
+    except Exception as e:
+        if "is not JSON serializable" in str(e):
+            # check model
+            if kwargs.get("model") is not None:
+                if kwargs.get("model").startswith("gemini"):
+                    prompts = json.dumps(convert_gemini_messages_to_serializable(prompts))
+                elif kwargs.get("model").startswith("mistral"):
+                    prompts = json.dumps(convert_mistral_messages_to_serializable(prompts))
+                else:
+                    prompts = "[]"
+            else:
+                prompts = "[]"
+        else:
+            prompts = "[]"

     top_p = kwargs.get("p", None) or kwargs.get("top_p", None)
     tools = kwargs.get("tools", None)
@@ -132,7 +282,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
         SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
         SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
         SpanAttributes.LLM_TOP_K: top_k,
-        SpanAttributes.LLM_PROMPTS:
+        SpanAttributes.LLM_PROMPTS: prompts if prompts else None,
         SpanAttributes.LLM_USER: user,
         SpanAttributes.LLM_REQUEST_TOP_P: top_p,
         SpanAttributes.LLM_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
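Taken together, the llm.py hunks above change how get_llm_request_attributes fills SpanAttributes.LLM_PROMPTS: prompts are serialized up front with json.dumps, and when the payload is not JSON serializable the code falls back to the new Gemini/Mistral converters (or to an empty list) based on the requested model. A standalone sketch of that decision, written as a hypothetical helper rather than the SDK's actual function:

import json

from langtrace_python_sdk.utils.llm import (
    convert_gemini_messages_to_serializable,
    convert_mistral_messages_to_serializable,
)


def serialize_prompts(prompts, model=None):
    """Hypothetical helper mirroring the fallback added to get_llm_request_attributes in 3.8.5."""
    if not prompts:
        return None
    try:
        return json.dumps(prompts)
    except Exception as e:
        if "is not JSON serializable" not in str(e) or model is None:
            return "[]"
        if model.startswith("gemini"):
            return json.dumps(convert_gemini_messages_to_serializable(prompts))
        if model.startswith("mistral"):
            return json.dumps(convert_mistral_messages_to_serializable(prompts))
        return "[]"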
langtrace_python_sdk/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "3.8.4"
+__version__ = "3.8.5"
{langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/RECORD
RENAMED
@@ -116,7 +116,7 @@ examples/weaviate_example/__init__.py,sha256=8JMDBsRSEV10HfTd-YC7xb4txBjD3la56sn
 examples/weaviate_example/query_text.py,sha256=wPHQTc_58kPoKTZMygVjTj-2ZcdrIuaausJfMxNQnQc,127162
 langtrace_python_sdk/__init__.py,sha256=VZM6i71NR7pBQK6XvJWRelknuTYUhqwqE7PlicKa5Wg,1166
 langtrace_python_sdk/langtrace.py,sha256=T-DsDrwWaL4gAUK1lkTRRpmvoO7F2WtO5hQZdyrVAxE,13791
-langtrace_python_sdk/version.py,sha256=
+langtrace_python_sdk/version.py,sha256=mb3dZLLIE3dKNa7hv1kuERgx4o1UEUlj7DsxZRc2A38,22
 langtrace_python_sdk/constants/__init__.py,sha256=3CNYkWMdd1DrkGqzLUgNZXjdAlM6UFMlf_F-odAToyc,146
 langtrace_python_sdk/constants/exporter/langtrace_exporter.py,sha256=EVCrouYCpY98f0KSaKr4PzNxPULTZZO6dSA_crEOyJU,106
 langtrace_python_sdk/constants/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -144,7 +144,7 @@ langtrace_python_sdk/extensions/langtrace_filesystem.py,sha256=34fZutG28EJ66l67O
 langtrace_python_sdk/instrumentation/__init__.py,sha256=DC96oELorZedDf5zooQ6HGi-dmieXwUephgeB_LzfaU,2559
 langtrace_python_sdk/instrumentation/agno/__init__.py,sha256=95fn4oA-CHB0mxc6KnVB20KSbXGl_ZZr9n99EEaXzrY,91
 langtrace_python_sdk/instrumentation/agno/instrumentation.py,sha256=XUnfvqpp13IgdF03xGKasq7kGjeaN1sXLIwCf-Nt_Nc,2667
-langtrace_python_sdk/instrumentation/agno/patch.py,sha256=
+langtrace_python_sdk/instrumentation/agno/patch.py,sha256=qCUxCkzU9cYu_d8BzLgj_Ss97qib07tRVYpYDiNnNMs,16876
 langtrace_python_sdk/instrumentation/anthropic/__init__.py,sha256=donrurJAGYlxrSRA3BIf76jGeUcAx9Tq8CVpah68S0Y,101
 langtrace_python_sdk/instrumentation/anthropic/instrumentation.py,sha256=ndXdruI0BG7n75rsuEpKjfzePxrZxg40gZ39ONmD_v4,1845
 langtrace_python_sdk/instrumentation/anthropic/patch.py,sha256=ztPN4VZujoxYOKhTbFnup7Ibms9NAzYCPAJY43NUgKw,4935
@@ -249,7 +249,7 @@ langtrace_python_sdk/instrumentation/weaviate/patch.py,sha256=Lqixz32uAvDA2VLU3z
 langtrace_python_sdk/types/__init__.py,sha256=SJSJzkgPjGGTVJXUZ_FyR3p9DJ5kWGx7iAnJfY4ZYHU,4669
 langtrace_python_sdk/utils/__init__.py,sha256=VVDOG-QLd59ZvSHp0avjof0sbxlZ1QQOf0KoOF7ofhQ,3310
 langtrace_python_sdk/utils/langtrace_sampler.py,sha256=BupNndHbU9IL_wGleKetz8FdcveqHMBVz1bfKTTW80w,1753
-langtrace_python_sdk/utils/llm.py,sha256=
+langtrace_python_sdk/utils/llm.py,sha256=giJU33LvMPaRjPAjUwBCehgHj_ei1HwM7gLJSVWYLnI,23238
 langtrace_python_sdk/utils/misc.py,sha256=LaQr5LOmZMiuwVdjYh7aIu6o2C_Xb1wgpQGNOVmRzfE,1918
 langtrace_python_sdk/utils/prompt_registry.py,sha256=n5dQMVLBw8aJZY8Utvf67bncc25ELf6AH9BYw8_hSzo,2619
 langtrace_python_sdk/utils/sdk_version_checker.py,sha256=F-VVVH7Fmhr5LcY0IIe-34zIi5RQcx26uuxFpPzZesM,1782
@@ -300,8 +300,8 @@ tests/pinecone/cassettes/test_query.yaml,sha256=b5v9G3ssUy00oG63PlFUR3JErF2Js-5A
 tests/pinecone/cassettes/test_upsert.yaml,sha256=neWmQ1v3d03V8WoLl8FoFeeCYImb8pxlJBWnFd_lITU,38607
 tests/qdrant/conftest.py,sha256=9n0uHxxIjWk9fbYc4bx-uP8lSAgLBVx-cV9UjnsyCHM,381
 tests/qdrant/test_qdrant.py,sha256=pzjAjVY2kmsmGfrI2Gs2xrolfuaNHz7l1fqGQCjp5_o,3353
-langtrace_python_sdk-3.8.
-langtrace_python_sdk-3.8.
-langtrace_python_sdk-3.8.
-langtrace_python_sdk-3.8.
-langtrace_python_sdk-3.8.
+langtrace_python_sdk-3.8.5.dist-info/METADATA,sha256=oM9Ya7m7DDYBDT1VtPe3uguAKkvdJImtz2WGgSxmLbo,15844
+langtrace_python_sdk-3.8.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langtrace_python_sdk-3.8.5.dist-info/entry_points.txt,sha256=1_b9-qvf2fE7uQNZcbUei9vLpFZBbbh9LrtGw95ssAo,70
+langtrace_python_sdk-3.8.5.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+langtrace_python_sdk-3.8.5.dist-info/RECORD,,
{langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/WHEEL
RENAMED
File without changes

{langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/entry_points.txt
RENAMED
File without changes

{langtrace_python_sdk-3.8.4.dist-info → langtrace_python_sdk-3.8.5.dist-info}/licenses/LICENSE
RENAMED
File without changes