quraite 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quraite/__init__.py +3 -0
- quraite/adapters/__init__.py +134 -0
- quraite/adapters/agno_adapter.py +159 -0
- quraite/adapters/base.py +123 -0
- quraite/adapters/bedrock_agents_adapter.py +343 -0
- quraite/adapters/flowise_adapter.py +275 -0
- quraite/adapters/google_adk_adapter.py +209 -0
- quraite/adapters/http_adapter.py +239 -0
- quraite/adapters/langflow_adapter.py +192 -0
- quraite/adapters/langgraph_adapter.py +304 -0
- quraite/adapters/langgraph_server_adapter.py +252 -0
- quraite/adapters/n8n_adapter.py +220 -0
- quraite/adapters/openai_agents_adapter.py +269 -0
- quraite/adapters/pydantic_ai_adapter.py +312 -0
- quraite/adapters/smolagents_adapter.py +152 -0
- quraite/logger.py +62 -0
- quraite/schema/__init__.py +0 -0
- quraite/schema/message.py +54 -0
- quraite/schema/response.py +16 -0
- quraite/serve/__init__.py +1 -0
- quraite/serve/cloudflared.py +210 -0
- quraite/serve/local_agent.py +360 -0
- quraite/traces/traces_adk_openinference.json +379 -0
- quraite/traces/traces_agno_multi_agent.json +669 -0
- quraite/traces/traces_agno_openinference.json +321 -0
- quraite/traces/traces_crewai_openinference.json +155 -0
- quraite/traces/traces_langgraph_openinference.json +349 -0
- quraite/traces/traces_langgraph_openinference_multi_agent.json +2705 -0
- quraite/traces/traces_langgraph_traceloop.json +510 -0
- quraite/traces/traces_openai_agents_multi_agent_1.json +402 -0
- quraite/traces/traces_openai_agents_openinference.json +341 -0
- quraite/traces/traces_pydantic_openinference.json +286 -0
- quraite/traces/traces_pydantic_openinference_multi_agent_1.json +399 -0
- quraite/traces/traces_pydantic_openinference_multi_agent_2.json +398 -0
- quraite/traces/traces_smol_agents_openinference.json +397 -0
- quraite/traces/traces_smol_agents_tool_calling_openinference.json +704 -0
- quraite/tracing/__init__.py +24 -0
- quraite/tracing/constants.py +16 -0
- quraite/tracing/span_exporter.py +115 -0
- quraite/tracing/span_processor.py +49 -0
- quraite/tracing/tool_extractors.py +290 -0
- quraite/tracing/trace.py +494 -0
- quraite/tracing/types.py +179 -0
- quraite/tracing/utils.py +170 -0
- quraite/utils/__init__.py +0 -0
- quraite/utils/json_utils.py +269 -0
- quraite-0.0.1.dist-info/METADATA +44 -0
- quraite-0.0.1.dist-info/RECORD +49 -0
- quraite-0.0.1.dist-info/WHEEL +4 -0
quraite/tracing/trace.py
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
1
|
+
# mypy: disable-error-code="arg-type,attr-defined"
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
from functools import cached_property
|
|
6
|
+
from typing import Any, List
|
|
7
|
+
|
|
8
|
+
from openinference.semconv.trace import OpenInferenceSpanKindValues, SpanAttributes
|
|
9
|
+
from opentelemetry.sdk.trace import ReadableSpan
|
|
10
|
+
from opentelemetry.sdk.trace import Span as OTelSpan
|
|
11
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
12
|
+
|
|
13
|
+
from quraite.logger import get_logger
|
|
14
|
+
from quraite.schema.message import (
|
|
15
|
+
AssistantMessage,
|
|
16
|
+
MessageContentText,
|
|
17
|
+
SystemMessage,
|
|
18
|
+
ToolCall,
|
|
19
|
+
ToolMessage,
|
|
20
|
+
)
|
|
21
|
+
from quraite.tracing.constants import Framework
|
|
22
|
+
from quraite.tracing.types import Event, Link, Resource, SpanContext, SpanKind, Status
|
|
23
|
+
from quraite.tracing.utils import unflatten_messages
|
|
24
|
+
|
|
25
|
+
logger = get_logger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class TokenInfo(BaseModel):
    """Token usage counts for a single trace."""

    model_config = ConfigDict(extra="forbid")

    input_tokens: int
    """Number of input tokens."""

    output_tokens: int
    """Number of output tokens."""

    @property
    def total_tokens(self) -> int:
        """Combined count of input and output tokens."""
        return self.input_tokens + self.output_tokens
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class CostInfo(BaseModel):
    """Cost information."""

    input_cost: float
    """Cost associated to the input tokens."""

    output_cost: float
    """Cost associated to the output tokens."""

    @property
    def total_cost(self) -> float:
        """Total cost (input plus output)."""
        return self.input_cost + self.output_cost

    model_config = ConfigDict(extra="forbid")
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class AgentSpan(BaseModel):
    """A span that can be exported to JSON or printed to the console."""

    name: str
    kind: SpanKind
    parent: SpanContext | None = None
    start_time: int | None = None
    end_time: int | None = None
    status: Status
    context: SpanContext
    attributes: dict[str, Any]
    links: list[Link]
    events: list[Event]
    resource: Resource

    # TODO: Revisit this. It is supposed to be False.
    # If it is False, SpanContext causes it to fail.
    model_config = ConfigDict(arbitrary_types_allowed=False)

    @classmethod
    def from_otel(cls, otel_span: OTelSpan) -> AgentSpan:
        """Create an AgentSpan from an OTEL Span."""
        return cls(
            name=otel_span.name,
            kind=SpanKind.from_otel(otel_span.kind),
            parent=SpanContext.from_otel(otel_span.parent),
            start_time=otel_span.start_time,
            end_time=otel_span.end_time,
            status=Status.from_otel(otel_span.status),
            context=SpanContext.from_otel(otel_span.context),
            attributes=dict(otel_span.attributes) if otel_span.attributes else {},
            links=[Link.from_otel(link) for link in otel_span.links],
            events=[Event.from_otel(event) for event in otel_span.events],
            resource=Resource.from_otel(otel_span.resource),
        )

    @classmethod
    def from_readable_oi_span(cls, readable_span: ReadableSpan) -> AgentSpan:
        """Create an AgentSpan from a ReadableSpan.

        ReadableSpan exposes the same attribute surface as Span
        (name, kind, parent, times, status, context, attributes,
        links, events, resource), so the conversion is delegated to
        `from_otel` instead of duplicating it line for line.
        """
        return cls.from_otel(readable_span)

    def to_readable_span(self) -> ReadableSpan:
        """Create a ReadableSpan from the AgentSpan.

        NOTE(review): `kind`, `parent`, `status`, `context`, `links`,
        `events` and `resource` passed here are this module's serializable
        wrapper types, not the OTel SDK types ReadableSpan normally holds —
        confirm downstream consumers of the resulting span accept them.
        """
        return ReadableSpan(
            name=self.name,
            kind=self.kind,
            parent=self.parent,
            start_time=self.start_time,
            end_time=self.end_time,
            status=self.status,
            context=self.context,
            attributes=self.attributes,
            links=self.links,
            events=self.events,
            resource=self.resource,
        )

    def is_agent_invocation(self) -> bool:
        """Check whether this span is an agent invocation (the very first span)."""
        return self.get_oi_span_kind() == OpenInferenceSpanKindValues.AGENT

    def is_llm_call(self) -> bool:
        """Check whether this span is a call to an LLM."""
        return self.get_oi_span_kind() == OpenInferenceSpanKindValues.LLM

    def is_tool_execution(self) -> bool:
        """Check whether this span is an execution of a tool."""
        return self.get_oi_span_kind() == OpenInferenceSpanKindValues.TOOL

    def get_oi_span_kind(self) -> OpenInferenceSpanKindValues:
        """Get the OpenInference span kind from the span.

        Returns UNKNOWN when the attribute is missing or holds a value that
        is not a valid OpenInferenceSpanKindValues member. (Previously an
        unrecognized attribute value raised ValueError from the enum call.)
        """
        raw_kind = self.attributes.get(
            SpanAttributes.OPENINFERENCE_SPAN_KIND,
            OpenInferenceSpanKindValues.UNKNOWN,
        )
        try:
            return OpenInferenceSpanKindValues(raw_kind)
        except ValueError:
            return OpenInferenceSpanKindValues.UNKNOWN

    def to_llm_messages(self) -> List[dict[str, Any]]:
        """
        Convert LLM span to output messages.

        Note: Removes function_call_name and function_call_arguments_json from messages.

        Returns:
            List of output messages from this LLM span
        """
        if not self.is_llm_call():
            return []

        # Extract output messages
        unflattened = unflatten_messages(self.attributes)
        output_msgs = unflattened.get("llm.output_messages", [])

        # Remove function_call_name and function_call_arguments_json from messages
        cleaned_messages = []
        for msg in output_msgs:
            if isinstance(msg, dict):
                cleaned_msg = {
                    k: v
                    for k, v in msg.items()
                    if k not in ("function_call_name", "function_call_arguments_json")
                }
                cleaned_messages.append(cleaned_msg)
            else:
                cleaned_messages.append(msg)

        return cleaned_messages

    def to_tool_message(
        self, framework: Framework = Framework.LANGGRAPH
    ) -> dict[str, Any] | None:
        """
        Convert tool execution span to a tool message.

        Args:
            framework: The agent framework to use for extraction

        Returns:
            Tool message dict or None if not a tool span or extraction fails
        """
        if not self.is_tool_execution():
            return None

        # Imported lazily to avoid a circular import at module load time.
        from .tool_extractors import get_tool_extractor

        # Convert AgentSpan to dict format for tool extractor
        span_dict = {
            "attributes": self.attributes,
            "context": {"span_id": self.context.span_id},
            "parent_id": (
                str(self.parent.span_id)
                if self.parent and self.parent.span_id
                else None
            ),
            "start_time": self.start_time,
        }

        # Extract tool info using framework-specific extractor
        tool_extractor = get_tool_extractor(framework)
        tool_info = tool_extractor(span_dict)

        if tool_info:
            return tool_info.to_dict()

        return None
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
class AgentTrace(BaseModel):
    """A trace that can be exported to JSON or printed to the console."""

    spans: list[AgentSpan] = Field(default_factory=list)
    """A list of [`AgentSpan`][quraite.tracing.trace.AgentSpan] that form the trace.
    """

    final_output: str | AssistantMessage | None = Field(default=None)
    """Contains the final output message returned by the agent.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    def _invalidate_tokens_and_cost_cache(self) -> None:
        """Clear the cached `tokens` and `cost` properties if they exist."""
        # cached_property stores its result in the instance __dict__;
        # deleting the entry forces recomputation on next access.
        if "tokens" in self.__dict__:
            del self.tokens
        if "cost" in self.__dict__:
            del self.cost

    def add_span(self, span: AgentSpan | OTelSpan) -> None:
        """Add an AgentSpan to the trace and clear the tokens/cost cache if present.

        OTel spans are converted to AgentSpan before being stored.
        """
        if not isinstance(span, AgentSpan):
            span = AgentSpan.from_otel(span)
        self.spans.append(span)
        self._invalidate_tokens_and_cost_cache()

    def add_spans(self, spans: list[AgentSpan]) -> None:
        """Add a list of AgentSpans to the trace and clear the tokens/cost cache if present."""
        self.spans.extend(spans)
        self._invalidate_tokens_and_cost_cache()

    def _convert_llm_message(self, msg: dict[str, Any]) -> AssistantMessage | None:
        """Convert an LLM output message dict to an AssistantMessage.

        Returns None for any role other than "assistant"/"model".
        """
        role = msg.get("role")
        tool_calls = msg.get("tool_calls")

        if role in ["assistant", "model"]:  # model is used by Google ADK
            text_content = []

            # openinference support content as a string or a list of dicts
            # https://github.com/Arize-ai/openinference/blob/main/spec/multimodal_attributes.md
            content = msg.get("content")
            if content:
                if isinstance(content, str):
                    text_content = [MessageContentText(text=content)]

            contents = msg.get("contents")
            if contents and isinstance(contents, list):
                for content in contents:
                    if not isinstance(content, dict):
                        continue

                    message_content = content.get("message_content")
                    if not isinstance(message_content, dict):
                        continue

                    if message_content.get("type") == "text":
                        text_content.append(
                            MessageContentText(text=message_content.get("text"))
                        )

            tool_calls_list = None
            if tool_calls and isinstance(tool_calls, list):
                tool_calls_list = []
                for tc in tool_calls:
                    if not isinstance(tc, dict):
                        continue

                    function = tc.get("function", {})
                    if not isinstance(function, dict):
                        continue

                    tool_call_id = tc.get("id", "")
                    tool_name = function.get("name", "")
                    arguments_str = function.get("arguments", "")

                    # Parse arguments - could be JSON string or already a dict
                    arguments = {}
                    if arguments_str:
                        if isinstance(arguments_str, str):
                            try:
                                arguments = json.loads(arguments_str)
                            except (json.JSONDecodeError, TypeError):
                                arguments = {}
                        elif isinstance(arguments_str, dict):
                            arguments = arguments_str

                    tool_calls_list.append(
                        ToolCall(
                            id=tool_call_id,
                            name=tool_name,
                            arguments=arguments,
                        )
                    )

            return AssistantMessage(
                content=text_content if text_content else None,
                tool_calls=tool_calls_list if tool_calls_list else None,
            )

        return None

    def _convert_tool_message(self, msg: dict[str, Any]) -> ToolMessage | None:
        """Convert a tool message dict to a ToolMessage."""
        content = msg.get("response", "")

        return ToolMessage(
            tool_name=msg.get("tool_name"),
            tool_call_id=msg.get("tool_call_id"),
            content=[MessageContentText(type="text", text=str(content))],
        )

    def to_agent_trajectory(
        self,
        framework: Framework = Framework.LANGGRAPH,
        only_leaf_llms: bool = True,
    ) -> List[AssistantMessage | ToolMessage]:
        """
        Convert trace spans to agent trajectory.

        Args:
            framework: Agent framework to use for tool extraction
            only_leaf_llms: If True, only include LLM spans that have no LLM children

        Returns:
            List of AssistantMessage and ToolMessage in chronological order
        """
        # Framework-specific sorting:
        # - Some frameworks (pydantic_ai, agno) execute tools DURING the LLM span
        #   (tools are nested inside LLM), so we sort by end_time to get correct order.
        #   This means the parent span does not end before the nested span ends.
        # - Other frameworks (langgraph, openai_agents, etc.) execute tools AFTER the
        #   LLM span ends, so start_time sort works fine.
        nested_tool_frameworks = (Framework.PYDANTIC, Framework.AGNO)

        if framework in nested_tool_frameworks:
            # Sort by end_time: LLM output is ready at end_time, then tool results follow
            sorted_spans = sorted(
                self.spans,
                key=lambda s: s.end_time if s.end_time is not None else 0,
            )
        else:
            # Sort by start_time: LLM completes, then tools start after
            sorted_spans = sorted(
                self.spans,
                key=lambda s: s.start_time if s.start_time is not None else 0,
            )

        # Build parent-child relationships for only_leaf_llms filtering
        span_children: dict[str, list[str]] = {}
        llm_span_ids: set[str] = set()

        if only_leaf_llms:
            for span in sorted_spans:
                span_id = str(span.context.span_id) if span.context.span_id else None
                parent_id = (
                    str(span.parent.span_id)
                    if span.parent and span.parent.span_id
                    else None
                )

                if parent_id and parent_id != "null" and span_id:
                    if parent_id not in span_children:
                        span_children[parent_id] = []
                    span_children[parent_id].append(span_id)

                if span.is_llm_call() and span_id:
                    llm_span_ids.add(span_id)

        def has_llm_children(span_id: str) -> bool:
            """Check if a span has LLM children."""
            if not only_leaf_llms:
                return False
            children = span_children.get(span_id, [])
            return any(child_id in llm_span_ids for child_id in children)

        # Collect messages from spans
        messages: List[AssistantMessage | ToolMessage] = []

        for span in sorted_spans:
            span_id = str(span.context.span_id) if span.context.span_id else None

            if span.is_llm_call():
                # Skip if this LLM has LLM children (not a leaf)
                if only_leaf_llms and span_id and has_llm_children(span_id):
                    continue

                for llm_msg in span.to_llm_messages():
                    converted = self._convert_llm_message(llm_msg)
                    if converted:
                        messages.append(converted)

            elif span.is_tool_execution():
                # Use span method to extract tool message
                tool_msg = span.to_tool_message(framework=framework)
                if tool_msg:
                    converted = self._convert_tool_message(tool_msg)
                    if converted:
                        messages.append(converted)

        return messages

    @cached_property
    def system_messages(self) -> List[SystemMessage]:
        """
        Extract unique system messages from all LLM spans in the trace.

        Returns:
            List of unique system messages from LLM input messages (deduplicated)
        """
        seen_messages = set()
        system_messages: List[SystemMessage] = []

        for span in self.spans:
            if span.is_llm_call():
                # Extract input messages
                unflattened = unflatten_messages(span.attributes)
                input_msgs = unflattened.get("llm.input_messages", [])

                # Filter for system messages
                for msg in input_msgs:
                    if not (isinstance(msg, dict) and msg.get("role") == "system"):
                        continue

                    # Bug fix: `prompt` was previously left unbound (or stale
                    # from an earlier iteration) when a system message carried
                    # neither a "content" nor a "contents" field, raising
                    # NameError / corrupting deduplication.
                    prompt = None
                    if msg.get("content"):
                        prompt = msg.get("content")
                    else:
                        contents = msg.get("contents")
                        if contents:
                            prompt = contents[0].get("message_content", {}).get("text")

                    if prompt is None:
                        continue

                    # Use JSON string as hashable key for deduplication
                    msg_key = json.dumps(prompt, sort_keys=True)
                    if msg_key not in seen_messages:
                        seen_messages.add(msg_key)
                        system_messages.append(
                            SystemMessage(content=[MessageContentText(text=prompt)])
                        )

        return system_messages

    @cached_property
    def tokens(self) -> TokenInfo:
        """The [`TokenInfo`][quraite.tracing.trace.TokenInfo] for this trace. Cached after first computation."""
        sum_input_tokens = 0
        sum_output_tokens = 0
        for span in self.spans:
            if span.is_llm_call():
                sum_input_tokens += span.attributes.get(
                    SpanAttributes.LLM_TOKEN_COUNT_PROMPT, 0
                )
                sum_output_tokens += span.attributes.get(
                    SpanAttributes.LLM_TOKEN_COUNT_COMPLETION, 0
                )
        return TokenInfo(input_tokens=sum_input_tokens, output_tokens=sum_output_tokens)

    @cached_property
    def cost(self) -> CostInfo:
        """The [`CostInfo`][quraite.tracing.trace.CostInfo] for this trace. Cached after first computation."""
        sum_input_cost = 0.0
        sum_output_cost = 0.0
        for span in self.spans:
            if span.is_llm_call():
                sum_input_cost += span.attributes.get(SpanAttributes.LLM_COST_PROMPT, 0)
                sum_output_cost += span.attributes.get(
                    SpanAttributes.LLM_COST_COMPLETION, 0
                )
        return CostInfo(input_cost=sum_input_cost, output_cost=sum_output_cost)
|
quraite/tracing/types.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel, Field
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class TraceFlags(BaseModel):
    """Serializable trace flags."""

    value: int = 0

    @classmethod
    def from_otel(cls, flags: Any | None) -> "TraceFlags":
        """Build from an OpenTelemetry TraceFlags object (or None)."""
        if flags is None:
            return cls(value=0)
        # Fall back to 0 when the object carries no `value` attribute.
        return cls(value=getattr(flags, "value", 0))
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class TraceState(BaseModel):
    """Serializable trace state."""

    entries: dict[str, str] = Field(default_factory=dict)

    @classmethod
    def from_otel(cls, state: Any | None) -> "TraceState":
        """Build from an OpenTelemetry TraceState object (or None)."""
        if state is None:
            return cls()
        # Only mapping-like objects (exposing .items()) contribute entries.
        entries = dict(state.items()) if hasattr(state, "items") else {}
        return cls(entries=entries)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class SpanKind(str, Enum):
|
|
34
|
+
"""String-based enum for span kind to make it serializable."""
|
|
35
|
+
|
|
36
|
+
INTERNAL = "internal"
|
|
37
|
+
SERVER = "server"
|
|
38
|
+
CLIENT = "client"
|
|
39
|
+
PRODUCER = "producer"
|
|
40
|
+
CONSUMER = "consumer"
|
|
41
|
+
|
|
42
|
+
@classmethod
|
|
43
|
+
def from_otel(cls, kind: Any | None) -> "SpanKind":
|
|
44
|
+
"""Convert from OpenTelemetry SpanKind."""
|
|
45
|
+
if kind is None:
|
|
46
|
+
return cls.INTERNAL
|
|
47
|
+
|
|
48
|
+
mapping = {
|
|
49
|
+
0: cls.INTERNAL,
|
|
50
|
+
1: cls.SERVER,
|
|
51
|
+
2: cls.CLIENT,
|
|
52
|
+
3: cls.PRODUCER,
|
|
53
|
+
4: cls.CONSUMER,
|
|
54
|
+
}
|
|
55
|
+
return mapping.get(kind.value, cls.INTERNAL)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class SpanContext(BaseModel):
    """Serializable span context."""

    trace_id: int | None = None
    span_id: int | None = None
    is_remote: bool = False
    trace_flags: TraceFlags = Field(default_factory=TraceFlags)
    trace_state: TraceState = Field(default_factory=TraceState)

    @classmethod
    def from_otel(cls, context: Any | None) -> "SpanContext":
        """Build from an OpenTelemetry SpanContext (or None for an empty one)."""
        if context is None:
            return cls()

        # Missing attributes fall back to the model defaults.
        flags = TraceFlags.from_otel(getattr(context, "trace_flags", None))
        state = TraceState.from_otel(getattr(context, "trace_state", None))
        return cls(
            trace_id=getattr(context, "trace_id", None),
            span_id=getattr(context, "span_id", None),
            is_remote=getattr(context, "is_remote", False),
            trace_flags=flags,
            trace_state=state,
        )
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class StatusCode(str, Enum):
|
|
83
|
+
"""String-based enum for status code to make it serializable."""
|
|
84
|
+
|
|
85
|
+
UNSET = "unset"
|
|
86
|
+
OK = "ok"
|
|
87
|
+
ERROR = "error"
|
|
88
|
+
|
|
89
|
+
@classmethod
|
|
90
|
+
def from_otel(cls, code: Any | None) -> "StatusCode":
|
|
91
|
+
"""Convert from OpenTelemetry StatusCode."""
|
|
92
|
+
if code is None:
|
|
93
|
+
return cls.UNSET
|
|
94
|
+
|
|
95
|
+
mapping = {"UNSET": cls.UNSET, "OK": cls.OK, "ERROR": cls.ERROR}
|
|
96
|
+
|
|
97
|
+
if hasattr(code, "name"):
|
|
98
|
+
return mapping.get(code.name, cls.UNSET)
|
|
99
|
+
return cls.UNSET
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class Status(BaseModel):
    """Serializable status."""

    status_code: StatusCode = StatusCode.UNSET
    description: str | None = None

    @classmethod
    def from_otel(cls, status: Any | None) -> "Status":
        """Build from an OpenTelemetry Status (or None for a default one)."""
        if status is None:
            return cls()

        code = StatusCode.from_otel(getattr(status, "status_code", None))
        # NOTE(review): the getattr fallback is "" while the field default is
        # None — preserved as-is; confirm whether None was intended here.
        return cls(
            status_code=code,
            description=getattr(status, "description", ""),
        )
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class AttributeValue(BaseModel):
    """Wrapper that makes a single attribute value serializable."""

    # Scalars or a list of scalars, mirroring the value types OTel
    # attributes allow.
    value: str | int | float | bool | list[str | int | float | bool]
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class Link(BaseModel):
    """Serializable link."""

    context: SpanContext
    attributes: dict[str, Any] | None = None

    @classmethod
    def from_otel(cls, link: Any | None) -> "Link":
        """Build from an OpenTelemetry Link (or None for an empty one)."""
        if link is None:
            return cls(context=SpanContext())

        ctx = SpanContext.from_otel(getattr(link, "context", None))
        attrs = getattr(link, "attributes", None)
        return cls(context=ctx, attributes=attrs)
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
class Event(BaseModel):
    """Serializable event."""

    name: str
    timestamp: int = 0
    attributes: dict[str, Any] | None = None

    @classmethod
    def from_otel(cls, event: Any | None) -> "Event":
        """Build from an OpenTelemetry Event (or None for an empty one)."""
        if event is None:
            return cls(name="")

        name = getattr(event, "name", "")
        ts = getattr(event, "timestamp", 0)
        attrs = getattr(event, "attributes", None)
        return cls(name=name, timestamp=ts, attributes=attrs)
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
class Resource(BaseModel):
    """Serializable resource."""

    attributes: dict[str, Any] = Field(default_factory=dict)
    schema_url: str = ""

    @classmethod
    def from_otel(cls, resource: Any | None) -> "Resource":
        """Build from an OpenTelemetry Resource (or None for an empty one)."""
        if resource is None:
            return cls()

        attrs = getattr(resource, "attributes", {})
        url = getattr(resource, "schema_url", "")
        return cls(attributes=attrs, schema_url=url)
|