quraite 0.0.2__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quraite/__init__.py +3 -3
- quraite/adapters/__init__.py +134 -134
- quraite/adapters/agno_adapter.py +159 -159
- quraite/adapters/base.py +123 -123
- quraite/adapters/bedrock_agents_adapter.py +343 -343
- quraite/adapters/flowise_adapter.py +275 -275
- quraite/adapters/google_adk_adapter.py +209 -209
- quraite/adapters/http_adapter.py +239 -239
- quraite/adapters/langflow_adapter.py +192 -192
- quraite/adapters/langgraph_adapter.py +304 -304
- quraite/adapters/langgraph_server_adapter.py +252 -252
- quraite/adapters/n8n_adapter.py +220 -220
- quraite/adapters/openai_agents_adapter.py +269 -269
- quraite/adapters/pydantic_ai_adapter.py +312 -312
- quraite/adapters/smolagents_adapter.py +152 -152
- quraite/logger.py +61 -64
- quraite/schema/message.py +91 -54
- quraite/schema/response.py +16 -16
- quraite/serve/__init__.py +1 -1
- quraite/serve/cloudflared.py +210 -210
- quraite/serve/local_agent.py +360 -360
- quraite/tracing/__init__.py +24 -24
- quraite/tracing/constants.py +16 -16
- quraite/tracing/span_exporter.py +115 -115
- quraite/tracing/span_processor.py +49 -49
- quraite/tracing/tool_extractors.py +290 -290
- quraite/tracing/trace.py +564 -494
- quraite/tracing/types.py +179 -179
- quraite/tracing/utils.py +170 -170
- quraite/utils/json_utils.py +269 -269
- {quraite-0.0.2.dist-info → quraite-0.1.0.dist-info}/METADATA +9 -9
- quraite-0.1.0.dist-info/RECORD +35 -0
- {quraite-0.0.2.dist-info → quraite-0.1.0.dist-info}/WHEEL +1 -1
- quraite/traces/traces_adk_openinference.json +0 -379
- quraite/traces/traces_agno_multi_agent.json +0 -669
- quraite/traces/traces_agno_openinference.json +0 -321
- quraite/traces/traces_crewai_openinference.json +0 -155
- quraite/traces/traces_langgraph_openinference.json +0 -349
- quraite/traces/traces_langgraph_openinference_multi_agent.json +0 -2705
- quraite/traces/traces_langgraph_traceloop.json +0 -510
- quraite/traces/traces_openai_agents_multi_agent_1.json +0 -402
- quraite/traces/traces_openai_agents_openinference.json +0 -341
- quraite/traces/traces_pydantic_openinference.json +0 -286
- quraite/traces/traces_pydantic_openinference_multi_agent_1.json +0 -399
- quraite/traces/traces_pydantic_openinference_multi_agent_2.json +0 -398
- quraite/traces/traces_smol_agents_openinference.json +0 -397
- quraite/traces/traces_smol_agents_tool_calling_openinference.json +0 -704
- quraite-0.0.2.dist-info/RECORD +0 -49
quraite/tracing/utils.py
CHANGED
@@ -1,170 +1,170 @@

import re
from typing import Any

# Keys to skip (they're just OpenInference namespacing prefixes)
SKIP_KEYS = {"message", "tool_call", "tool"}


def unflatten_messages(attributes: dict[str, Any]) -> dict[str, Any]:
    """
    Unflatten llm.input_messages and llm.output_messages from dot notation
    to nested list structures.

    Args:
        attributes: Flattened span attributes dict

    Returns:
        New dict with unflattened input_messages and output_messages
    """
    result = {}
    input_messages: dict[int, dict] = {}
    output_messages: dict[int, dict] = {}

    # Patterns to match message attributes
    input_pattern = re.compile(r"^llm\.input_messages\.(\d+)\.(.+)$")
    output_pattern = re.compile(r"^llm\.output_messages\.(\d+)\.(.+)$")

    for key, value in attributes.items():
        # Check for input messages
        input_match = input_pattern.match(key)
        if input_match:
            idx = int(input_match.group(1))
            rest = input_match.group(2)
            if idx not in input_messages:
                input_messages[idx] = {}
            _set_nested_value(input_messages[idx], rest, value)
            continue

        # Check for output messages
        output_match = output_pattern.match(key)
        if output_match:
            idx = int(output_match.group(1))
            rest = output_match.group(2)
            if idx not in output_messages:
                output_messages[idx] = {}
            _set_nested_value(output_messages[idx], rest, value)
            continue

        # Keep other attributes as-is
        result[key] = value

    # Convert dicts to sorted lists
    if input_messages:
        result["llm.input_messages"] = _dict_to_list(input_messages)
    if output_messages:
        result["llm.output_messages"] = _dict_to_list(output_messages)

    return result


def _set_nested_value(d: dict, path: str, value: Any) -> None:
    """
    Set a nested value in a dict using dot notation path.
    Skips intermediate keys like 'message' and 'tool_call'.

    Handles paths like:
    - message.role -> role
    - message.content -> content
    - message.tool_calls.0.tool_call.function.name -> tool_calls[0].function.name
    """
    # Filter out the namespace prefixes
    parts = [p for p in path.split(".") if p not in SKIP_KEYS]
    current = d

    for i, part in enumerate(parts[:-1]):
        next_part = parts[i + 1]

        # Check if next part is an index (for arrays)
        if next_part.isdigit():
            if part not in current:
                current[part] = {}
            current = current[part]
        elif part.isdigit():
            # Current part is an index
            idx = int(part)
            if not isinstance(current, dict):
                current = {}
            if idx not in current:
                current[idx] = {}
            current = current[idx]
        else:
            if part not in current:
                current[part] = {}
            current = current[part]

    # Set the final value
    final_key = parts[-1]
    if final_key.isdigit():
        current[int(final_key)] = value
    else:
        current[final_key] = value


def _dict_to_list(d: dict[int, Any]) -> list:
    """Convert a dict with integer keys to a sorted list."""
    if not d:
        return []
    max_idx = max(d.keys())
    result = []
    for i in range(max_idx + 1):
        if i in d:
            item = d[i]
            # Recursively convert any nested dicts with int keys to lists
            result.append(_convert_nested_arrays(item))
        else:
            result.append(None)
    return result


def _convert_nested_arrays(obj: Any) -> Any:
    """Recursively convert dicts with integer keys to lists."""
    if isinstance(obj, dict):
        # Check if all keys are integers (should be a list)
        if obj and all(isinstance(k, int) for k in obj.keys()):
            return _dict_to_list(obj)
        # Otherwise process each value
        return {k: _convert_nested_arrays(v) for k, v in obj.items()}
    return obj


def unflatten_llm_attributes(attributes: dict[str, Any]) -> dict[str, Any]:
    """
    Unflatten all LLM-related attributes (messages and tools).

    Args:
        attributes: Flattened span attributes dict

    Returns:
        New dict with unflattened messages and tools
    """
    result = unflatten_messages(attributes)

    return result


# Example usage
if __name__ == "__main__":
    import json

    # Example flattened attributes
    flattened = {
        "llm.input_messages.0.message.role": "system",
        "llm.input_messages.0.message.content": "You are helpful.",
        "llm.input_messages.1.message.role": "user",
        "llm.input_messages.1.message.content": "What is 2+2?",
        "llm.input_messages.2.message.role": "assistant",
        "llm.input_messages.2.message.tool_calls.0.tool_call.id": "call_123",
        "llm.input_messages.2.message.tool_calls.0.tool_call.function.name": "calculator",
        "llm.input_messages.2.message.tool_calls.0.tool_call.function.arguments": '{"a": 2, "b": 2}',
        "llm.input_messages.3.message.role": "tool",
        "llm.input_messages.3.message.tool_call_id": "call_123",
        "llm.input_messages.3.message.content": "4",
        "llm.output_messages.0.message.role": "assistant",
        "llm.output_messages.0.message.content": "The answer is 4.",
        "llm.model_name": "gpt-4",
        "llm.tools.0.tool.json_schema": '{"type": "function", "function": {"name": "calculator"}}',
        "openinference.span.kind": "LLM",
    }

    unflattened = unflatten_llm_attributes(flattened)
    print(json.dumps(unflattened, indent=2))
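For reference, executing the example block at the bottom of utils.py should print roughly the structure below (a sketch of the expected output; key order follows insertion order and non-message attributes such as llm.model_name and llm.tools pass through unchanged):

{
  "llm.model_name": "gpt-4",
  "llm.tools.0.tool.json_schema": "{\"type\": \"function\", \"function\": {\"name\": \"calculator\"}}",
  "openinference.span.kind": "LLM",
  "llm.input_messages": [
    {
      "role": "system",
      "content": "You are helpful."
    },
    {
      "role": "user",
      "content": "What is 2+2?"
    },
    {
      "role": "assistant",
      "tool_calls": [
        {
          "id": "call_123",
          "function": {
            "name": "calculator",
            "arguments": "{\"a\": 2, \"b\": 2}"
          }
        }
      ]
    },
    {
      "role": "tool",
      "tool_call_id": "call_123",
      "content": "4"
    }
  ],
  "llm.output_messages": [
    {
      "role": "assistant",
      "content": "The answer is 4."
    }
  ]
}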