quraite-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. quraite/__init__.py +3 -0
  2. quraite/adapters/__init__.py +134 -0
  3. quraite/adapters/agno_adapter.py +159 -0
  4. quraite/adapters/base.py +123 -0
  5. quraite/adapters/bedrock_agents_adapter.py +343 -0
  6. quraite/adapters/flowise_adapter.py +275 -0
  7. quraite/adapters/google_adk_adapter.py +209 -0
  8. quraite/adapters/http_adapter.py +239 -0
  9. quraite/adapters/langflow_adapter.py +192 -0
  10. quraite/adapters/langgraph_adapter.py +304 -0
  11. quraite/adapters/langgraph_server_adapter.py +252 -0
  12. quraite/adapters/n8n_adapter.py +220 -0
  13. quraite/adapters/openai_agents_adapter.py +269 -0
  14. quraite/adapters/pydantic_ai_adapter.py +312 -0
  15. quraite/adapters/smolagents_adapter.py +152 -0
  16. quraite/logger.py +62 -0
  17. quraite/schema/__init__.py +0 -0
  18. quraite/schema/message.py +54 -0
  19. quraite/schema/response.py +16 -0
  20. quraite/serve/__init__.py +1 -0
  21. quraite/serve/cloudflared.py +210 -0
  22. quraite/serve/local_agent.py +360 -0
  23. quraite/traces/traces_adk_openinference.json +379 -0
  24. quraite/traces/traces_agno_multi_agent.json +669 -0
  25. quraite/traces/traces_agno_openinference.json +321 -0
  26. quraite/traces/traces_crewai_openinference.json +155 -0
  27. quraite/traces/traces_langgraph_openinference.json +349 -0
  28. quraite/traces/traces_langgraph_openinference_multi_agent.json +2705 -0
  29. quraite/traces/traces_langgraph_traceloop.json +510 -0
  30. quraite/traces/traces_openai_agents_multi_agent_1.json +402 -0
  31. quraite/traces/traces_openai_agents_openinference.json +341 -0
  32. quraite/traces/traces_pydantic_openinference.json +286 -0
  33. quraite/traces/traces_pydantic_openinference_multi_agent_1.json +399 -0
  34. quraite/traces/traces_pydantic_openinference_multi_agent_2.json +398 -0
  35. quraite/traces/traces_smol_agents_openinference.json +397 -0
  36. quraite/traces/traces_smol_agents_tool_calling_openinference.json +704 -0
  37. quraite/tracing/__init__.py +24 -0
  38. quraite/tracing/constants.py +16 -0
  39. quraite/tracing/span_exporter.py +115 -0
  40. quraite/tracing/span_processor.py +49 -0
  41. quraite/tracing/tool_extractors.py +290 -0
  42. quraite/tracing/trace.py +494 -0
  43. quraite/tracing/types.py +179 -0
  44. quraite/tracing/utils.py +170 -0
  45. quraite/utils/__init__.py +0 -0
  46. quraite/utils/json_utils.py +269 -0
  47. quraite-0.0.1.dist-info/METADATA +44 -0
  48. quraite-0.0.1.dist-info/RECORD +49 -0
  49. quraite-0.0.1.dist-info/WHEEL +4 -0
quraite/tracing/__init__.py
@@ -0,0 +1,24 @@
+ """Tracing infrastructure for OpenTelemetry span collection and processing."""
+
+ from quraite.tracing.span_exporter import QuraiteInMemorySpanExporter
+ from quraite.tracing.span_processor import QuraiteSimpleSpanProcessor
+ from quraite.tracing.tool_extractors import Framework, ToolCallInfo, get_tool_extractor
+ from quraite.tracing.trace import AgentSpan, AgentTrace, CostInfo, TokenInfo
+ from quraite.tracing.types import Event, Link, Resource, SpanContext, Status
+
+ __all__ = [
+     "AgentSpan",
+     "AgentTrace",
+     "CostInfo",
+     "TokenInfo",
+     "Framework",
+     "ToolCallInfo",
+     "get_tool_extractor",
+     "QuraiteInMemorySpanExporter",
+     "QuraiteSimpleSpanProcessor",
+     "Event",
+     "Link",
+     "Resource",
+     "SpanContext",
+     "Status",
+ ]
quraite/tracing/constants.py
@@ -0,0 +1,16 @@
+ from enum import Enum
+
+ QURAITE_ADAPTER_TRACE_PREFIX = "quraite-adapter"
+
+ QURAITE_TRACER_NAME = "quraite.instrumentation"
+
+
+ class Framework(str, Enum):
+     """Supported agent frameworks."""
+
+     PYDANTIC = "pydantic"
+     LANGGRAPH = "langgraph"
+     GOOGLE_ADK = "google_adk"
+     OPENAI_AGENTS = "openai_agents"
+     AGNO = "agno"
+     SMOLAGENTS = "smolagents"
quraite/tracing/span_exporter.py
@@ -0,0 +1,115 @@
+ import json
+ import os
+ import threading
+ import typing
+ from collections import defaultdict
+
+ from opentelemetry.sdk.trace import ReadableSpan
+ from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+
+
+ class QuraiteInMemorySpanExporter(SpanExporter):
+     def __init__(self) -> None:
+         # self.spans: typing.List[ReadableSpan] = []
+         self.traces: typing.Dict[str, typing.List[ReadableSpan]] = defaultdict(list)
+         self.testcase_to_trace: typing.Dict[str, str] = {}
+
+         self._stopped = False
+         self._lock = threading.Lock()
+
+     def handle_testcase_trace(self, span: ReadableSpan) -> None:
+         """Handle a testcase trace."""
+         # print(f"🟢 testcase trace received: {span.name}")
+         # print(f"🟢 Span: {span.context}")
+         formatted_trace_id = format(span.context.trace_id, "032x")[:8]
+         # print(f"🟢 testcase formatted trace id: {formatted_trace_id}")
+         self.traces[formatted_trace_id] = []
+         self.testcase_to_trace[span.name] = formatted_trace_id
+
+     def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
+         """Stores a list of spans in memory."""
+         if self._stopped:
+             return SpanExportResult.FAILURE
+
+         with self._lock:
+             for span in spans:
+                 formatted_trace_id = format(span.context.trace_id, "032x")[:8]
+                 # print(f"🟢 span formatted context trace id: {formatted_trace_id}")
+                 self.traces[formatted_trace_id].append(span)
+                 # self.spans.append(span)
+
+         return SpanExportResult.SUCCESS
+
+     def shutdown(self) -> None:
+         """Shuts down the exporter.
+
+         Calls to export after the exporter has been shut down will fail.
+         """
+         print("Shutting down exporter")
+         self._stopped = True
+
+     def force_flush(self, timeout_millis: int = 30000) -> bool:
+         return True
+
+     def get_traces(self) -> typing.Dict[str, typing.List[ReadableSpan]]:
+         """Get all spans grouped by trace ID"""
+         return dict(self.traces)
+
+     def get_trace_by_testcase(self, testcase_name: str) -> typing.List[ReadableSpan]:
+         """Get all spans for a specific testcase"""
+         with self._lock:
+             return self.traces.get(self.testcase_to_trace.get(testcase_name, None), [])
+
+     def get_trace(self, trace_id: str) -> typing.List[ReadableSpan]:
+         """Get all spans for a specific trace"""
+         return self.traces.get(trace_id, [])
+
+     def get_trace_count(self) -> int:
+         """Get the number of unique traces"""
+         return len(self.traces)
+
+     def print_trace_summary(self):
+         """Print a summary of all traces"""
+         print(f"\n{'='*60}")
+         print(f"Total Traces: {self.get_trace_count()}")
+         for trace_id, spans in self.traces.items():
+             print(f"Trace ID: {trace_id}...")
+             print(f"Spans in trace: {len(spans)}")
+         print(f"Total Testcases: {self.testcase_to_trace}")
+         print(f"TraceIDs: {self.traces.keys()}")
+         print(f"{'='*60}\n")
+
+         # for trace_id, spans in self.traces.items():
+         #     trace_id_hex = format(trace_id, '032x')
+         #     print(f"📊 Trace ID: {trace_id_hex}")
+         #     print(f" Spans in trace: {len(spans)}")
+
+         #     # Sort spans by start time to show execution order
+         #     sorted_spans = sorted(spans, key=lambda s: s.start_time)
+
+         #     for span in sorted_spans:
+         #         duration_ms = (span.end_time - span.start_time) / 1e6
+         #         parent_id = format(span.parent.span_id, '016x') if span.parent else "None"
+         #         indent = " " if span.parent else ""
+         #         print(f" {indent}├─ {span.name}")
+         #         print(f" {indent} ├─ Span ID: {format(span.context.span_id, '016x')}")
+         #         print(f" {indent} ├─ Parent ID: {parent_id}")
+         #         print(f" {indent} ├─ Duration: {duration_ms:.2f}ms")
+         #         if span.attributes:
+         #             print(f" {indent} └─ Attributes: {dict(span.attributes)}")
+         #         print()
+
+     def save_traces_to_file(self, filename: str = "traces.json"):
+         """Save a trace to a file"""
+         traces = []
+         for trace_id, spans in self.traces.items():
+             traces.append(
+                 {
+                     "trace_id": trace_id,
+                     "spans": [json.loads(span.to_json()) for span in spans],
+                 }
+             )
+
+         os.makedirs(os.path.dirname(f"traces/{filename}"), exist_ok=True)
+         with open(f"traces/{filename}", "w") as f:
+             json.dump(traces, f, indent=2)
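A brief, hypothetical sketch of the exporter's read-side helpers defined above. On a freshly constructed instance the collections are empty, since spans normally arrive via export() from the span processor (see the wiring sketch after the next file); the filename below is a made-up example.

from quraite.tracing import QuraiteInMemorySpanExporter

exporter = QuraiteInMemorySpanExporter()

# Spans normally arrive via export(), called from QuraiteSimpleSpanProcessor.on_end.
print(exporter.get_trace_count())             # 0 here; number of distinct trace IDs seen
exporter.print_trace_summary()                # per-trace span counts on stdout
exporter.save_traces_to_file("example.json")  # writes traces/example.json (hypothetical name)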
quraite/tracing/span_processor.py
@@ -0,0 +1,49 @@
+ import typing
+
+ from opentelemetry.context import (
+     _SUPPRESS_INSTRUMENTATION_KEY,
+     Context,
+     attach,
+     detach,
+     set_value,
+ )
+ from opentelemetry.sdk.trace.export import ReadableSpan, SpanExporter, SpanProcessor
+ from opentelemetry.trace import Span, logger
+
+ from quraite.tracing.constants import QURAITE_ADAPTER_TRACE_PREFIX
+ from quraite.tracing.span_exporter import QuraiteInMemorySpanExporter
+
+
+ class QuraiteSimpleSpanProcessor(SpanProcessor):
+     """Simple SpanProcessor implementation.
+
+     SimpleSpanProcessor is an implementation of `SpanProcessor` that
+     passes ended spans directly to the configured `SpanExporter`.
+     """
+
+     def __init__(self, span_exporter: SpanExporter):
+         self.span_exporter: QuraiteInMemorySpanExporter = span_exporter
+
+     def on_start(
+         self, span: Span, parent_context: typing.Optional[Context] = None
+     ) -> None:
+         if QURAITE_ADAPTER_TRACE_PREFIX in span.name:
+             self.span_exporter.handle_testcase_trace(span)
+
+     def on_end(self, span: ReadableSpan) -> None:
+         if not span.context.trace_flags.sampled:
+             return
+         token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
+         try:
+             self.span_exporter.export((span,))
+         # pylint: disable=broad-exception-caught
+         except Exception:
+             logger.exception("Exception while exporting Span.")
+         detach(token)
+
+     def shutdown(self) -> None:
+         self.span_exporter.shutdown()
+
+     def force_flush(self, timeout_millis: int = 30000) -> bool:
+         # pylint: disable=unused-argument
+         return True
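A minimal sketch of how the exporter and processor above might be wired into an OpenTelemetry TracerProvider. The span name and the work done under the span are hypothetical; the only requirement visible in on_start above is that the testcase span's name contains QURAITE_ADAPTER_TRACE_PREFIX.

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

from quraite.tracing import QuraiteInMemorySpanExporter, QuraiteSimpleSpanProcessor
from quraite.tracing.constants import QURAITE_ADAPTER_TRACE_PREFIX, QURAITE_TRACER_NAME

# Register the in-memory exporter behind the custom processor.
exporter = QuraiteInMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(QuraiteSimpleSpanProcessor(exporter))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer(QURAITE_TRACER_NAME)

# on_start registers this span as a testcase trace because its name contains
# the adapter prefix; "my-testcase" is a made-up example.
testcase_name = f"{QURAITE_ADAPTER_TRACE_PREFIX}-my-testcase"
with tracer.start_as_current_span(testcase_name):
    pass  # run the agent under test here

# All spans from that trace, keyed by the testcase span's name.
print(exporter.get_trace_by_testcase(testcase_name))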
quraite/tracing/tool_extractors.py
@@ -0,0 +1,290 @@
+ """
+ Framework-specific tool extractors for converting span attributes to standardized tool call information.
+
+ These extractors handle the varying attribute structures across different agent frameworks
+ (pydantic, langgraph, adk, openai_agents, agno, smolagents, etc.)
+ """
+
+ import json
+ from typing import Any, Protocol
+
+ from quraite.tracing.constants import Framework
+
+
+ class ToolCallInfo:
+     """Standardized tool call information extracted from a TOOL span."""
+
+     def __init__(
+         self,
+         tool_name: str,
+         tool_call_id: str | None,
+         arguments: str | dict,
+         response: Any,
+     ):
+         self.tool_name = tool_name
+         self.tool_call_id = tool_call_id
+         self.arguments = arguments
+         self.response = response
+
+     def to_dict(self) -> dict[str, Any]:
+         return {
+             "role": "tool",
+             "tool_name": self.tool_name,
+             "tool_call_id": self.tool_call_id,
+             "arguments": self.arguments,
+             "response": self.response,
+         }
+
+
+ class ToolExtractor(Protocol):
+     """Protocol for framework-specific tool extractors."""
+
+     def __call__(self, span: dict[str, Any]) -> ToolCallInfo | None: ...
+
+
+ # =============================================================================
+ # Framework-specific tool extractors
+ # =============================================================================
+
+
+ def extract_tool_pydantic(span: dict[str, Any]) -> ToolCallInfo | None:
+     """
+     Extract tool info from Pydantic AI tool spans.
+
+     Attributes:
+         - tool.name: "customer_balance"
+         - tool_call.id: "call_xxx"
+         - tool_arguments: "{\"include_pending\":true}"
+         - tool_response: "$123.45"
+     """
+     attrs = span.get("attributes", {})
+
+     tool_name = attrs.get("tool.name") or attrs.get("gen_ai.tool.name")
+     if not tool_name:
+         return None
+
+     tool_call_id = attrs.get("tool_call.id") or attrs.get("gen_ai.tool.call.id")
+     arguments = attrs.get("tool_arguments", "{}")
+     response = attrs.get("tool_response", "")
+
+     return ToolCallInfo(
+         tool_name=tool_name,
+         tool_call_id=tool_call_id,
+         arguments=arguments,
+         response=response,
+     )
+
+
+ def extract_tool_langgraph(span: dict[str, Any]) -> ToolCallInfo | None:
+     """
+     Extract tool info from LangGraph tool spans.
+
+     Attributes:
+         - tool.name: "add"
+         - tool.description: "Add two numbers."
+         - input.value: "{'b': 1, 'a': 1}"
+         - output.value: JSON with content
+     """
+     attrs = span.get("attributes", {})
+
+     tool_name = attrs.get("tool.name")
+     if not tool_name:
+         return None
+
+     arguments = attrs.get("input.value", "{}")
+     output_value = attrs.get("output.value", "")
+
+     # Also check for response attribute (some LangGraph spans store response here)
+     response_value = attrs.get("response", output_value)
+
+     # Try to parse output to extract content
+     response = response_value
+     if isinstance(response_value, str):
+         try:
+             parsed = json.loads(response_value)
+             if isinstance(parsed, dict):
+                 # Check if response field contains JSON string (nested JSON)
+                 if "response" in parsed and isinstance(parsed["response"], str):
+                     try:
+                         inner_parsed = json.loads(parsed["response"])
+                         if isinstance(inner_parsed, dict) and "update" in inner_parsed:
+                             parsed = inner_parsed
+                     except (json.JSONDecodeError, TypeError):
+                         pass
+
+                 # First check for direct content field
+                 if "content" in parsed:
+                     response = parsed.get("content", response_value)
+                 # Check for update.messages structure (LangGraph graph updates)
+                 # this comes when you use supervisor agent with multiple agents
+                 elif "update" in parsed:
+                     update = parsed.get("update", {})
+                     messages = update.get("messages", [])
+                     # Find the last tool message
+                     for msg in reversed(messages):
+                         if isinstance(msg, dict) and msg.get("type") == "tool":
+                             content = msg.get("content", "")
+                             if content:
+                                 response = content
+                                 break
+                     else:
+                         # No tool message found, keep original response
+                         response = response_value
+                 else:
+                     response = response_value
+         except json.JSONDecodeError:
+             pass
+
+     return ToolCallInfo(
+         tool_name=tool_name,
+         tool_call_id=None,  # LangGraph doesn't always have call IDs in tool spans
+         arguments=arguments,
+         response=response,
+     )
+
+
+ def extract_tool_adk(span: dict[str, Any]) -> ToolCallInfo | None:
+     """
+     Extract tool info from Google ADK tool spans.
+
+     Attributes:
+         - tool.name: "get_weather"
+         - tool.parameters: "{\"city\": \"New York\"}"
+         - gcp.vertex.agent.tool_call_args: "{\"city\": \"New York\"}"
+         - gcp.vertex.agent.tool_response: JSON response
+         - output.value: JSON with id, name, response
+     """
+     attrs = span.get("attributes", {})
+
+     tool_name = attrs.get("tool.name") or attrs.get("gen_ai.tool.name")
+     if not tool_name:
+         return None
+
+     # Skip merged tool spans
+     if tool_name == "(merged tools)":
+         return None
+
+     tool_call_id = attrs.get("gen_ai.tool.call.id")
+     arguments = (
+         attrs.get("tool.parameters")
+         or attrs.get("gcp.vertex.agent.tool_call_args")
+         or attrs.get("input.value", "{}")
+     )
+
+     # Get response from various possible locations
+     response = attrs.get("gcp.vertex.agent.tool_response", "")
+     if not response or response == "<not serializable>":
+         output_value = attrs.get("output.value", "")
+         if isinstance(output_value, str):
+             try:
+                 parsed = json.loads(output_value)
+                 if isinstance(parsed, dict) and "response" in parsed:
+                     response = parsed.get("response", output_value)
+                 else:
+                     response = output_value
+             except json.JSONDecodeError:
+                 response = output_value
+         else:
+             response = output_value
+
+     return ToolCallInfo(
+         tool_name=tool_name,
+         tool_call_id=tool_call_id,
+         arguments=arguments,
+         response=response,
+     )
+
+
+ def extract_tool_openai_agents(span: dict[str, Any]) -> ToolCallInfo | None:
+     """
+     Extract tool info from OpenAI Agents tool spans.
+
+     Attributes:
+         - tool.name: "multiply"
+         - input.value: "{\"a\":10,\"b\":2}"
+         - output.value: 20.0
+     """
+     attrs = span.get("attributes", {})
+
+     tool_name = attrs.get("tool.name")
+     if not tool_name:
+         return None
+
+     arguments = attrs.get("input.value", "{}")
+     response = attrs.get("output.value", "")
+
+     return ToolCallInfo(
+         tool_name=tool_name,
+         tool_call_id=None,  # OpenAI Agents SDK doesn't put call ID in tool span
+         arguments=arguments,
+         response=response,
+     )
+
+
+ def extract_tool_agno(span: dict[str, Any]) -> ToolCallInfo | None:
+     """
+     Extract tool info from Agno tool spans.
+
+     Attributes:
+         - tool.name: "duckduckgo_search"
+         - tool.description: "..."
+         - tool.parameters: "{\"query\": \"...\", \"max_results\": 5}"
+         - input.value: same as parameters
+         - output.value: JSON response
+     """
+     attrs = span.get("attributes", {})
+
+     tool_name = attrs.get("tool.name")
+     if not tool_name:
+         return None
+
+     arguments = attrs.get("tool.parameters") or attrs.get("input.value", "{}")
+     response = attrs.get("output.value", "")
+
+     return ToolCallInfo(
+         tool_name=tool_name,
+         tool_call_id=None,
+         arguments=arguments,
+         response=response,
+     )
+
+
+ def extract_tool_smolagents(span: dict[str, Any]) -> ToolCallInfo | None:
+     """
+     Extract tool info from SmolAgents tool spans.
+     """
+     attrs = span.get("attributes", {})
+
+     tool_name = attrs.get("tool.name")
+     if not tool_name:
+         return None
+
+     arguments = attrs.get("input.value", "{}")
+     response = attrs.get("output.value", "")
+
+     return ToolCallInfo(
+         tool_name=tool_name,
+         tool_call_id=None,
+         arguments=arguments,
+         response=response,
+     )
+
+
+ # Registry of framework extractors
+ TOOL_EXTRACTORS: dict[Framework, ToolExtractor] = {
+     Framework.PYDANTIC: extract_tool_pydantic,
+     Framework.LANGGRAPH: extract_tool_langgraph,
+     Framework.GOOGLE_ADK: extract_tool_adk,
+     Framework.OPENAI_AGENTS: extract_tool_openai_agents,
+     Framework.AGNO: extract_tool_agno,
+     Framework.SMOLAGENTS: extract_tool_smolagents,
+ }
+
+
+ def get_tool_extractor(framework: Framework | str) -> ToolExtractor:
+     """Get the appropriate tool extractor for the given framework."""
+     if isinstance(framework, str):
+         framework = Framework(framework.lower())
+     return TOOL_EXTRACTORS.get(
+         framework, extract_tool_langgraph
+     )  # Default to langgraph
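A short usage sketch for the registry above. The span dictionary is a hypothetical example shaped like the Pydantic AI attributes documented in extract_tool_pydantic; the attribute values are illustrative only.

from quraite.tracing.tool_extractors import Framework, get_tool_extractor

# Hypothetical TOOL span, shaped like the Pydantic AI attributes documented above.
span = {
    "attributes": {
        "tool.name": "customer_balance",
        "tool_call.id": "call_123",
        "tool_arguments": '{"include_pending": true}',
        "tool_response": "$123.45",
    }
}

extractor = get_tool_extractor(Framework.PYDANTIC)  # the string "pydantic" also works
tool_call = extractor(span)
if tool_call is not None:
    print(tool_call.to_dict())
    # -> {'role': 'tool', 'tool_name': 'customer_balance', 'tool_call_id': 'call_123', ...}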