aiqa-client 0.2.1__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiqa/__init__.py +38 -3
- aiqa/aiqa_exporter.py +208 -63
- aiqa/client.py +146 -25
- aiqa/experiment_runner.py +336 -0
- aiqa/object_serialiser.py +396 -0
- aiqa/test_experiment_runner.py +176 -0
- aiqa/test_tracing.py +230 -0
- aiqa/tracing.py +785 -72
- {aiqa_client-0.2.1.dist-info → aiqa_client-0.3.4.dist-info}/METADATA +94 -4
- aiqa_client-0.3.4.dist-info/RECORD +14 -0
- aiqa_client-0.2.1.dist-info/RECORD +0 -10
- {aiqa_client-0.2.1.dist-info → aiqa_client-0.3.4.dist-info}/WHEEL +0 -0
- {aiqa_client-0.2.1.dist-info → aiqa_client-0.3.4.dist-info}/licenses/LICENSE +0 -0
- {aiqa_client-0.2.1.dist-info → aiqa_client-0.3.4.dist-info}/top_level.txt +0 -0
aiqa/tracing.py
CHANGED
|
@@ -3,24 +3,21 @@ OpenTelemetry tracing setup and utilities. Initializes tracer provider on import
|
|
|
3
3
|
Provides WithTracing decorator to automatically trace function calls.
|
|
4
4
|
"""
|
|
5
5
|
|
|
6
|
-
import os
|
|
7
6
|
import json
|
|
8
7
|
import logging
|
|
9
8
|
import inspect
|
|
10
|
-
|
|
9
|
+
import os
|
|
10
|
+
from typing import Any, Callable, Optional, List
|
|
11
11
|
from functools import wraps
|
|
12
12
|
from opentelemetry import trace
|
|
13
13
|
from opentelemetry.sdk.trace import TracerProvider
|
|
14
|
-
from opentelemetry.
|
|
15
|
-
from opentelemetry.
|
|
16
|
-
from opentelemetry.sdk.resources import Resource
|
|
17
|
-
from opentelemetry.semconv.resource import ResourceAttributes
|
|
18
|
-
from opentelemetry.trace import Status, StatusCode
|
|
19
|
-
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
|
|
14
|
+
from opentelemetry.trace import Status, StatusCode, SpanContext, TraceFlags
|
|
15
|
+
from opentelemetry.propagate import inject, extract
|
|
20
16
|
from .aiqa_exporter import AIQASpanExporter
|
|
21
|
-
from .client import
|
|
17
|
+
from .client import get_aiqa_client, AIQA_TRACER_NAME, get_component_tag, set_component_tag as _set_component_tag, get_aiqa_tracer
|
|
18
|
+
from .object_serialiser import serialize_for_span
|
|
22
19
|
|
|
23
|
-
logger = logging.getLogger(
|
|
20
|
+
logger = logging.getLogger("AIQA")
|
|
24
21
|
|
|
25
22
|
|
|
26
23
|
async def flush_tracing() -> None:
|
|
@@ -31,7 +28,7 @@ async def flush_tracing() -> None:
|
|
|
31
28
|
|
|
32
29
|
This flushes both the BatchSpanProcessor and the exporter buffer.
|
|
33
30
|
"""
|
|
34
|
-
client =
|
|
31
|
+
client = get_aiqa_client()
|
|
35
32
|
if client.get("provider"):
|
|
36
33
|
client["provider"].force_flush() # Synchronous method
|
|
37
34
|
if client.get("exporter"):
|
|
@@ -44,30 +41,85 @@ async def shutdown_tracing() -> None:
|
|
|
44
41
|
It is not necessary to call this function.
|
|
45
42
|
"""
|
|
46
43
|
try:
|
|
47
|
-
client =
|
|
44
|
+
client = get_aiqa_client()
|
|
48
45
|
if client.get("provider"):
|
|
49
46
|
client["provider"].shutdown() # Synchronous method
|
|
50
47
|
if client.get("exporter"):
|
|
51
|
-
|
|
48
|
+
client["exporter"].shutdown() # Synchronous method
|
|
52
49
|
except Exception as e:
|
|
53
|
-
logger.error(f"Error shutting down tracing: {e}"
|
|
50
|
+
logger.error(f"Error shutting down tracing: {e}")
|
|
54
51
|
|
|
55
52
|
|
|
56
53
|
# Export provider and exporter accessors for advanced usage
|
|
57
|
-
|
|
54
|
+
|
|
55
|
+
__all__ = [
|
|
56
|
+
"get_provider", "get_exporter", "flush_tracing", "shutdown_tracing", "WithTracing",
|
|
57
|
+
"set_span_attribute", "set_span_name", "get_active_span",
|
|
58
|
+
"get_trace_id", "get_span_id", "create_span_from_trace_id", "inject_trace_context", "extract_trace_context",
|
|
59
|
+
"set_conversation_id", "set_component_tag", "set_token_usage", "set_provider_and_model", "get_span", "submit_feedback"
|
|
60
|
+
]
|
|
58
61
|
|
|
59
62
|
|
|
60
63
|
class TracingOptions:
|
|
61
|
-
"""
|
|
64
|
+
"""
|
|
65
|
+
Options for WithTracing decorator.
|
|
66
|
+
|
|
67
|
+
This class is used to configure how function calls are traced and what data
|
|
68
|
+
is recorded in span attributes. All fields are optional.
|
|
69
|
+
"""
|
|
62
70
|
|
|
63
71
|
def __init__(
|
|
64
72
|
self,
|
|
65
73
|
name: Optional[str] = None,
|
|
66
|
-
ignore_input: Optional[
|
|
67
|
-
ignore_output: Optional[
|
|
74
|
+
ignore_input: Optional[List[str]] = None,
|
|
75
|
+
ignore_output: Optional[List[str]] = None,
|
|
68
76
|
filter_input: Optional[Callable[[Any], Any]] = None,
|
|
69
77
|
filter_output: Optional[Callable[[Any], Any]] = None,
|
|
70
78
|
):
|
|
79
|
+
"""
|
|
80
|
+
Initialize TracingOptions.
|
|
81
|
+
|
|
82
|
+
Args:
|
|
83
|
+
name: Custom name for the span. If not provided, the function name
|
|
84
|
+
will be used. Useful for renaming spans or providing more
|
|
85
|
+
descriptive names.
|
|
86
|
+
|
|
87
|
+
ignore_input: Iterable of keys (e.g., list, set) to exclude from
|
|
88
|
+
input data when recording span attributes. Only applies when
|
|
89
|
+
input is a dictionary. For example, use `["password", "api_key"]`
|
|
90
|
+
to exclude sensitive fields from being traced.
|
|
91
|
+
|
|
92
|
+
ignore_output: Iterable of keys (e.g., list, set) to exclude from
|
|
93
|
+
output data when recording span attributes. Only applies when
|
|
94
|
+
output is a dictionary. Useful for excluding large or sensitive
|
|
95
|
+
fields from traces.
|
|
96
|
+
|
|
97
|
+
filter_input: Callable function that receives the prepared input data
|
|
98
|
+
and returns a filtered/transformed version to be recorded in the
|
|
99
|
+
span. The function should accept one argument (the input data)
|
|
100
|
+
and return the transformed data. This is applied before
|
|
101
|
+
ignore_input filtering.
|
|
102
|
+
|
|
103
|
+
filter_output: Callable function that receives the output data and
|
|
104
|
+
returns a filtered/transformed version to be recorded in the span.
|
|
105
|
+
The function should accept one argument (the output data) and
|
|
106
|
+
return the transformed data. This is applied before
|
|
107
|
+
ignore_output filtering.
|
|
108
|
+
|
|
109
|
+
Example:
|
|
110
|
+
# Exclude sensitive fields from input
|
|
111
|
+
@WithTracing(ignore_input=["password", "secret_key"])
|
|
112
|
+
def authenticate(username, password):
|
|
113
|
+
return {"token": "..."}
|
|
114
|
+
|
|
115
|
+
# Custom span name and filter output
|
|
116
|
+
@WithTracing(
|
|
117
|
+
name="data_processing",
|
|
118
|
+
filter_output=lambda x: {"count": len(x)} if isinstance(x, list) else x
|
|
119
|
+
)
|
|
120
|
+
def process_data(items):
|
|
121
|
+
return items
|
|
122
|
+
"""
|
|
71
123
|
self.name = name
|
|
72
124
|
self.ignore_input = ignore_input
|
|
73
125
|
self.ignore_output = ignore_output
|
|
@@ -75,47 +127,25 @@ class TracingOptions:
|
|
|
75
127
|
self.filter_output = filter_output
|
|
76
128
|
|
|
77
129
|
|
|
78
|
-
def _serialize_for_span(value: Any) -> Any:
|
|
79
|
-
"""
|
|
80
|
-
Serialize a value for span attributes.
|
|
81
|
-
OpenTelemetry only accepts primitives (bool, str, bytes, int, float) or sequences of those.
|
|
82
|
-
Complex types (dicts, lists, objects) are converted to JSON strings.
|
|
83
|
-
"""
|
|
84
|
-
# Keep primitives as is (including None)
|
|
85
|
-
if value is None or isinstance(value, (str, int, float, bool, bytes)):
|
|
86
|
-
return value
|
|
87
|
-
|
|
88
|
-
# For sequences, check if all elements are primitives
|
|
89
|
-
if isinstance(value, (list, tuple)):
|
|
90
|
-
# If all elements are primitives, return as list
|
|
91
|
-
if all(isinstance(item, (str, int, float, bool, bytes, type(None))) for item in value):
|
|
92
|
-
return list(value)
|
|
93
|
-
# Otherwise serialize to JSON string
|
|
94
|
-
try:
|
|
95
|
-
return json.dumps(value)
|
|
96
|
-
except (TypeError, ValueError):
|
|
97
|
-
return str(value)
|
|
98
|
-
|
|
99
|
-
# For dicts and other complex types, serialize to JSON string
|
|
100
|
-
try:
|
|
101
|
-
return json.dumps(value)
|
|
102
|
-
except (TypeError, ValueError):
|
|
103
|
-
# If JSON serialization fails, convert to string
|
|
104
|
-
return str(value)
|
|
105
130
|
|
|
106
131
|
|
|
107
132
|
def _prepare_input(args: tuple, kwargs: dict) -> Any:
|
|
108
|
-
"""Prepare input for span attributes.
|
|
133
|
+
"""Prepare input for span attributes.
|
|
134
|
+
|
|
135
|
+
Note: This function does NOT serialize values - it just structures the data.
|
|
136
|
+
Serialization happens later via serialize_for_span() to avoid double-encoding
|
|
137
|
+
(e.g., converting messages to JSON string, then encoding that string again).
|
|
138
|
+
"""
|
|
109
139
|
if not args and not kwargs:
|
|
110
140
|
return None
|
|
111
141
|
if len(args) == 1 and not kwargs:
|
|
112
|
-
return
|
|
142
|
+
return args[0] # Don't serialize here - will be serialized later
|
|
113
143
|
# Multiple args or kwargs - combine into dict
|
|
114
144
|
result = {}
|
|
115
145
|
if args:
|
|
116
|
-
result["args"] =
|
|
146
|
+
result["args"] = list(args) # Keep as-is, will be serialized later
|
|
117
147
|
if kwargs:
|
|
118
|
-
result["kwargs"] =
|
|
148
|
+
result["kwargs"] = dict(kwargs) # Keep as-is, will be serialized later
|
|
119
149
|
return result
|
|
120
150
|
|
|
121
151
|
|
|
@@ -123,23 +153,41 @@ def _prepare_and_filter_input(
|
|
|
123
153
|
args: tuple,
|
|
124
154
|
kwargs: dict,
|
|
125
155
|
filter_input: Optional[Callable[[Any], Any]],
|
|
126
|
-
ignore_input: Optional[
|
|
156
|
+
ignore_input: Optional[List[str]],
|
|
127
157
|
) -> Any:
|
|
128
158
|
"""Prepare and filter input for span attributes."""
|
|
129
|
-
|
|
159
|
+
# Handle "self" in ignore_input by skipping the first argument
|
|
160
|
+
filtered_args = args
|
|
161
|
+
filtered_kwargs = kwargs.copy() if kwargs else {}
|
|
162
|
+
filtered_ignore_input = ignore_input
|
|
163
|
+
if ignore_input and "self" in ignore_input:
|
|
164
|
+
# Remove "self" from ignore_input list (we'll handle it specially)
|
|
165
|
+
filtered_ignore_input = [key for key in ignore_input if key != "self"]
|
|
166
|
+
# Skip first arg if it exists (typically self for bound methods)
|
|
167
|
+
if args:
|
|
168
|
+
filtered_args = args[1:]
|
|
169
|
+
# Also remove "self" from kwargs if present
|
|
170
|
+
if "self" in filtered_kwargs:
|
|
171
|
+
del filtered_kwargs["self"]
|
|
172
|
+
|
|
173
|
+
input_data = _prepare_input(filtered_args, filtered_kwargs)
|
|
130
174
|
if filter_input:
|
|
131
175
|
input_data = filter_input(input_data)
|
|
132
|
-
if
|
|
133
|
-
for key in
|
|
176
|
+
if filtered_ignore_input and isinstance(input_data, dict):
|
|
177
|
+
for key in filtered_ignore_input:
|
|
134
178
|
if key in input_data:
|
|
135
179
|
del input_data[key]
|
|
180
|
+
# Also handle case where input_data is just self (single value, not dict)
|
|
181
|
+
# If we filtered out self and there are no remaining args/kwargs, return None
|
|
182
|
+
if ignore_input and "self" in ignore_input and not filtered_args and not filtered_kwargs:
|
|
183
|
+
return None
|
|
136
184
|
return input_data
|
|
137
185
|
|
|
138
186
|
|
|
139
187
|
def _prepare_and_filter_output(
|
|
140
188
|
result: Any,
|
|
141
189
|
filter_output: Optional[Callable[[Any], Any]],
|
|
142
|
-
ignore_output: Optional[
|
|
190
|
+
ignore_output: Optional[List[str]],
|
|
143
191
|
) -> Any:
|
|
144
192
|
"""Prepare and filter output for span attributes."""
|
|
145
193
|
output_data = result
|
|
@@ -160,6 +208,191 @@ def _handle_span_exception(span: trace.Span, exception: Exception) -> None:
|
|
|
160
208
|
span.set_status(Status(StatusCode.ERROR, str(error)))
|
|
161
209
|
|
|
162
210
|
|
|
211
|
+
def _is_attribute_set(span: trace.Span, attribute_name: str) -> bool:
|
|
212
|
+
"""
|
|
213
|
+
Check if an attribute is already set on a span.
|
|
214
|
+
Returns True if the attribute exists, False otherwise.
|
|
215
|
+
Safe against exceptions.
|
|
216
|
+
"""
|
|
217
|
+
try:
|
|
218
|
+
# Try multiple ways to access span attributes (SDK spans may store them differently)
|
|
219
|
+
# Check public 'attributes' property
|
|
220
|
+
if hasattr(span, "attributes"):
|
|
221
|
+
attrs = span.attributes
|
|
222
|
+
if attrs and attribute_name in attrs:
|
|
223
|
+
return True
|
|
224
|
+
|
|
225
|
+
# Check private '_attributes' (common in OpenTelemetry SDK)
|
|
226
|
+
if hasattr(span, "_attributes"):
|
|
227
|
+
attrs = span._attributes
|
|
228
|
+
if attrs and attribute_name in attrs:
|
|
229
|
+
return True
|
|
230
|
+
|
|
231
|
+
# If we can't find the attribute, assume not set (conservative approach)
|
|
232
|
+
return False
|
|
233
|
+
except Exception:
|
|
234
|
+
# If anything goes wrong, assume not set (conservative approach)
|
|
235
|
+
return False
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
def _extract_and_set_token_usage(span: trace.Span, result: Any) -> None:
|
|
239
|
+
"""
|
|
240
|
+
Extract OpenAI API style token usage from result and add to span attributes
|
|
241
|
+
using OpenTelemetry semantic conventions for gen_ai.
|
|
242
|
+
|
|
243
|
+
Looks for usage dict with prompt_tokens, completion_tokens, and total_tokens.
|
|
244
|
+
Sets gen_ai.usage.input_tokens, gen_ai.usage.output_tokens, and gen_ai.usage.total_tokens.
|
|
245
|
+
Only sets attributes that are not already set.
|
|
246
|
+
|
|
247
|
+
This function detects token usage from OpenAI API response patterns:
|
|
248
|
+
- OpenAI Chat Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
|
|
249
|
+
See https://platform.openai.com/docs/api-reference/chat/object (usage field)
|
|
250
|
+
- OpenAI Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
|
|
251
|
+
See https://platform.openai.com/docs/api-reference/completions/object (usage field)
|
|
252
|
+
|
|
253
|
+
This function is safe against exceptions and will not derail tracing or program execution.
|
|
254
|
+
"""
|
|
255
|
+
try:
|
|
256
|
+
if not span.is_recording():
|
|
257
|
+
return
|
|
258
|
+
|
|
259
|
+
usage = None
|
|
260
|
+
|
|
261
|
+
# Check if result is a dict with 'usage' key
|
|
262
|
+
try:
|
|
263
|
+
if isinstance(result, dict):
|
|
264
|
+
usage = result.get("usage")
|
|
265
|
+
# Also check if result itself is a usage dict (OpenAI format)
|
|
266
|
+
if usage is None and all(key in result for key in ("prompt_tokens", "completion_tokens", "total_tokens")):
|
|
267
|
+
usage = result
|
|
268
|
+
# Also check if result itself is a usage dict (Bedrock format)
|
|
269
|
+
elif usage is None and all(key in result for key in ("input_tokens", "output_tokens")):
|
|
270
|
+
usage = result
|
|
271
|
+
|
|
272
|
+
# Check if result has a 'usage' attribute (e.g., OpenAI response object)
|
|
273
|
+
elif hasattr(result, "usage"):
|
|
274
|
+
usage = result.usage
|
|
275
|
+
except Exception:
|
|
276
|
+
# If accessing result properties fails, just return silently
|
|
277
|
+
return
|
|
278
|
+
|
|
279
|
+
# Extract token usage if found
|
|
280
|
+
if isinstance(usage, dict):
|
|
281
|
+
try:
|
|
282
|
+
# Support both OpenAI format (prompt_tokens/completion_tokens) and Bedrock format (input_tokens/output_tokens)
|
|
283
|
+
prompt_tokens = usage.get("prompt_tokens") or usage.get("PromptTokens")
|
|
284
|
+
completion_tokens = usage.get("completion_tokens") or usage.get("CompletionTokens")
|
|
285
|
+
input_tokens = usage.get("input_tokens") or usage.get("InputTokens")
|
|
286
|
+
output_tokens = usage.get("output_tokens") or usage.get("OutputTokens")
|
|
287
|
+
total_tokens = usage.get("total_tokens") or usage.get("TotalTokens")
|
|
288
|
+
|
|
289
|
+
# Use Bedrock format if OpenAI format not available
|
|
290
|
+
if prompt_tokens is None:
|
|
291
|
+
prompt_tokens = input_tokens
|
|
292
|
+
if completion_tokens is None:
|
|
293
|
+
completion_tokens = output_tokens
|
|
294
|
+
|
|
295
|
+
# Calculate total_tokens if not provided but we have input and output
|
|
296
|
+
if total_tokens is None and prompt_tokens is not None and completion_tokens is not None:
|
|
297
|
+
total_tokens = prompt_tokens + completion_tokens
|
|
298
|
+
|
|
299
|
+
# Only set attributes that are not already set
|
|
300
|
+
if prompt_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.input_tokens"):
|
|
301
|
+
span.set_attribute("gen_ai.usage.input_tokens", prompt_tokens)
|
|
302
|
+
if completion_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.output_tokens"):
|
|
303
|
+
span.set_attribute("gen_ai.usage.output_tokens", completion_tokens)
|
|
304
|
+
if total_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.total_tokens"):
|
|
305
|
+
span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
|
|
306
|
+
except Exception:
|
|
307
|
+
# If setting attributes fails, log but don't raise
|
|
308
|
+
logger.debug(f"Failed to set token usage attributes on span")
|
|
309
|
+
except Exception:
|
|
310
|
+
# Catch any other exceptions to ensure this never derails tracing
|
|
311
|
+
logger.debug(f"Error in _extract_and_set_token_usage")
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
def _extract_and_set_provider_and_model(span: trace.Span, result: Any) -> None:
|
|
315
|
+
"""
|
|
316
|
+
Extract provider and model information from result and add to span attributes
|
|
317
|
+
using OpenTelemetry semantic conventions for gen_ai.
|
|
318
|
+
|
|
319
|
+
Looks for 'model', 'provider', 'provider_name' fields in the result.
|
|
320
|
+
Sets gen_ai.provider.name and gen_ai.request.model.
|
|
321
|
+
Only sets attributes that are not already set.
|
|
322
|
+
|
|
323
|
+
This function detects model information from common API response patterns:
|
|
324
|
+
- OpenAI Chat Completions API: The 'model' field is at the top level of the response.
|
|
325
|
+
See https://platform.openai.com/docs/api-reference/chat/object
|
|
326
|
+
- OpenAI Completions API: The 'model' field is at the top level of the response.
|
|
327
|
+
See https://platform.openai.com/docs/api-reference/completions/object
|
|
328
|
+
|
|
329
|
+
This function is safe against exceptions and will not derail tracing or program execution.
|
|
330
|
+
"""
|
|
331
|
+
try:
|
|
332
|
+
if not span.is_recording():
|
|
333
|
+
return
|
|
334
|
+
|
|
335
|
+
model = None
|
|
336
|
+
provider = None
|
|
337
|
+
|
|
338
|
+
# Check if result is a dict
|
|
339
|
+
try:
|
|
340
|
+
if isinstance(result, dict):
|
|
341
|
+
model = result.get("model") or result.get("Model")
|
|
342
|
+
provider = result.get("provider") or result.get("Provider") or result.get("provider_name") or result.get("providerName")
|
|
343
|
+
|
|
344
|
+
# Check if result has attributes (e.g., OpenAI response object)
|
|
345
|
+
elif hasattr(result, "model"):
|
|
346
|
+
model = result.model
|
|
347
|
+
if hasattr(result, "provider"):
|
|
348
|
+
provider = result.provider
|
|
349
|
+
elif hasattr(result, "provider_name"):
|
|
350
|
+
provider = result.provider_name
|
|
351
|
+
elif hasattr(result, "providerName"):
|
|
352
|
+
provider = result.providerName
|
|
353
|
+
|
|
354
|
+
# Check nested structures (e.g., response.data.model)
|
|
355
|
+
if model is None and hasattr(result, "data"):
|
|
356
|
+
data = result.data
|
|
357
|
+
if isinstance(data, dict):
|
|
358
|
+
model = data.get("model") or data.get("Model")
|
|
359
|
+
elif hasattr(data, "model"):
|
|
360
|
+
model = data.model
|
|
361
|
+
|
|
362
|
+
# Check for model in choices (OpenAI pattern)
|
|
363
|
+
if model is None and isinstance(result, dict):
|
|
364
|
+
choices = result.get("choices")
|
|
365
|
+
if choices and isinstance(choices, list) and len(choices) > 0:
|
|
366
|
+
first_choice = choices[0]
|
|
367
|
+
if isinstance(first_choice, dict):
|
|
368
|
+
model = first_choice.get("model")
|
|
369
|
+
elif hasattr(first_choice, "model"):
|
|
370
|
+
model = first_choice.model
|
|
371
|
+
except Exception:
|
|
372
|
+
# If accessing result properties fails, just return silently
|
|
373
|
+
return
|
|
374
|
+
|
|
375
|
+
# Set attributes if found and not already set
|
|
376
|
+
try:
|
|
377
|
+
if model is not None and not _is_attribute_set(span, "gen_ai.request.model"):
|
|
378
|
+
# Convert to string if needed
|
|
379
|
+
model_str = str(model) if model is not None else None
|
|
380
|
+
if model_str:
|
|
381
|
+
span.set_attribute("gen_ai.request.model", model_str)
|
|
382
|
+
|
|
383
|
+
if provider is not None and not _is_attribute_set(span, "gen_ai.provider.name"):
|
|
384
|
+
# Convert to string if needed
|
|
385
|
+
provider_str = str(provider) if provider is not None else None
|
|
386
|
+
if provider_str:
|
|
387
|
+
span.set_attribute("gen_ai.provider.name", provider_str)
|
|
388
|
+
except Exception:
|
|
389
|
+
# If setting attributes fails, log but don't raise
|
|
390
|
+
logger.debug(f"Failed to set provider/model attributes on span")
|
|
391
|
+
except Exception:
|
|
392
|
+
# Catch any other exceptions to ensure this never derails tracing
|
|
393
|
+
logger.debug(f"Error in _extract_and_set_provider_and_model")
|
|
394
|
+
|
|
395
|
+
|
|
163
396
|
class TracedGenerator:
|
|
164
397
|
"""Wrapper for sync generators that traces iteration."""
|
|
165
398
|
|
|
@@ -169,7 +402,7 @@ class TracedGenerator:
|
|
|
169
402
|
span: trace.Span,
|
|
170
403
|
fn_name: str,
|
|
171
404
|
filter_output: Optional[Callable[[Any], Any]],
|
|
172
|
-
ignore_output: Optional[
|
|
405
|
+
ignore_output: Optional[List[str]],
|
|
173
406
|
context_token: Any,
|
|
174
407
|
):
|
|
175
408
|
self._generator = generator
|
|
@@ -207,6 +440,12 @@ class TracedGenerator:
|
|
|
207
440
|
|
|
208
441
|
def _finalize_span_success(self):
|
|
209
442
|
"""Set output and success status on span."""
|
|
443
|
+
# Check last yielded value for token usage (common pattern in streaming responses)
|
|
444
|
+
if self._yielded_values:
|
|
445
|
+
last_value = self._yielded_values[-1]
|
|
446
|
+
_extract_and_set_token_usage(self._span, last_value)
|
|
447
|
+
_extract_and_set_provider_and_model(self._span, last_value)
|
|
448
|
+
|
|
210
449
|
# Record summary of yielded values
|
|
211
450
|
output_data = {
|
|
212
451
|
"type": "generator",
|
|
@@ -217,13 +456,14 @@ class TracedGenerator:
|
|
|
217
456
|
if self._yielded_values:
|
|
218
457
|
sample_size = min(10, len(self._yielded_values))
|
|
219
458
|
output_data["sample_values"] = [
|
|
220
|
-
|
|
459
|
+
serialize_for_span(v) for v in self._yielded_values[:sample_size]
|
|
221
460
|
]
|
|
222
461
|
if len(self._yielded_values) > sample_size:
|
|
223
462
|
output_data["truncated"] = True
|
|
224
463
|
|
|
225
464
|
output_data = _prepare_and_filter_output(output_data, self._filter_output, self._ignore_output)
|
|
226
|
-
|
|
465
|
+
if output_data is not None:
|
|
466
|
+
self._span.set_attribute("output", serialize_for_span(output_data))
|
|
227
467
|
self._span.set_status(Status(StatusCode.OK))
|
|
228
468
|
|
|
229
469
|
|
|
@@ -236,7 +476,7 @@ class TracedAsyncGenerator:
|
|
|
236
476
|
span: trace.Span,
|
|
237
477
|
fn_name: str,
|
|
238
478
|
filter_output: Optional[Callable[[Any], Any]],
|
|
239
|
-
ignore_output: Optional[
|
|
479
|
+
ignore_output: Optional[List[str]],
|
|
240
480
|
context_token: Any,
|
|
241
481
|
):
|
|
242
482
|
self._generator = generator
|
|
@@ -274,6 +514,12 @@ class TracedAsyncGenerator:
|
|
|
274
514
|
|
|
275
515
|
def _finalize_span_success(self):
|
|
276
516
|
"""Set output and success status on span."""
|
|
517
|
+
# Check last yielded value for token usage (common pattern in streaming responses)
|
|
518
|
+
if self._yielded_values:
|
|
519
|
+
last_value = self._yielded_values[-1]
|
|
520
|
+
_extract_and_set_token_usage(self._span, last_value)
|
|
521
|
+
_extract_and_set_provider_and_model(self._span, last_value)
|
|
522
|
+
|
|
277
523
|
# Record summary of yielded values
|
|
278
524
|
output_data = {
|
|
279
525
|
"type": "async_generator",
|
|
@@ -284,13 +530,14 @@ class TracedAsyncGenerator:
|
|
|
284
530
|
if self._yielded_values:
|
|
285
531
|
sample_size = min(10, len(self._yielded_values))
|
|
286
532
|
output_data["sample_values"] = [
|
|
287
|
-
|
|
533
|
+
serialize_for_span(v) for v in self._yielded_values[:sample_size]
|
|
288
534
|
]
|
|
289
535
|
if len(self._yielded_values) > sample_size:
|
|
290
536
|
output_data["truncated"] = True
|
|
291
537
|
|
|
292
538
|
output_data = _prepare_and_filter_output(output_data, self._filter_output, self._ignore_output)
|
|
293
|
-
|
|
539
|
+
if output_data is not None:
|
|
540
|
+
self._span.set_attribute("output", serialize_for_span(output_data))
|
|
294
541
|
self._span.set_status(Status(StatusCode.OK))
|
|
295
542
|
|
|
296
543
|
|
|
@@ -298,8 +545,8 @@ def WithTracing(
|
|
|
298
545
|
func: Optional[Callable] = None,
|
|
299
546
|
*,
|
|
300
547
|
name: Optional[str] = None,
|
|
301
|
-
ignore_input: Optional[
|
|
302
|
-
ignore_output: Optional[
|
|
548
|
+
ignore_input: Optional[List[str]] = None,
|
|
549
|
+
ignore_output: Optional[List[str]] = None,
|
|
303
550
|
filter_input: Optional[Callable[[Any], Any]] = None,
|
|
304
551
|
filter_output: Optional[Callable[[Any], Any]] = None,
|
|
305
552
|
):
|
|
@@ -312,8 +559,12 @@ def WithTracing(
|
|
|
312
559
|
Args:
|
|
313
560
|
func: The function to trace (when used as @WithTracing)
|
|
314
561
|
name: Optional custom name for the span (defaults to function name)
|
|
315
|
-
ignore_input:
|
|
316
|
-
|
|
562
|
+
ignore_input: List of keys to exclude from input data when recording span attributes.
|
|
563
|
+
Only applies when input is a dictionary. For example, use ["password", "api_key"]
|
|
564
|
+
to exclude sensitive fields from being traced.
|
|
565
|
+
ignore_output: List of keys to exclude from output data when recording span attributes.
|
|
566
|
+
Only applies when output is a dictionary. Useful for excluding large or sensitive
|
|
567
|
+
fields from traces.
|
|
317
568
|
filter_input: Function to filter/transform input before recording
|
|
318
569
|
filter_output: Function to filter/transform output before recording
|
|
319
570
|
|
|
@@ -352,7 +603,7 @@ def WithTracing(
|
|
|
352
603
|
is_generator = inspect.isgeneratorfunction(fn)
|
|
353
604
|
is_async_generator = inspect.isasyncgenfunction(fn) if hasattr(inspect, 'isasyncgenfunction') else False
|
|
354
605
|
|
|
355
|
-
tracer =
|
|
606
|
+
tracer = get_aiqa_tracer()
|
|
356
607
|
|
|
357
608
|
def _setup_span(span: trace.Span, input_data: Any) -> bool:
|
|
358
609
|
"""Setup span with input data. Returns True if span is recording."""
|
|
@@ -362,8 +613,13 @@ def WithTracing(
|
|
|
362
613
|
|
|
363
614
|
logger.debug(f"Span {fn_name} is recording, trace_id={format(span.get_span_context().trace_id, '032x')}")
|
|
364
615
|
|
|
616
|
+
# Set component tag if configured
|
|
617
|
+
component_tag = get_component_tag()
|
|
618
|
+
if component_tag:
|
|
619
|
+
span.set_attribute("gen_ai.component.id", component_tag)
|
|
620
|
+
|
|
365
621
|
if input_data is not None:
|
|
366
|
-
span.set_attribute("input",
|
|
622
|
+
span.set_attribute("input", serialize_for_span(input_data))
|
|
367
623
|
|
|
368
624
|
trace_id = format(span.get_span_context().trace_id, "032x")
|
|
369
625
|
logger.debug(f"do traceable stuff {fn_name} {trace_id}")
|
|
@@ -371,12 +627,20 @@ def WithTracing(
|
|
|
371
627
|
|
|
372
628
|
def _finalize_span_success(span: trace.Span, result: Any) -> None:
|
|
373
629
|
"""Set output and success status on span."""
|
|
630
|
+
# Extract and set token usage if present (before filtering output)
|
|
631
|
+
_extract_and_set_token_usage(span, result)
|
|
632
|
+
# Extract and set provider/model if present (before filtering output)
|
|
633
|
+
_extract_and_set_provider_and_model(span, result)
|
|
634
|
+
|
|
374
635
|
output_data = _prepare_and_filter_output(result, filter_output, ignore_output)
|
|
375
|
-
|
|
636
|
+
if output_data is not None:
|
|
637
|
+
span.set_attribute("output", serialize_for_span(output_data))
|
|
376
638
|
span.set_status(Status(StatusCode.OK))
|
|
377
639
|
|
|
378
640
|
def _execute_with_span_sync(executor: Callable[[], Any], input_data: Any) -> Any:
|
|
379
641
|
"""Execute sync function within span context, handling input/output and exceptions."""
|
|
642
|
+
# Ensure tracer provider is initialized before creating spans
|
|
643
|
+
get_aiqa_client()
|
|
380
644
|
with tracer.start_as_current_span(fn_name) as span:
|
|
381
645
|
if not _setup_span(span, input_data):
|
|
382
646
|
return executor()
|
|
@@ -391,6 +655,8 @@ def WithTracing(
|
|
|
391
655
|
|
|
392
656
|
async def _execute_with_span_async(executor: Callable[[], Any], input_data: Any) -> Any:
|
|
393
657
|
"""Execute async function within span context, handling input/output and exceptions."""
|
|
658
|
+
# Ensure tracer provider is initialized before creating spans
|
|
659
|
+
get_aiqa_client()
|
|
394
660
|
with tracer.start_as_current_span(fn_name) as span:
|
|
395
661
|
if not _setup_span(span, input_data):
|
|
396
662
|
return await executor()
|
|
@@ -408,6 +674,8 @@ def WithTracing(
|
|
|
408
674
|
|
|
409
675
|
def _execute_generator_sync(executor: Callable[[], Any], input_data: Any) -> Any:
|
|
410
676
|
"""Execute sync generator function, returning a traced generator."""
|
|
677
|
+
# Ensure tracer provider is initialized before creating spans
|
|
678
|
+
get_aiqa_client()
|
|
411
679
|
# Create span but don't use 'with' - span will be closed by TracedGenerator
|
|
412
680
|
span = tracer.start_span(fn_name)
|
|
413
681
|
token = trace.context_api.attach(trace.context_api.set_span_in_context(span))
|
|
@@ -429,18 +697,20 @@ def WithTracing(
|
|
|
429
697
|
|
|
430
698
|
async def _execute_generator_async(executor: Callable[[], Any], input_data: Any) -> Any:
|
|
431
699
|
"""Execute async generator function, returning a traced async generator."""
|
|
700
|
+
# Ensure tracer provider is initialized before creating spans
|
|
701
|
+
get_aiqa_client()
|
|
432
702
|
# Create span but don't use 'with' - span will be closed by TracedAsyncGenerator
|
|
433
703
|
span = tracer.start_span(fn_name)
|
|
434
704
|
token = trace.context_api.attach(trace.context_api.set_span_in_context(span))
|
|
435
705
|
|
|
436
706
|
try:
|
|
437
707
|
if not _setup_span(span, input_data):
|
|
438
|
-
generator =
|
|
708
|
+
generator = executor()
|
|
439
709
|
trace.context_api.detach(token)
|
|
440
710
|
span.end()
|
|
441
711
|
return generator
|
|
442
712
|
|
|
443
|
-
generator =
|
|
713
|
+
generator = executor()
|
|
444
714
|
return TracedAsyncGenerator(generator, span, fn_name, filter_output, ignore_output, token)
|
|
445
715
|
except Exception as exception:
|
|
446
716
|
trace.context_api.detach(token)
|
|
@@ -513,7 +783,7 @@ def set_span_attribute(attribute_name: str, attribute_value: Any) -> bool:
|
|
|
513
783
|
"""
|
|
514
784
|
span = trace.get_current_span()
|
|
515
785
|
if span and span.is_recording():
|
|
516
|
-
span.set_attribute(attribute_name,
|
|
786
|
+
span.set_attribute(attribute_name, serialize_for_span(attribute_value))
|
|
517
787
|
return True
|
|
518
788
|
return False
|
|
519
789
|
|
|
@@ -531,13 +801,456 @@ def get_active_span() -> Optional[trace.Span]:
|
|
|
531
801
|
"""Get the currently active span."""
|
|
532
802
|
return trace.get_current_span()
|
|
533
803
|
|
|
804
|
+
|
|
805
|
+
def set_conversation_id(conversation_id: str) -> bool:
    """
    Record the gen_ai.conversation.id attribute on the currently active span.

    Applying the same conversation ID to several traces groups them together
    as a single conversation.
    See https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-events/ for details.

    Args:
        conversation_id: Unique identifier for the conversation (e.g. user session ID, chat ID).

    Returns:
        True when gen_ai.conversation.id was applied, False when no active span exists.

    Example:
        from aiqa import WithTracing, set_conversation_id

        @WithTracing
        def handle_user_request(user_id: str, request: dict):
            # Group all traces for this user session under one conversation
            set_conversation_id(f"user_{user_id}_session_{request.get('session_id')}")
            # ... rest of function
    """
    conversation_attribute = "gen_ai.conversation.id"
    return set_span_attribute(conversation_attribute, conversation_id)
|
|
827
|
+
|
|
828
|
+
|
|
829
|
+
def set_token_usage(
    input_tokens: Optional[int] = None,
    output_tokens: Optional[int] = None,
    total_tokens: Optional[int] = None,
) -> bool:
    """
    Record token usage on the active span, following the OpenTelemetry gen_ai
    semantic conventions.

    Lets callers report token usage explicitly; AIQA tracing also detects and
    records usage from standard OpenAI-like API responses automatically.
    See https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/ for details.

    Args:
        input_tokens: Input token count (gen_ai.usage.input_tokens).
        output_tokens: Generated token count (gen_ai.usage.output_tokens).
        total_tokens: Total token count (gen_ai.usage.total_tokens).

    Returns:
        True when at least one usage attribute was recorded, False when no
        active span exists or recording failed.

    Example:
        from aiqa import WithTracing, set_token_usage

        @WithTracing
        def call_llm(prompt: str):
            response = openai_client.chat.completions.create(...)
            set_token_usage(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens,
                total_tokens=response.usage.total_tokens
            )
            return response
    """
    active_span = trace.get_current_span()
    if not active_span or not active_span.is_recording():
        return False

    # Map each semantic-convention attribute to its (possibly absent) value.
    usage_attributes = (
        ("gen_ai.usage.input_tokens", input_tokens),
        ("gen_ai.usage.output_tokens", output_tokens),
        ("gen_ai.usage.total_tokens", total_tokens),
    )
    applied = 0
    try:
        for attribute_name, attribute_value in usage_attributes:
            if attribute_value is not None:
                active_span.set_attribute(attribute_name, attribute_value)
                applied += 1
    except Exception as e:
        logger.warning(f"Failed to set token usage attributes: {e}")
        return False

    return applied > 0
|
|
882
|
+
|
|
883
|
+
|
|
884
|
+
def set_provider_and_model(
    provider: Optional[str] = None,
    model: Optional[str] = None,
) -> bool:
    """
    Record provider and model information on the active span, following the
    OpenTelemetry gen_ai semantic conventions.

    Lets callers report provider/model explicitly; AIQA tracing also detects
    them from standard API responses automatically.
    See https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/ for details.

    Args:
        provider: AI provider name, e.g. "openai", "anthropic", "google"
            (gen_ai.provider.name).
        model: Model name, e.g. "gpt-4", "claude-3-5-sonnet" (gen_ai.request.model).

    Returns:
        True when at least one attribute was recorded, False when no active
        span exists or recording failed.

    Example:
        from aiqa import WithTracing, set_provider_and_model

        @WithTracing
        def call_llm(prompt: str):
            response = openai_client.chat.completions.create(...)
            set_provider_and_model(
                provider="openai",
                model=response.model
            )
            return response
    """
    active_span = trace.get_current_span()
    if not active_span or not active_span.is_recording():
        return False

    # Values are coerced to str before being attached to the span.
    candidate_attributes = (
        ("gen_ai.provider.name", provider),
        ("gen_ai.request.model", model),
    )
    applied = 0
    try:
        for attribute_name, attribute_value in candidate_attributes:
            if attribute_value is not None:
                active_span.set_attribute(attribute_name, str(attribute_value))
                applied += 1
    except Exception as e:
        logger.warning(f"Failed to set provider/model attributes: {e}")
        return False

    return applied > 0
|
|
931
|
+
|
|
932
|
+
|
|
933
|
+
def set_component_tag(tag: str) -> None:
    """
    Register the component tag that AIQA attaches to every span it creates.

    The tag identifies which component/system produced the spans. It can also
    be supplied through the AIQA_COMPONENT_TAG environment variable; calling
    this function overrides that value. When relying on the environment
    variable, call get_aiqa_client() first so environment variables are loaded.

    Args:
        tag: Component identifier, e.g. "mynamespace.mysystem" or "backend.api".

    Example:
        from aiqa import get_aiqa_client, set_component_tag, WithTracing

        # Initialize client (loads env vars including AIQA_COMPONENT_TAG)
        get_aiqa_client()

        # Or set the tag programmatically (overrides the env var)
        set_component_tag("mynamespace.mysystem")

        @WithTracing
        def my_function():
            pass
    """
    # Delegate to the module-level setter that owns the stored tag.
    _set_component_tag(tag)
|
|
959
|
+
|
|
534
960
|
def get_provider() -> Optional[TracerProvider]:
    """Return the tracer provider, for advanced usage."""
    return get_aiqa_client().get("provider")
|
|
538
964
|
|
|
539
965
|
def get_exporter() -> Optional[AIQASpanExporter]:
    """Return the span exporter, for advanced usage."""
    return get_aiqa_client().get("exporter")
|
|
543
969
|
|
|
970
|
+
|
|
971
|
+
def get_trace_id() -> Optional[str]:
    """
    Return the current trace ID as a 32-character hexadecimal string.

    Returns:
        The hex-encoded trace ID, or None when there is no valid active span.

    Example:
        trace_id = get_trace_id()
        # Pass trace_id to another service/agent
        # e.g., include in HTTP headers, message queue metadata, etc.
    """
    current = trace.get_current_span()
    if not current:
        return None
    ctx = current.get_span_context()
    return format(ctx.trace_id, "032x") if ctx.is_valid else None
|
|
987
|
+
|
|
988
|
+
|
|
989
|
+
def get_span_id() -> Optional[str]:
    """
    Return the current span ID as a 16-character hexadecimal string.

    Returns:
        The hex-encoded span ID, or None when there is no valid active span.

    Example:
        span_id = get_span_id()
        # Can be used to create child spans in other services
    """
    current = trace.get_current_span()
    if not current:
        return None
    ctx = current.get_span_context()
    return format(ctx.span_id, "016x") if ctx.is_valid else None
|
|
1004
|
+
|
|
1005
|
+
|
|
1006
|
+
def create_span_from_trace_id(
    trace_id: str,
    parent_span_id: Optional[str] = None,
    span_name: str = "continued_span",
) -> trace.Span:
    """
    Start a new span that continues an existing trace, identified by its trace ID.

    Useful for linking traces across different services or agents.

    Args:
        trace_id: Trace ID as a 32-character hexadecimal string.
        parent_span_id: Optional parent span ID as a 16-character hexadecimal
            string; when given, the new span becomes a child of that span.
        span_name: Name for the new span (default: "continued_span").

    Returns:
        A new span continuing the trace. Use it in a context manager or call
        end() manually. On a parse failure a fresh root span is returned instead.

    Example:
        # In service A: capture identifiers
        trace_id = get_trace_id()
        span_id = get_span_id()

        # Send them to service B (HTTP, message queue, ...)

        # In service B: continue the trace
        with create_span_from_trace_id(trace_id, parent_span_id=span_id, span_name="service_b_operation"):
            pass
    """

    def _tag_component(new_span: trace.Span) -> trace.Span:
        # Attach the configured component tag, when one is set.
        component_tag = get_component_tag()
        if component_tag:
            new_span.set_attribute("gen_ai.component.id", component_tag)
        return new_span

    tracer = get_aiqa_tracer()
    try:
        # Build a remote parent context from the hex identifiers.
        # NOTE(review): without parent_span_id this uses span_id=0, which OTel
        # treats as an invalid SpanContext — confirm the SDK still links the trace.
        remote_parent = SpanContext(
            trace_id=int(trace_id, 16),
            span_id=int(parent_span_id, 16) if parent_span_id else 0,
            is_remote=True,
            trace_flags=TraceFlags(0x01),  # SAMPLED flag
        )

        from opentelemetry.trace import set_span_in_context
        parent_context = set_span_in_context(trace.NonRecordingSpan(remote_parent))

        # The new span is created as a child of the remote parent.
        return _tag_component(tracer.start_span(span_name, context=parent_context))
    except (ValueError, AttributeError) as e:
        logger.error(f"Error creating span from trace_id: {e}")
        # Fallback: start an unlinked fresh span rather than failing.
        return _tag_component(tracer.start_span(span_name))
|
|
1077
|
+
|
|
1078
|
+
|
|
1079
|
+
def inject_trace_context(carrier: dict) -> None:
    """
    Write the current trace context into a carrier (e.g. an HTTP headers dict)
    so another service can continue the trace.

    Args:
        carrier: Mutable mapping that receives the propagation entries.

    Example:
        import requests

        headers = {}
        inject_trace_context(headers)
        response = requests.get("http://other-service/api", headers=headers)
    """
    try:
        inject(carrier)
    except Exception as e:
        # Propagation problems must never break the caller's request path.
        logger.warning(f"Error injecting trace context: {e}")
|
|
1098
|
+
|
|
1099
|
+
|
|
1100
|
+
def extract_trace_context(carrier: dict) -> Any:
    """
    Read trace context out of a carrier (e.g. an HTTP headers dict), to
    continue a trace started in another service.

    Args:
        carrier: Mapping containing propagation entries (e.g. request headers).

    Returns:
        A context object usable with trace.use_span() or tracer.start_span(),
        or None when extraction fails.

    Example:
        from opentelemetry.trace import use_span

        # Extract context from incoming request headers
        ctx = extract_trace_context(request.headers)

        # Use the context to create a span
        with use_span(ctx):
            pass

        # Or create a span with the context
        tracer = get_aiqa_tracer()
        with tracer.start_as_current_span("operation", context=ctx):
            pass
    """
    try:
        return extract(carrier)
    except Exception as e:
        # Best-effort: a malformed carrier should not break the caller.
        logger.warning(f"Error extracting trace context: {e}")
        return None
|
|
1133
|
+
|
|
1134
|
+
|
|
1135
|
+
def get_span(span_id: str, organisation_id: Optional[str] = None, exclude: Optional[List[str]] = None, timeout: float = 30.0) -> Optional[dict]:
    """
    Get a span by its ID from the AIQA server.

    Expected usage is: re-playing a specific function call in a unit test
    (either a developer debugging an issue, or as part of a test suite).

    Args:
        span_id: The span ID as a hexadecimal string (16 characters) or client span ID.
        organisation_id: Optional organisation ID. If not provided, will try to get
            from the AIQA_ORGANISATION_ID environment variable. The organisation is
            typically extracted from the API key during authentication, but the API
            requires it as a query parameter.
        exclude: Optional list of fields to exclude from the span data. By default
            this function WILL return 'attributes' (often large).
        timeout: Seconds to wait for each HTTP request before giving up (default 30).

    Returns:
        The span data as a dictionary, or None if not found.

    Raises:
        ValueError: If AIQA_SERVER_URL, the organisation ID, or AIQA_API_KEY is
            missing, or the server returns an unexpected error status.

    Example:
        from aiqa import get_span

        span = get_span('abc123...')
        if span:
            print(f"Found span: {span['name']}")
            my_function(**span['input'])
    """
    import os
    import requests

    server_url = os.getenv("AIQA_SERVER_URL", "").rstrip("/")
    api_key = os.getenv("AIQA_API_KEY", "")
    org_id = organisation_id or os.getenv("AIQA_ORGANISATION_ID", "")

    if not server_url:
        raise ValueError("AIQA_SERVER_URL is not set. Cannot retrieve span.")
    if not org_id:
        raise ValueError("Organisation ID is required. Provide it as parameter or set AIQA_ORGANISATION_ID environment variable.")
    if not api_key:
        raise ValueError("API key is required. Set AIQA_API_KEY environment variable.")

    # api_key is guaranteed non-empty here, so the header is always sent.
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"ApiKey {api_key}",
    }

    # The span may be indexed under either the OTel span ID or the client span ID.
    for query_field in ["spanId", "clientSpanId"]:
        url = f"{server_url}/span"
        params = {
            "q": f"{query_field}:{span_id}",
            "organisation": org_id,
            "limit": "1",
            # requests drops None-valued params, so exactly one of these is sent.
            "exclude": ",".join(exclude) if exclude else None,
            "fields": "*" if not exclude else None,
        }

        # timeout prevents an unresponsive server from hanging the caller forever.
        response = requests.get(url, params=params, headers=headers, timeout=timeout)
        if response.status_code == 200:
            hits = response.json().get("hits", [])
            if hits:
                return hits[0]
        elif response.status_code == 404:
            # Try next query field
            continue
        else:
            error_text = response.text
            raise ValueError(f"Failed to get span: {response.status_code} - {error_text[:500]}")
    # not found
    return None
|
|
1203
|
+
|
|
1204
|
+
|
|
1205
|
+
async def submit_feedback(
    trace_id: str,
    thumbs_up: Optional[bool] = None,
    comment: Optional[str] = None,
) -> None:
    """
    Submit feedback for a trace by creating a new span with the same trace ID.

    This allows you to add feedback (thumbs-up, thumbs-down, comment) to a
    trace after it has completed.

    Args:
        trace_id: The trace ID as a hexadecimal string (32 characters).
        thumbs_up: True for positive feedback, False for negative feedback,
            None for neutral.
        comment: Optional text comment.

    Raises:
        ValueError: If trace_id is empty or not 32 characters long.

    Example:
        from aiqa import submit_feedback

        # Submit positive feedback
        await submit_feedback('abc123...', thumbs_up=True, comment='Great response!')

        # Submit negative feedback
        await submit_feedback('abc123...', thumbs_up=False, comment='Incorrect answer')
    """
    # NOTE(review): only the length is validated here; non-hex characters are
    # handled by create_span_from_trace_id's fallback path — confirm intended.
    if not trace_id or len(trace_id) != 32:
        raise ValueError('Invalid trace ID: must be 32 hexadecimal characters')

    # Create a span for feedback with the same trace ID
    span = create_span_from_trace_id(trace_id, span_name='feedback')

    try:
        # Set feedback attributes
        if thumbs_up is not None:
            span.set_attribute('feedback.thumbs_up', thumbs_up)
            span.set_attribute('feedback.type', 'positive' if thumbs_up else 'negative')
        else:
            span.set_attribute('feedback.type', 'neutral')

        if comment:
            span.set_attribute('feedback.comment', comment)

        # Mark as feedback span
        span.set_attribute('aiqa.span_type', 'feedback')
    finally:
        # End exactly once, even if setting attributes fails (the previous
        # implementation could call span.end() twice when flushing raised).
        span.end()

    # Flush to ensure the feedback span is sent immediately; errors propagate.
    await flush_tracing()
|
|
1256
|
+
|