aiqa-client 0.4.3__py3-none-any.whl → 0.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiqa/__init__.py +1 -1
- aiqa/client.py +108 -23
- aiqa/constants.py +3 -1
- aiqa/experiment_runner.py +12 -29
- aiqa/http_utils.py +143 -0
- aiqa/object_serialiser.py +136 -115
- aiqa/tracing.py +155 -267
- aiqa/tracing_llm_utils.py +191 -0
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.5.2.dist-info}/METADATA +1 -1
- aiqa_client-0.5.2.dist-info/RECORD +14 -0
- aiqa/aiqa_exporter.py +0 -679
- aiqa/test_experiment_runner.py +0 -176
- aiqa/test_startup_reliability.py +0 -249
- aiqa/test_tracing.py +0 -230
- aiqa_client-0.4.3.dist-info/RECORD +0 -16
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.5.2.dist-info}/WHEEL +0 -0
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.5.2.dist-info}/licenses/LICENSE.txt +0 -0
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.5.2.dist-info}/top_level.txt +0 -0
aiqa/tracing.py
CHANGED
@@ -7,18 +7,21 @@ import json
 import logging
 import inspect
 import os
+import copy
+import requests
 from typing import Any, Callable, Optional, List
 from functools import wraps
 from opentelemetry import trace
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.trace import Status, StatusCode, SpanContext, TraceFlags
 from opentelemetry.propagate import inject, extract
-from .aiqa_exporter import AIQASpanExporter
 from .client import get_aiqa_client, get_component_tag, set_component_tag as _set_component_tag, get_aiqa_tracer
-from .constants import AIQA_TRACER_NAME
+from .constants import AIQA_TRACER_NAME, LOG_TAG
 from .object_serialiser import serialize_for_span
+from .http_utils import build_headers, get_server_url, get_api_key
+from .tracing_llm_utils import _extract_and_set_token_usage, _extract_and_set_provider_and_model

-logger = logging.getLogger(
+logger = logging.getLogger(LOG_TAG)


 async def flush_tracing() -> None:
@@ -28,13 +31,11 @@ async def flush_tracing() -> None:
     if you want to flush immediately, e.g. before exiting a process.
     A common use is if you are tracing unit tests or experiment runs.

-    This flushes
+    This flushes the BatchSpanProcessor (OTLP exporter doesn't have a separate flush method).
     """
     client = get_aiqa_client()
     if client.provider:
         client.provider.force_flush()  # Synchronous method
-    if client.exporter:
-        await client.exporter.flush()


 # Export provider and exporter accessors for advanced usage
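flush_tracing is async and now flushes only through the provider's BatchSpanProcessor; the separate exporter flush is gone. A minimal usage sketch based on the docstring above (the aiqa.tracing import path is assumed):

    import asyncio
    from aiqa.tracing import flush_tracing

    async def run_experiment():
        ...  # traced calls happen here
        # flush buffered spans before the process exits, e.g. after a test or experiment run
        await flush_tracing()

    asyncio.run(run_experiment())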
@@ -114,10 +115,10 @@ class TracingOptions:
         self.filter_output = filter_output


-
-
-
-
+def _prepare_input(args: tuple, kwargs: dict, sig: Optional[inspect.Signature] = None) -> Any:
+    """Prepare input for span attributes.
+    Converts args and kwargs into a unified dict structure using function signature when available.
+    Falls back to legacy behavior for functions without inspectable signatures.

     Note: This function does NOT serialize values - it just structures the data.
     Serialization happens later via serialize_for_span() to avoid double-encoding
@@ -125,14 +126,34 @@ def _prepare_input(args: tuple, kwargs: dict) -> Any:
     """
     if not args and not kwargs:
         return None
-
-
-
-
-
-
-
-
+
+    # Try to bind args to parameter names using function signature
+    if sig is not None:
+        try:
+            bound = sig.bind(*args, **kwargs)
+            bound.apply_defaults()
+            # Return dict of all arguments (positional args are now named)
+            result = bound.arguments.copy()
+            # Shallow copy to protect against mutating the input
+            return result
+        except (TypeError, ValueError):
+            # Binding failed (e.g., wrong number of args, *args/**kwargs issues)
+            # Fall through to legacy behavior
+            pass
+
+    # in case binding fails
+    if not kwargs:
+        if len(args) == 1:
+            arg0 = args[0]
+            if isinstance(arg0, dict):  # shallow copy to protect against mutating the input
+                return arg0.copy()
+            return arg0
+        return list(args)
+    if kwargs and len(args) == 0:
+        return kwargs.copy()  # shallow copy to protect against mutating the input
+    # Multiple args and kwargs - combine into dict
+    result = kwargs.copy()
+    result["args"] = list(args)
     return result


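The new sig.bind path is what turns positional arguments into named keys on the span's input. A rough illustration of the mechanism using plain inspect (not AIQA-specific code):

    import inspect

    def greet(name, greeting="hello"):
        return f"{greeting}, {name}"

    sig = inspect.signature(greet)
    bound = sig.bind("alice")      # positional arg bound to its parameter name
    bound.apply_defaults()         # fills in greeting="hello"
    print(dict(bound.arguments))   # {'name': 'alice', 'greeting': 'hello'}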
@@ -141,12 +162,17 @@ def _prepare_and_filter_input(
     kwargs: dict,
     filter_input: Optional[Callable[[Any], Any]],
     ignore_input: Optional[List[str]],
+    sig: Optional[inspect.Signature] = None,
 ) -> Any:
-    """
+    """
+    Prepare and filter input for span attributes - applies the user's filter_input and ignore_input.
+    Converts all args to a dict using function signature when available.
+    """
     # Handle "self" in ignore_input by skipping the first argument
     filtered_args = args
     filtered_kwargs = kwargs.copy() if kwargs else {}
     filtered_ignore_input = ignore_input
+    filtered_sig = sig
     if ignore_input and "self" in ignore_input:
         # Remove "self" from ignore_input list (we'll handle it specially)
         filtered_ignore_input = [key for key in ignore_input if key != "self"]
@@ -156,14 +182,23 @@ def _prepare_and_filter_input(
         # Also remove "self" from kwargs if present
         if "self" in filtered_kwargs:
             del filtered_kwargs["self"]
-
-
-
+        # Adjust signature to remove "self" parameter if present
+        # This is needed because we removed self from args, so signature binding will fail otherwise
+        if filtered_sig is not None:
+            params = list(filtered_sig.parameters.values())
+            if params and params[0].name == "self":
+                filtered_sig = filtered_sig.replace(parameters=params[1:])
+    # turn args, kwargs into one "nice" object (now always a dict when signature is available)
+    input_data = _prepare_input(filtered_args, filtered_kwargs, filtered_sig)
+    if filter_input and input_data is not None:
         input_data = filter_input(input_data)
-    if filtered_ignore_input and
-
-
-
+    if filtered_ignore_input and len(filtered_ignore_input) > 0:
+        if not isinstance(input_data, dict):
+            logger.warning(f"_prepare_and_filter_input: skip: ignore_input is set beyond 'self': {filtered_ignore_input} but input_data is not a dict: {type(input_data)}")
+        else:
+            for key in filtered_ignore_input:
+                if key in input_data:
+                    del input_data[key]
     # Also handle case where input_data is just self (single value, not dict)
     # If we filtered out self and there are no remaining args/kwargs, return None
     if ignore_input and "self" in ignore_input and not filtered_args and not filtered_kwargs:
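For methods, the first parameter is now dropped from the signature so binding still succeeds once self has been stripped from the args. A hypothetical decorated method; this assumes WithTracing accepts ignore_input as a keyword, as the wrapper code later in this file suggests:

    from aiqa.tracing import WithTracing

    class SearchService:
        # illustrative: assumes WithTracing forwards ignore_input to _prepare_and_filter_input
        @WithTracing(ignore_input=["self", "api_key"])
        def search(self, query, api_key, limit=10):
            return {"query": query, "limit": limit}

    # The span "input" would then be a dict such as {"query": ..., "limit": 10},
    # with "self" and "api_key" removed before serialization.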
@@ -171,213 +206,67 @@ def _prepare_and_filter_input(
     return input_data


-def
+def _filter_and_serialize_output(
     result: Any,
     filter_output: Optional[Callable[[Any], Any]],
     ignore_output: Optional[List[str]],
 ) -> Any:
-    """
+    """Filter and serialize output for span attributes."""
     output_data = result
     if filter_output:
+        if isinstance(output_data, dict):
+            output_data = output_data.copy()  # copy to provide shallow protection against the user accidentally mutating the output with filter_output
         output_data = filter_output(output_data)
     if ignore_output and isinstance(output_data, dict):
         output_data = output_data.copy()
         for key in ignore_output:
             if key in output_data:
                 del output_data[key]
-
+
+    # Serialize immediately to create immutable result (removes mutable structures)
+    return serialize_for_span(output_data)


 def _handle_span_exception(span: trace.Span, exception: Exception) -> None:
     """Record exception on span and set error status."""
+    logger.info(f"span end: Handling span exception for {span.name}")
     error = exception if isinstance(exception, Exception) else Exception(str(exception))
     span.record_exception(error)
     span.set_status(Status(StatusCode.ERROR, str(error)))


-def _is_attribute_set(span: trace.Span, attribute_name: str) -> bool:
-    """
-    Check if an attribute is already set on a span.
-    Returns True if the attribute exists, False otherwise.
-    Safe against exceptions.
-    """
-    try:
-        # Try multiple ways to access span attributes (SDK spans may store them differently)
-        # Check public 'attributes' property
-        if hasattr(span, "attributes"):
-            attrs = span.attributes
-            if attrs and attribute_name in attrs:
-                return True
-
-        # Check private '_attributes' (common in OpenTelemetry SDK)
-        if hasattr(span, "_attributes"):
-            attrs = span._attributes
-            if attrs and attribute_name in attrs:
-                return True
-
-        # If we can't find the attribute, assume not set (conservative approach)
-        return False
-    except Exception:
-        # If anything goes wrong, assume not set (conservative approach)
-        return False
-

-def
+def _finalize_span_success_common(
+    span: trace.Span,
+    result_for_metadata: Any,
+    output_data: Any,
+    filter_output: Optional[Callable[[Any], Any]] = None,
+    ignore_output: Optional[List[str]] = None,
+) -> None:
     """
-
-
-
-    Looks for usage dict with prompt_tokens, completion_tokens, and total_tokens.
-    Sets gen_ai.usage.input_tokens, gen_ai.usage.output_tokens, and gen_ai.usage.total_tokens.
-    Only sets attributes that are not already set.
+    Common logic for finalizing a span with success status.
+    Extracts token usage and provider/model from result, sets output attribute, and sets status to OK.

-
-
-        See https://platform.openai.com/docs/api-reference/chat/object (usage field)
-    - OpenAI Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
-        See https://platform.openai.com/docs/api-reference/completions/object (usage field)
+    Serializes output immediately to capture its state when the function returns,
+    preventing mutations from affecting the trace.

-
-
-
-
-
-
-        usage = None
-
-        # Check if result is a dict with 'usage' key
-        try:
-            if isinstance(result, dict):
-                usage = result.get("usage")
-                # Also check if result itself is a usage dict (OpenAI format)
-                if usage is None and all(key in result for key in ("prompt_tokens", "completion_tokens", "total_tokens")):
-                    usage = result
-                # Also check if result itself is a usage dict (Bedrock format)
-                elif usage is None and all(key in result for key in ("input_tokens", "output_tokens")):
-                    usage = result
-
-            # Check if result has a 'usage' attribute (e.g., OpenAI response object)
-            elif hasattr(result, "usage"):
-                usage = result.usage
-        except Exception:
-            # If accessing result properties fails, just return silently
-            return
-
-        # Extract token usage if found
-        if isinstance(usage, dict):
-            try:
-                # Support both OpenAI format (prompt_tokens/completion_tokens) and Bedrock format (input_tokens/output_tokens)
-                prompt_tokens = usage.get("prompt_tokens") or usage.get("PromptTokens")
-                completion_tokens = usage.get("completion_tokens") or usage.get("CompletionTokens")
-                input_tokens = usage.get("input_tokens") or usage.get("InputTokens")
-                output_tokens = usage.get("output_tokens") or usage.get("OutputTokens")
-                total_tokens = usage.get("total_tokens") or usage.get("TotalTokens")
-
-                # Use Bedrock format if OpenAI format not available
-                if prompt_tokens is None:
-                    prompt_tokens = input_tokens
-                if completion_tokens is None:
-                    completion_tokens = output_tokens
-
-                # Calculate total_tokens if not provided but we have input and output
-                if total_tokens is None and prompt_tokens is not None and completion_tokens is not None:
-                    total_tokens = prompt_tokens + completion_tokens
-
-                # Only set attributes that are not already set
-                if prompt_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.input_tokens"):
-                    span.set_attribute("gen_ai.usage.input_tokens", prompt_tokens)
-                if completion_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.output_tokens"):
-                    span.set_attribute("gen_ai.usage.output_tokens", completion_tokens)
-                if total_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.total_tokens"):
-                    span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
-            except Exception:
-                # If setting attributes fails, log but don't raise
-                logger.debug(f"Failed to set token usage attributes on span")
-    except Exception:
-        # Catch any other exceptions to ensure this never derails tracing
-        logger.debug(f"Error in _extract_and_set_token_usage")
-
-
-def _extract_and_set_provider_and_model(span: trace.Span, result: Any) -> None:
+    Args:
+        span: The span to finalize
+        result_for_metadata: Value to extract token usage and provider/model from
+        output_data: The output data to set on the span (will be filtered if needed)
+        filter_output: Optional function to filter output data
+        ignore_output: Optional list of keys to exclude from output
     """
-
-
-
-    Looks for 'model', 'provider', 'provider_name' fields in the result.
-    Sets gen_ai.provider.name and gen_ai.request.model.
-    Only sets attributes that are not already set.
-
-    This function detects model information from common API response patterns:
-    - OpenAI Chat Completions API: The 'model' field is at the top level of the response.
-        See https://platform.openai.com/docs/api-reference/chat/object
-    - OpenAI Completions API: The 'model' field is at the top level of the response.
-        See https://platform.openai.com/docs/api-reference/completions/object
+    logger.info(f"span end: Finalizing for {span.name}")
+    _extract_and_set_token_usage(span, result_for_metadata)
+    _extract_and_set_provider_and_model(span, result_for_metadata)

-
-
-
-
-
-
-        model = None
-        provider = None
-
-        # Check if result is a dict
-        try:
-            if isinstance(result, dict):
-                model = result.get("model") or result.get("Model")
-                provider = result.get("provider") or result.get("Provider") or result.get("provider_name") or result.get("providerName")
-
-            # Check if result has attributes (e.g., OpenAI response object)
-            elif hasattr(result, "model"):
-                model = result.model
-                if hasattr(result, "provider"):
-                    provider = result.provider
-                elif hasattr(result, "provider_name"):
-                    provider = result.provider_name
-                elif hasattr(result, "providerName"):
-                    provider = result.providerName
-
-            # Check nested structures (e.g., response.data.model)
-            if model is None and hasattr(result, "data"):
-                data = result.data
-                if isinstance(data, dict):
-                    model = data.get("model") or data.get("Model")
-                elif hasattr(data, "model"):
-                    model = data.model
-
-            # Check for model in choices (OpenAI pattern)
-            if model is None and isinstance(result, dict):
-                choices = result.get("choices")
-                if choices and isinstance(choices, list) and len(choices) > 0:
-                    first_choice = choices[0]
-                    if isinstance(first_choice, dict):
-                        model = first_choice.get("model")
-                    elif hasattr(first_choice, "model"):
-                        model = first_choice.model
-        except Exception:
-            # If accessing result properties fails, just return silently
-            return
-
-        # Set attributes if found and not already set
-        try:
-            if model is not None and not _is_attribute_set(span, "gen_ai.request.model"):
-                # Convert to string if needed
-                model_str = str(model) if model is not None else None
-                if model_str:
-                    span.set_attribute("gen_ai.request.model", model_str)
-
-            if provider is not None and not _is_attribute_set(span, "gen_ai.provider.name"):
-                # Convert to string if needed
-                provider_str = str(provider) if provider is not None else None
-                if provider_str:
-                    span.set_attribute("gen_ai.provider.name", provider_str)
-        except Exception:
-            # If setting attributes fails, log but don't raise
-            logger.debug(f"Failed to set provider/model attributes on span")
-    except Exception:
-        # Catch any other exceptions to ensure this never derails tracing
-        logger.debug(f"Error in _extract_and_set_provider_and_model")
+    # Prepare, filter, and serialize output (serialization happens in _prepare_and_filter_output)
+    output_data = _filter_and_serialize_output(output_data, filter_output, ignore_output)
+    if output_data is not None:
+        # output_data is already serialized (immutable) from _prepare_and_filter_output
+        span.set_attribute("output", output_data)
+    span.set_status(Status(StatusCode.OK))


 class TracedGenerator:
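The token-usage and provider/model extraction removed here now lives in tracing_llm_utils (see the new import at the top of the file). Assuming the moved helpers keep the behaviour shown in the deleted lines, a traced call returning an OpenAI-style payload such as:

    result = {
        "model": "gpt-4o-mini",  # illustrative model name
        "usage": {"prompt_tokens": 120, "completion_tokens": 30, "total_tokens": 150},
    }

    # would end up with span attributes roughly like:
    #   gen_ai.request.model       = "gpt-4o-mini"
    #   gen_ai.usage.input_tokens  = 120
    #   gen_ai.usage.output_tokens = 30
    #   gen_ai.usage.total_tokens  = 150
    # Bedrock-style results use input_tokens/output_tokens instead, and the total is computed when missing.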
@@ -410,7 +299,8 @@ class TracedGenerator:

         try:
             value = next(self._generator)
-
+            # Serialize immediately to create immutable result (removes mutable structures)
+            self._yielded_values.append(serialize_for_span(value))
             return value
         except StopIteration:
             self._exhausted = True
@@ -428,10 +318,7 @@ class TracedGenerator:
     def _finalize_span_success(self):
         """Set output and success status on span."""
         # Check last yielded value for token usage (common pattern in streaming responses)
-        if self._yielded_values
-            last_value = self._yielded_values[-1]
-            _extract_and_set_token_usage(self._span, last_value)
-            _extract_and_set_provider_and_model(self._span, last_value)
+        result_for_metadata = self._yielded_values[-1] if self._yielded_values else None

         # Record summary of yielded values
         output_data = {
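Only the last yielded value is handed to the common finalizer for metadata, which matches the streaming pattern where the final chunk carries the usage block. A hypothetical traced generator relying on that (import path and payload shape are illustrative):

    from aiqa.tracing import WithTracing

    @WithTracing
    def stream_answer(prompt):
        yield {"delta": "Hel"}
        yield {"delta": "lo"}
        # the final chunk becomes result_for_metadata when the span is finalized
        yield {"delta": "", "usage": {"prompt_tokens": 8, "completion_tokens": 2}}

    for chunk in stream_answer("hi"):
        pass  # the span ends once the generator is exhausted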
@@ -448,10 +335,13 @@ class TracedGenerator:
         if len(self._yielded_values) > sample_size:
             output_data["truncated"] = True

-
-
-
-
+        _finalize_span_success_common(
+            self._span,
+            result_for_metadata,
+            output_data,
+            self._filter_output,
+            self._ignore_output,
+        )


 class TracedAsyncGenerator:
@@ -484,7 +374,8 @@ class TracedAsyncGenerator:

         try:
             value = await self._generator.__anext__()
-
+            # Serialize immediately to create immutable result (removes mutable structures)
+            self._yielded_values.append(serialize_for_span(value))
             return value
         except StopAsyncIteration:
             self._exhausted = True
@@ -502,10 +393,7 @@ class TracedAsyncGenerator:
     def _finalize_span_success(self):
         """Set output and success status on span."""
         # Check last yielded value for token usage (common pattern in streaming responses)
-        if self._yielded_values
-            last_value = self._yielded_values[-1]
-            _extract_and_set_token_usage(self._span, last_value)
-            _extract_and_set_provider_and_model(self._span, last_value)
+        result_for_metadata = self._yielded_values[-1] if self._yielded_values else None

         # Record summary of yielded values
         output_data = {
@@ -522,10 +410,13 @@ class TracedAsyncGenerator:
         if len(self._yielded_values) > sample_size:
             output_data["truncated"] = True

-
-
-
-
+        _finalize_span_success_common(
+            self._span,
+            result_for_metadata,
+            output_data,
+            self._filter_output,
+            self._ignore_output,
+        )


 def WithTracing(
@@ -560,20 +451,6 @@ def WithTracing(
         def my_function(x, y):
             return x + y

-        @WithTracing
-        async def my_async_function(x, y):
-            return x + y
-
-        @WithTracing
-        def my_generator(n):
-            for i in range(n):
-                yield i * 2
-
-        @WithTracing
-        async def my_async_generator(n):
-            for i in range(n):
-                yield i * 2
-
         @WithTracing(name="custom_name")
         def another_function():
             pass
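The async and generator examples were trimmed from the docstring, but the dispatch further down (is_async, is_generator, is_async_generator) still wraps those callables. A small async sketch, assuming the aiqa.tracing import path:

    import asyncio
    from aiqa.tracing import WithTracing

    @WithTracing
    async def fetch_answer(question):
        # awaited work is traced the same way as a plain sync call
        return {"answer": f"echo: {question}"}

    print(asyncio.run(fetch_answer("ping")))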
@@ -585,16 +462,30 @@ def WithTracing(
     if hasattr(fn, "_is_traced"):
         logger.warning(f"Function {fn_name} is already traced, skipping tracing again")
         return fn
-
+    logger.info(f"WithTracing function {fn_name}")
     is_async = inspect.iscoroutinefunction(fn)
     is_generator = inspect.isgeneratorfunction(fn)
     is_async_generator = inspect.isasyncgenfunction(fn) if hasattr(inspect, 'isasyncgenfunction') else False

+    # Get function signature once at decoration time for efficient arg name resolution
+    fn_sig: Optional[inspect.Signature] = None
+    try:
+        fn_sig = inspect.signature(fn)
+    except (ValueError, TypeError):
+        # Some callables (e.g., builtins, C extensions) don't have inspectable signatures
+        # Will fall back to legacy behavior
+        pass
+
     # Don't get tracer here - get it lazily when function is called
     # This ensures initialization only happens when tracing is actually used

     def _setup_span(span: trace.Span, input_data: Any) -> bool:
-        """
+        """
+        Setup span with input data. Returns True if span is recording.
+
+        Serializes input immediately to capture its state at function start,
+        preventing mutations from affecting the trace.
+        """
         if not span.is_recording():
             logger.warning(f"Span {fn_name} is not recording - will not be exported")
             return False
@@ -607,6 +498,8 @@ def WithTracing(
             span.set_attribute("gen_ai.component.id", component_tag)

         if input_data is not None:
+            # Serialize input immediately to capture state at function start
+            # input_data has already been copied in _prepare_and_filter_input
             span.set_attribute("input", serialize_for_span(input_data))

         trace_id = format(span.get_span_context().trace_id, "032x")
@@ -615,30 +508,28 @@ def WithTracing(

     def _finalize_span_success(span: trace.Span, result: Any) -> None:
         """Set output and success status on span."""
-
-
-
-
-
-
-
-        span.set_attribute("output", serialize_for_span(output_data))
-        span.set_status(Status(StatusCode.OK))
+        _finalize_span_success_common(
+            span,
+            result,
+            result,
+            filter_output,
+            ignore_output,
+        )

     def _execute_with_span_sync(executor: Callable[[], Any], input_data: Any) -> Any:
-        """Execute sync function within span context, handling input/output and exceptions.
+        """Execute sync function within span context, handling input/output and exceptions.
+        Note: input_data has already gone through _prepare_and_filter_input
+        """
         # Ensure tracer provider is initialized before creating spans
         # This is called lazily when the function runs, not at decorator definition time
         client = get_aiqa_client()
         if not client.enabled:
             return executor()
-
         # Get tracer after initialization (lazy)
         tracer = get_aiqa_tracer()
         with tracer.start_as_current_span(fn_name) as span:
             if not _setup_span(span, input_data):
-                return executor()
-
+                return executor()  # span is not recording, so just execute the function and return the result
             try:
                 result = executor()
                 _finalize_span_success(span, result)
@@ -688,7 +579,7 @@ def WithTracing(

         try:
             if not _setup_span(span, input_data):
-                generator = executor()
+                generator = executor()  # span is not recording, so just execute the function and return the result
                 trace.context_api.detach(token)
                 span.end()
                 return generator
@@ -733,7 +624,7 @@ def WithTracing(
     if is_async_generator:
         @wraps(fn)
         async def async_gen_traced_fn(*args, **kwargs):
-            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
+            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input, fn_sig)
             return await _execute_generator_async(
                 lambda: fn(*args, **kwargs),
                 input_data
@@ -745,7 +636,7 @@ def WithTracing(
     elif is_generator:
         @wraps(fn)
         def gen_traced_fn(*args, **kwargs):
-            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
+            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input, fn_sig)
             return _execute_generator_sync(
                 lambda: fn(*args, **kwargs),
                 input_data
@@ -757,7 +648,7 @@ def WithTracing(
     elif is_async:
         @wraps(fn)
         async def async_traced_fn(*args, **kwargs):
-            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
+            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input, fn_sig)
             return await _execute_with_span_async(
                 lambda: fn(*args, **kwargs),
                 input_data
@@ -769,7 +660,7 @@ def WithTracing(
     else:
         @wraps(fn)
         def sync_traced_fn(*args, **kwargs):
-            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
+            input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input, fn_sig)
             return _execute_with_span_sync(
                 lambda: fn(*args, **kwargs),
                 input_data
@@ -816,6 +707,7 @@ def get_active_span() -> Optional[trace.Span]:

 def set_conversation_id(conversation_id: str) -> bool:
     """
+    Naturally a conversation might span several traces.
     Set the gen_ai.conversation.id attribute on the active span.
     This allows you to group multiple traces together that are part of the same conversation.
     See https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-events/ for more details.
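Since one conversation usually spans several traces, the conversation id is attached per active span. A hypothetical call site, assuming set_conversation_id is called from inside an already-traced function as its docstring describes:

    from aiqa.tracing import WithTracing, set_conversation_id

    @WithTracing
    def handle_turn(conversation_id, user_message):
        # tag the active span so this trace groups with the rest of the conversation
        set_conversation_id(conversation_id)
        return {"reply": f"you said: {user_message}"}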
@@ -1165,14 +1057,12 @@ def get_span(span_id: str, organisation_id: Optional[str] = None, exclude: Optio
         print(f"Found span: {span['name']}")
         my_function(**span['input'])
     """
-
-
-
-    server_url = os.getenv("AIQA_SERVER_URL", "").rstrip("/")
-    api_key = os.getenv("AIQA_API_KEY", "")
+    server_url = get_server_url()
+    api_key = get_api_key()
     org_id = organisation_id or os.getenv("AIQA_ORGANISATION_ID", "")

-    if not
+    # Check if server_url is the default (meaning AIQA_SERVER_URL was not set)
+    if not os.getenv("AIQA_SERVER_URL"):
         raise ValueError("AIQA_SERVER_URL is not set. Cannot retrieve span.")
     if not org_id:
         raise ValueError("Organisation ID is required. Provide it as parameter or set AIQA_ORGANISATION_ID environment variable.")
@@ -1190,9 +1080,7 @@ def get_span(span_id: str, organisation_id: Optional[str] = None, exclude: Optio
         "fields": "*" if not exclude else None,
     }

-    headers =
-    if api_key:
-        headers["Authorization"] = f"ApiKey {api_key}"
+    headers = build_headers(api_key)

     response = requests.get(url, params=params, headers=headers)
     if response.status_code == 200:
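get_span now resolves the server URL, API key and request headers through http_utils instead of reading the environment and building the Authorization header inline. A usage sketch based on the signature shown in the hunk header; the URL, key, org and span id values below are made up:

    import os
    from aiqa.tracing import get_span

    os.environ.setdefault("AIQA_SERVER_URL", "https://aiqa.example.com")
    os.environ.setdefault("AIQA_API_KEY", "my-api-key")
    os.environ.setdefault("AIQA_ORGANISATION_ID", "my-org")

    span = get_span("0123456789abcdef")  # raises ValueError if AIQA_SERVER_URL is unset
    if span:
        print(span["name"], span["input"])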