aiqa-client 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aiqa/tracing.py CHANGED
@@ -3,43 +3,24 @@ OpenTelemetry tracing setup and utilities. Initializes tracer provider on import
3
3
  Provides WithTracing decorator to automatically trace function calls.
4
4
  """
5
5
 
6
- import os
7
6
  import json
8
7
  import logging
9
8
  import inspect
10
- from typing import Any, Callable, Optional, Dict
9
+ import os
10
+ from typing import Any, Callable, Optional, List
11
11
  from functools import wraps
12
12
  from opentelemetry import trace
13
13
  from opentelemetry.sdk.trace import TracerProvider
14
- from opentelemetry.sdk.trace.export import BatchSpanProcessor
15
- from opentelemetry.sdk.resources import Resource
16
- from opentelemetry.semconv.resource import ResourceAttributes
17
- from opentelemetry.trace import Status, StatusCode
18
- from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
14
+ from opentelemetry.trace import Status, StatusCode, SpanContext, TraceFlags
15
+ from opentelemetry.propagate import inject, extract
19
16
  from .aiqa_exporter import AIQASpanExporter
17
+ from .client import get_aiqa_client, AIQA_TRACER_NAME, get_component_tag, set_component_tag as _set_component_tag, get_aiqa_tracer
18
+ from .object_serialiser import serialize_for_span
20
19
 
21
- logger = logging.getLogger(__name__)
22
-
23
- # Load environment variables
24
- exporter = AIQASpanExporter()
25
-
26
- # Initialize OpenTelemetry
27
- provider = TracerProvider(
28
- resource=Resource.create(
29
- {
30
- ResourceAttributes.SERVICE_NAME: os.getenv("OTEL_SERVICE_NAME", "aiqa-service"),
31
- }
32
- )
33
- )
34
-
35
- provider.add_span_processor(BatchSpanProcessor(exporter))
36
- trace.set_tracer_provider(provider)
37
-
38
- # Get a tracer instance
39
- tracer = trace.get_tracer("aiqa-tracer")
20
+ logger = logging.getLogger("AIQA")
40
21
 
41
22
 
42
- async def flush_spans() -> None:
23
+ async def flush_tracing() -> None:
43
24
  """
44
25
  Flush all pending spans to the server.
45
26
  Flushes also happen automatically every few seconds. So you only need to call this function
@@ -47,8 +28,11 @@ async def flush_spans() -> None:
47
28
 
48
29
  This flushes both the BatchSpanProcessor and the exporter buffer.
49
30
  """
50
- provider.force_flush() # Synchronous method
51
- await exporter.flush()
31
+ client = get_aiqa_client()
32
+ if client.get("provider"):
33
+ client["provider"].force_flush() # Synchronous method
34
+ if client.get("exporter"):
35
+ await client["exporter"].flush()
52
36
 
53
37
 
54
38
  async def shutdown_tracing() -> None:
@@ -56,25 +40,86 @@ async def shutdown_tracing() -> None:
56
40
  Shutdown the tracer provider and exporter.
57
41
  It is not necessary to call this function.
58
42
  """
59
- provider.shutdown() # Synchronous method
60
- await exporter.shutdown()
43
+ try:
44
+ client = get_aiqa_client()
45
+ if client.get("provider"):
46
+ client["provider"].shutdown() # Synchronous method
47
+ if client.get("exporter"):
48
+ client["exporter"].shutdown() # Synchronous method
49
+ except Exception as e:
50
+ logger.error(f"Error shutting down tracing: {e}")
61
51
 
62
52
 
63
- # Export provider and exporter for advanced usage
64
- __all__ = ["provider", "exporter", "flush_spans", "shutdown_tracing", "WithTracing", "set_span_attribute", "set_span_name", "get_active_span"]
53
+ # Export provider and exporter accessors for advanced usage
54
+
55
+ __all__ = [
56
+ "get_provider", "get_exporter", "flush_tracing", "shutdown_tracing", "WithTracing",
57
+ "set_span_attribute", "set_span_name", "get_active_span",
58
+ "get_trace_id", "get_span_id", "create_span_from_trace_id", "inject_trace_context", "extract_trace_context",
59
+ "set_conversation_id", "set_component_tag", "set_token_usage", "set_provider_and_model", "get_span", "submit_feedback"
60
+ ]
65
61
 
66
62
 
67
63
  class TracingOptions:
68
- """Options for WithTracing decorator"""
64
+ """
65
+ Options for WithTracing decorator.
66
+
67
+ This class is used to configure how function calls are traced and what data
68
+ is recorded in span attributes. All fields are optional.
69
+ """
69
70
 
70
71
  def __init__(
71
72
  self,
72
73
  name: Optional[str] = None,
73
- ignore_input: Optional[Any] = None,
74
- ignore_output: Optional[Any] = None,
74
+ ignore_input: Optional[List[str]] = None,
75
+ ignore_output: Optional[List[str]] = None,
75
76
  filter_input: Optional[Callable[[Any], Any]] = None,
76
77
  filter_output: Optional[Callable[[Any], Any]] = None,
77
78
  ):
79
+ """
80
+ Initialize TracingOptions.
81
+
82
+ Args:
83
+ name: Custom name for the span. If not provided, the function name
84
+ will be used. Useful for renaming spans or providing more
85
+ descriptive names.
86
+
87
+ ignore_input: Iterable of keys (e.g., list, set) to exclude from
88
+ input data when recording span attributes. Only applies when
89
+ input is a dictionary. For example, use `["password", "api_key"]`
90
+ to exclude sensitive fields from being traced.
91
+
92
+ ignore_output: Iterable of keys (e.g., list, set) to exclude from
93
+ output data when recording span attributes. Only applies when
94
+ output is a dictionary. Useful for excluding large or sensitive
95
+ fields from traces.
96
+
97
+ filter_input: Callable function that receives the prepared input data
98
+ and returns a filtered/transformed version to be recorded in the
99
+ span. The function should accept one argument (the input data)
100
+ and return the transformed data. This is applied before
101
+ ignore_input filtering.
102
+
103
+ filter_output: Callable function that receives the output data and
104
+ returns a filtered/transformed version to be recorded in the span.
105
+ The function should accept one argument (the output data) and
106
+ return the transformed data. This is applied before
107
+ ignore_output filtering.
108
+
109
+ Example:
110
+ # Exclude sensitive fields from input
111
+ @WithTracing(ignore_input=["password", "secret_key"])
112
+ def authenticate(username, password):
113
+ return {"token": "..."}
114
+
115
+ # Custom span name and filter output
116
+ @WithTracing(
117
+ name="data_processing",
118
+ filter_output=lambda x: {"count": len(x)} if isinstance(x, list) else x
119
+ )
120
+ def process_data(items):
121
+ return items
122
+ """
78
123
  self.name = name
79
124
  self.ignore_input = ignore_input
80
125
  self.ignore_output = ignore_output
@@ -82,56 +127,426 @@ class TracingOptions:
82
127
  self.filter_output = filter_output
83
128
 
84
129
 
85
- def _serialize_for_span(value: Any) -> Any:
86
- """
87
- Serialize a value for span attributes.
88
- OpenTelemetry only accepts primitives (bool, str, bytes, int, float) or sequences of those.
89
- Complex types (dicts, lists, objects) are converted to JSON strings.
90
- """
91
- # Keep primitives as is (including None)
92
- if value is None or isinstance(value, (str, int, float, bool, bytes)):
93
- return value
94
-
95
- # For sequences, check if all elements are primitives
96
- if isinstance(value, (list, tuple)):
97
- # If all elements are primitives, return as list
98
- if all(isinstance(item, (str, int, float, bool, bytes, type(None))) for item in value):
99
- return list(value)
100
- # Otherwise serialize to JSON string
101
- try:
102
- return json.dumps(value)
103
- except (TypeError, ValueError):
104
- return str(value)
105
-
106
- # For dicts and other complex types, serialize to JSON string
107
- try:
108
- return json.dumps(value)
109
- except (TypeError, ValueError):
110
- # If JSON serialization fails, convert to string
111
- return str(value)
112
130
 
113
131
 
114
132
  def _prepare_input(args: tuple, kwargs: dict) -> Any:
115
- """Prepare input for span attributes."""
133
+ """Prepare input for span attributes.
134
+
135
+ Note: This function does NOT serialize values - it just structures the data.
136
+ Serialization happens later via serialize_for_span() to avoid double-encoding
137
+ (e.g., converting messages to JSON string, then encoding that string again).
138
+ """
116
139
  if not args and not kwargs:
117
140
  return None
118
141
  if len(args) == 1 and not kwargs:
119
- return _serialize_for_span(args[0])
142
+ return args[0] # Don't serialize here - will be serialized later
120
143
  # Multiple args or kwargs - combine into dict
121
144
  result = {}
122
145
  if args:
123
- result["args"] = [_serialize_for_span(arg) for arg in args]
146
+ result["args"] = list(args) # Keep as-is, will be serialized later
124
147
  if kwargs:
125
- result["kwargs"] = {k: _serialize_for_span(v) for k, v in kwargs.items()}
148
+ result["kwargs"] = dict(kwargs) # Keep as-is, will be serialized later
126
149
  return result
127
150
 
128
151
 
152
+ def _prepare_and_filter_input(
153
+ args: tuple,
154
+ kwargs: dict,
155
+ filter_input: Optional[Callable[[Any], Any]],
156
+ ignore_input: Optional[List[str]],
157
+ ) -> Any:
158
+ """Prepare and filter input for span attributes."""
159
+ # Handle "self" in ignore_input by skipping the first argument
160
+ filtered_args = args
161
+ filtered_kwargs = kwargs.copy() if kwargs else {}
162
+ filtered_ignore_input = ignore_input
163
+ if ignore_input and "self" in ignore_input:
164
+ # Remove "self" from ignore_input list (we'll handle it specially)
165
+ filtered_ignore_input = [key for key in ignore_input if key != "self"]
166
+ # Skip first arg if it exists (typically self for bound methods)
167
+ if args:
168
+ filtered_args = args[1:]
169
+ # Also remove "self" from kwargs if present
170
+ if "self" in filtered_kwargs:
171
+ del filtered_kwargs["self"]
172
+
173
+ input_data = _prepare_input(filtered_args, filtered_kwargs)
174
+ if filter_input:
175
+ input_data = filter_input(input_data)
176
+ if filtered_ignore_input and isinstance(input_data, dict):
177
+ for key in filtered_ignore_input:
178
+ if key in input_data:
179
+ del input_data[key]
180
+ # Also handle case where input_data is just self (single value, not dict)
181
+ # If we filtered out self and there are no remaining args/kwargs, return None
182
+ if ignore_input and "self" in ignore_input and not filtered_args and not filtered_kwargs:
183
+ return None
184
+ return input_data
185
+
186
+
187
+ def _prepare_and_filter_output(
188
+ result: Any,
189
+ filter_output: Optional[Callable[[Any], Any]],
190
+ ignore_output: Optional[List[str]],
191
+ ) -> Any:
192
+ """Prepare and filter output for span attributes."""
193
+ output_data = result
194
+ if filter_output:
195
+ output_data = filter_output(output_data)
196
+ if ignore_output and isinstance(output_data, dict):
197
+ output_data = output_data.copy()
198
+ for key in ignore_output:
199
+ if key in output_data:
200
+ del output_data[key]
201
+ return output_data
202
+
203
+
204
+ def _handle_span_exception(span: trace.Span, exception: Exception) -> None:
205
+ """Record exception on span and set error status."""
206
+ error = exception if isinstance(exception, Exception) else Exception(str(exception))
207
+ span.record_exception(error)
208
+ span.set_status(Status(StatusCode.ERROR, str(error)))
209
+
210
+
211
+ def _is_attribute_set(span: trace.Span, attribute_name: str) -> bool:
212
+ """
213
+ Check if an attribute is already set on a span.
214
+ Returns True if the attribute exists, False otherwise.
215
+ Safe against exceptions.
216
+ """
217
+ try:
218
+ # Try multiple ways to access span attributes (SDK spans may store them differently)
219
+ # Check public 'attributes' property
220
+ if hasattr(span, "attributes"):
221
+ attrs = span.attributes
222
+ if attrs and attribute_name in attrs:
223
+ return True
224
+
225
+ # Check private '_attributes' (common in OpenTelemetry SDK)
226
+ if hasattr(span, "_attributes"):
227
+ attrs = span._attributes
228
+ if attrs and attribute_name in attrs:
229
+ return True
230
+
231
+ # If we can't find the attribute, assume not set (conservative approach)
232
+ return False
233
+ except Exception:
234
+ # If anything goes wrong, assume not set (conservative approach)
235
+ return False
236
+
237
+
238
+ def _extract_and_set_token_usage(span: trace.Span, result: Any) -> None:
239
+ """
240
+ Extract OpenAI API style token usage from result and add to span attributes
241
+ using OpenTelemetry semantic conventions for gen_ai.
242
+
243
+ Looks for usage dict with prompt_tokens, completion_tokens, and total_tokens.
244
+ Sets gen_ai.usage.input_tokens, gen_ai.usage.output_tokens, and gen_ai.usage.total_tokens.
245
+ Only sets attributes that are not already set.
246
+
247
+ This function detects token usage from OpenAI API response patterns:
248
+ - OpenAI Chat Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
249
+ See https://platform.openai.com/docs/api-reference/chat/object (usage field)
250
+ - OpenAI Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
251
+ See https://platform.openai.com/docs/api-reference/completions/object (usage field)
252
+
253
+ This function is safe against exceptions and will not derail tracing or program execution.
254
+ """
255
+ try:
256
+ if not span.is_recording():
257
+ return
258
+
259
+ usage = None
260
+
261
+ # Check if result is a dict with 'usage' key
262
+ try:
263
+ if isinstance(result, dict):
264
+ usage = result.get("usage")
265
+ # Also check if result itself is a usage dict (OpenAI format)
266
+ if usage is None and all(key in result for key in ("prompt_tokens", "completion_tokens", "total_tokens")):
267
+ usage = result
268
+ # Also check if result itself is a usage dict (Bedrock format)
269
+ elif usage is None and all(key in result for key in ("input_tokens", "output_tokens")):
270
+ usage = result
271
+
272
+ # Check if result has a 'usage' attribute (e.g., OpenAI response object)
273
+ elif hasattr(result, "usage"):
274
+ usage = result.usage
275
+ except Exception:
276
+ # If accessing result properties fails, just return silently
277
+ return
278
+
279
+ # Extract token usage if found
280
+ if isinstance(usage, dict):
281
+ try:
282
+ # Support both OpenAI format (prompt_tokens/completion_tokens) and Bedrock format (input_tokens/output_tokens)
283
+ prompt_tokens = usage.get("prompt_tokens") or usage.get("PromptTokens")
284
+ completion_tokens = usage.get("completion_tokens") or usage.get("CompletionTokens")
285
+ input_tokens = usage.get("input_tokens") or usage.get("InputTokens")
286
+ output_tokens = usage.get("output_tokens") or usage.get("OutputTokens")
287
+ total_tokens = usage.get("total_tokens") or usage.get("TotalTokens")
288
+
289
+ # Use Bedrock format if OpenAI format not available
290
+ if prompt_tokens is None:
291
+ prompt_tokens = input_tokens
292
+ if completion_tokens is None:
293
+ completion_tokens = output_tokens
294
+
295
+ # Calculate total_tokens if not provided but we have input and output
296
+ if total_tokens is None and prompt_tokens is not None and completion_tokens is not None:
297
+ total_tokens = prompt_tokens + completion_tokens
298
+
299
+ # Only set attributes that are not already set
300
+ if prompt_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.input_tokens"):
301
+ span.set_attribute("gen_ai.usage.input_tokens", prompt_tokens)
302
+ if completion_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.output_tokens"):
303
+ span.set_attribute("gen_ai.usage.output_tokens", completion_tokens)
304
+ if total_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.total_tokens"):
305
+ span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
306
+ except Exception:
307
+ # If setting attributes fails, log but don't raise
308
+ logger.debug(f"Failed to set token usage attributes on span")
309
+ except Exception:
310
+ # Catch any other exceptions to ensure this never derails tracing
311
+ logger.debug(f"Error in _extract_and_set_token_usage")
312
+
313
+
314
+ def _extract_and_set_provider_and_model(span: trace.Span, result: Any) -> None:
315
+ """
316
+ Extract provider and model information from result and add to span attributes
317
+ using OpenTelemetry semantic conventions for gen_ai.
318
+
319
+ Looks for 'model', 'provider', 'provider_name' fields in the result.
320
+ Sets gen_ai.provider.name and gen_ai.request.model.
321
+ Only sets attributes that are not already set.
322
+
323
+ This function detects model information from common API response patterns:
324
+ - OpenAI Chat Completions API: The 'model' field is at the top level of the response.
325
+ See https://platform.openai.com/docs/api-reference/chat/object
326
+ - OpenAI Completions API: The 'model' field is at the top level of the response.
327
+ See https://platform.openai.com/docs/api-reference/completions/object
328
+
329
+ This function is safe against exceptions and will not derail tracing or program execution.
330
+ """
331
+ try:
332
+ if not span.is_recording():
333
+ return
334
+
335
+ model = None
336
+ provider = None
337
+
338
+ # Check if result is a dict
339
+ try:
340
+ if isinstance(result, dict):
341
+ model = result.get("model") or result.get("Model")
342
+ provider = result.get("provider") or result.get("Provider") or result.get("provider_name") or result.get("providerName")
343
+
344
+ # Check if result has attributes (e.g., OpenAI response object)
345
+ elif hasattr(result, "model"):
346
+ model = result.model
347
+ if hasattr(result, "provider"):
348
+ provider = result.provider
349
+ elif hasattr(result, "provider_name"):
350
+ provider = result.provider_name
351
+ elif hasattr(result, "providerName"):
352
+ provider = result.providerName
353
+
354
+ # Check nested structures (e.g., response.data.model)
355
+ if model is None and hasattr(result, "data"):
356
+ data = result.data
357
+ if isinstance(data, dict):
358
+ model = data.get("model") or data.get("Model")
359
+ elif hasattr(data, "model"):
360
+ model = data.model
361
+
362
+ # Check for model in choices (OpenAI pattern)
363
+ if model is None and isinstance(result, dict):
364
+ choices = result.get("choices")
365
+ if choices and isinstance(choices, list) and len(choices) > 0:
366
+ first_choice = choices[0]
367
+ if isinstance(first_choice, dict):
368
+ model = first_choice.get("model")
369
+ elif hasattr(first_choice, "model"):
370
+ model = first_choice.model
371
+ except Exception:
372
+ # If accessing result properties fails, just return silently
373
+ return
374
+
375
+ # Set attributes if found and not already set
376
+ try:
377
+ if model is not None and not _is_attribute_set(span, "gen_ai.request.model"):
378
+ # Convert to string if needed
379
+ model_str = str(model) if model is not None else None
380
+ if model_str:
381
+ span.set_attribute("gen_ai.request.model", model_str)
382
+
383
+ if provider is not None and not _is_attribute_set(span, "gen_ai.provider.name"):
384
+ # Convert to string if needed
385
+ provider_str = str(provider) if provider is not None else None
386
+ if provider_str:
387
+ span.set_attribute("gen_ai.provider.name", provider_str)
388
+ except Exception:
389
+ # If setting attributes fails, log but don't raise
390
+ logger.debug(f"Failed to set provider/model attributes on span")
391
+ except Exception:
392
+ # Catch any other exceptions to ensure this never derails tracing
393
+ logger.debug(f"Error in _extract_and_set_provider_and_model")
394
+
395
+
396
+ class TracedGenerator:
397
+ """Wrapper for sync generators that traces iteration."""
398
+
399
+ def __init__(
400
+ self,
401
+ generator: Any,
402
+ span: trace.Span,
403
+ fn_name: str,
404
+ filter_output: Optional[Callable[[Any], Any]],
405
+ ignore_output: Optional[List[str]],
406
+ context_token: Any,
407
+ ):
408
+ self._generator = generator
409
+ self._span = span
410
+ self._fn_name = fn_name
411
+ self._filter_output = filter_output
412
+ self._ignore_output = ignore_output
413
+ self._context_token = context_token
414
+ self._yielded_values = []
415
+ self._exhausted = False
416
+
417
+ def __iter__(self):
418
+ return self
419
+
420
+ def __next__(self):
421
+ if self._exhausted:
422
+ raise StopIteration
423
+
424
+ try:
425
+ value = next(self._generator)
426
+ self._yielded_values.append(value)
427
+ return value
428
+ except StopIteration:
429
+ self._exhausted = True
430
+ self._finalize_span_success()
431
+ trace.context_api.detach(self._context_token)
432
+ self._span.end()
433
+ raise
434
+ except Exception as exception:
435
+ self._exhausted = True
436
+ _handle_span_exception(self._span, exception)
437
+ trace.context_api.detach(self._context_token)
438
+ self._span.end()
439
+ raise
440
+
441
+ def _finalize_span_success(self):
442
+ """Set output and success status on span."""
443
+ # Check last yielded value for token usage (common pattern in streaming responses)
444
+ if self._yielded_values:
445
+ last_value = self._yielded_values[-1]
446
+ _extract_and_set_token_usage(self._span, last_value)
447
+ _extract_and_set_provider_and_model(self._span, last_value)
448
+
449
+ # Record summary of yielded values
450
+ output_data = {
451
+ "type": "generator",
452
+ "yielded_count": len(self._yielded_values),
453
+ }
454
+
455
+ # Optionally include sample values (limit to avoid huge spans)
456
+ if self._yielded_values:
457
+ sample_size = min(10, len(self._yielded_values))
458
+ output_data["sample_values"] = [
459
+ serialize_for_span(v) for v in self._yielded_values[:sample_size]
460
+ ]
461
+ if len(self._yielded_values) > sample_size:
462
+ output_data["truncated"] = True
463
+
464
+ output_data = _prepare_and_filter_output(output_data, self._filter_output, self._ignore_output)
465
+ if output_data is not None:
466
+ self._span.set_attribute("output", serialize_for_span(output_data))
467
+ self._span.set_status(Status(StatusCode.OK))
468
+
469
+
470
+ class TracedAsyncGenerator:
471
+ """Wrapper for async generators that traces iteration."""
472
+
473
+ def __init__(
474
+ self,
475
+ generator: Any,
476
+ span: trace.Span,
477
+ fn_name: str,
478
+ filter_output: Optional[Callable[[Any], Any]],
479
+ ignore_output: Optional[List[str]],
480
+ context_token: Any,
481
+ ):
482
+ self._generator = generator
483
+ self._span = span
484
+ self._fn_name = fn_name
485
+ self._filter_output = filter_output
486
+ self._ignore_output = ignore_output
487
+ self._context_token = context_token
488
+ self._yielded_values = []
489
+ self._exhausted = False
490
+
491
+ def __aiter__(self):
492
+ return self
493
+
494
+ async def __anext__(self):
495
+ if self._exhausted:
496
+ raise StopAsyncIteration
497
+
498
+ try:
499
+ value = await self._generator.__anext__()
500
+ self._yielded_values.append(value)
501
+ return value
502
+ except StopAsyncIteration:
503
+ self._exhausted = True
504
+ self._finalize_span_success()
505
+ trace.context_api.detach(self._context_token)
506
+ self._span.end()
507
+ raise
508
+ except Exception as exception:
509
+ self._exhausted = True
510
+ _handle_span_exception(self._span, exception)
511
+ trace.context_api.detach(self._context_token)
512
+ self._span.end()
513
+ raise
514
+
515
+ def _finalize_span_success(self):
516
+ """Set output and success status on span."""
517
+ # Check last yielded value for token usage (common pattern in streaming responses)
518
+ if self._yielded_values:
519
+ last_value = self._yielded_values[-1]
520
+ _extract_and_set_token_usage(self._span, last_value)
521
+ _extract_and_set_provider_and_model(self._span, last_value)
522
+
523
+ # Record summary of yielded values
524
+ output_data = {
525
+ "type": "async_generator",
526
+ "yielded_count": len(self._yielded_values),
527
+ }
528
+
529
+ # Optionally include sample values (limit to avoid huge spans)
530
+ if self._yielded_values:
531
+ sample_size = min(10, len(self._yielded_values))
532
+ output_data["sample_values"] = [
533
+ serialize_for_span(v) for v in self._yielded_values[:sample_size]
534
+ ]
535
+ if len(self._yielded_values) > sample_size:
536
+ output_data["truncated"] = True
537
+
538
+ output_data = _prepare_and_filter_output(output_data, self._filter_output, self._ignore_output)
539
+ if output_data is not None:
540
+ self._span.set_attribute("output", serialize_for_span(output_data))
541
+ self._span.set_status(Status(StatusCode.OK))
542
+
543
+
129
544
  def WithTracing(
130
545
  func: Optional[Callable] = None,
131
546
  *,
132
547
  name: Optional[str] = None,
133
- ignore_input: Optional[Any] = None,
134
- ignore_output: Optional[Any] = None,
548
+ ignore_input: Optional[List[str]] = None,
549
+ ignore_output: Optional[List[str]] = None,
135
550
  filter_input: Optional[Callable[[Any], Any]] = None,
136
551
  filter_output: Optional[Callable[[Any], Any]] = None,
137
552
  ):
@@ -139,13 +554,17 @@ def WithTracing(
139
554
  Decorator to automatically create spans for function calls.
140
555
  Records input/output as span attributes. Spans are automatically linked via OpenTelemetry context.
141
556
 
142
- Works with both synchronous and asynchronous functions.
557
+ Works with synchronous functions, asynchronous functions, generator functions, and async generator functions.
143
558
 
144
559
  Args:
145
560
  func: The function to trace (when used as @WithTracing)
146
561
  name: Optional custom name for the span (defaults to function name)
147
- ignore_input: Fields to ignore in input (not yet implemented)
148
- ignore_output: Fields to ignore in output (not yet implemented)
562
+ ignore_input: List of keys to exclude from input data when recording span attributes.
563
+ Only applies when input is a dictionary. For example, use ["password", "api_key"]
564
+ to exclude sensitive fields from being traced.
565
+ ignore_output: List of keys to exclude from output data when recording span attributes.
566
+ Only applies when output is a dictionary. Useful for excluding large or sensitive
567
+ fields from traces.
149
568
  filter_input: Function to filter/transform input before recording
150
569
  filter_output: Function to filter/transform output before recording
151
570
 
@@ -158,6 +577,16 @@ def WithTracing(
158
577
  async def my_async_function(x, y):
159
578
  return x + y
160
579
 
580
+ @WithTracing
581
+ def my_generator(n):
582
+ for i in range(n):
583
+ yield i * 2
584
+
585
+ @WithTracing
586
+ async def my_async_generator(n):
587
+ for i in range(n):
588
+ yield i * 2
589
+
161
590
  @WithTracing(name="custom_name")
162
591
  def another_function():
163
592
  pass
@@ -171,52 +600,156 @@ def WithTracing(
171
600
  return fn
172
601
 
173
602
  is_async = inspect.iscoroutinefunction(fn)
603
+ is_generator = inspect.isgeneratorfunction(fn)
604
+ is_async_generator = inspect.isasyncgenfunction(fn) if hasattr(inspect, 'isasyncgenfunction') else False
174
605
 
175
- if is_async:
176
- @wraps(fn)
177
- async def async_traced_fn(*args, **kwargs):
178
- span = tracer.start_span(fn_name)
179
-
180
- # Prepare input
181
- input_data = _prepare_input(args, kwargs)
182
- if filter_input:
183
- input_data = filter_input(input_data)
184
- if ignore_input and isinstance(input_data, dict):
185
- # TODO: implement ignore_input logic
186
- pass
606
+ tracer = get_aiqa_tracer()
607
+
608
+ def _setup_span(span: trace.Span, input_data: Any) -> bool:
609
+ """Setup span with input data. Returns True if span is recording."""
610
+ if not span.is_recording():
611
+ logger.warning(f"Span {fn_name} is not recording - will not be exported")
612
+ return False
613
+
614
+ logger.debug(f"Span {fn_name} is recording, trace_id={format(span.get_span_context().trace_id, '032x')}")
615
+
616
+ # Set component tag if configured
617
+ component_tag = get_component_tag()
618
+ if component_tag:
619
+ span.set_attribute("gen_ai.component.id", component_tag)
620
+
621
+ if input_data is not None:
622
+ span.set_attribute("input", serialize_for_span(input_data))
623
+
624
+ trace_id = format(span.get_span_context().trace_id, "032x")
625
+ logger.debug(f"do traceable stuff {fn_name} {trace_id}")
626
+ return True
627
+
628
+ def _finalize_span_success(span: trace.Span, result: Any) -> None:
629
+ """Set output and success status on span."""
630
+ # Extract and set token usage if present (before filtering output)
631
+ _extract_and_set_token_usage(span, result)
632
+ # Extract and set provider/model if present (before filtering output)
633
+ _extract_and_set_provider_and_model(span, result)
634
+
635
+ output_data = _prepare_and_filter_output(result, filter_output, ignore_output)
636
+ if output_data is not None:
637
+ span.set_attribute("output", serialize_for_span(output_data))
638
+ span.set_status(Status(StatusCode.OK))
639
+
640
+ def _execute_with_span_sync(executor: Callable[[], Any], input_data: Any) -> Any:
641
+ """Execute sync function within span context, handling input/output and exceptions."""
642
+ # Ensure tracer provider is initialized before creating spans
643
+ get_aiqa_client()
644
+ with tracer.start_as_current_span(fn_name) as span:
645
+ if not _setup_span(span, input_data):
646
+ return executor()
187
647
 
188
- if input_data is not None:
189
- # Serialize for span attributes (OpenTelemetry only accepts primitives or JSON strings)
190
- serialized_input = _serialize_for_span(input_data)
191
- span.set_attribute("input", serialized_input)
648
+ try:
649
+ result = executor()
650
+ _finalize_span_success(span, result)
651
+ return result
652
+ except Exception as exception:
653
+ _handle_span_exception(span, exception)
654
+ raise
655
+
656
+ async def _execute_with_span_async(executor: Callable[[], Any], input_data: Any) -> Any:
657
+ """Execute async function within span context, handling input/output and exceptions."""
658
+ # Ensure tracer provider is initialized before creating spans
659
+ get_aiqa_client()
660
+ with tracer.start_as_current_span(fn_name) as span:
661
+ if not _setup_span(span, input_data):
662
+ return await executor()
192
663
 
193
664
  try:
194
- # Call the function within the span context
195
- trace_id = format(span.get_span_context().trace_id, "032x")
196
- logger.debug(f"do traceable stuff {fn_name} {trace_id}")
197
-
198
- with trace.use_span(span, end_on_exit=False):
199
- result = await fn(*args, **kwargs)
200
-
201
- # Prepare output
202
- output_data = result
203
- if filter_output:
204
- output_data = filter_output(output_data)
205
- if ignore_output and isinstance(output_data, dict):
206
- # TODO: implement ignore_output logic
207
- pass
208
-
209
- span.set_attribute("output", _serialize_for_span(output_data))
210
- span.set_status(Status(StatusCode.OK))
211
-
665
+ result = await executor()
666
+ _finalize_span_success(span, result)
667
+ logger.debug(f"Span {fn_name} completed successfully, is_recording={span.is_recording()}")
212
668
  return result
213
669
  except Exception as exception:
214
- error = exception if isinstance(exception, Exception) else Exception(str(exception))
215
- span.record_exception(error)
216
- span.set_status(Status(StatusCode.ERROR, str(error)))
670
+ _handle_span_exception(span, exception)
217
671
  raise
218
672
  finally:
673
+ logger.debug(f"Span {fn_name} context exiting, is_recording={span.is_recording()}")
674
+
675
+ def _execute_generator_sync(executor: Callable[[], Any], input_data: Any) -> Any:
676
+ """Execute sync generator function, returning a traced generator."""
677
+ # Ensure tracer provider is initialized before creating spans
678
+ get_aiqa_client()
679
+ # Create span but don't use 'with' - span will be closed by TracedGenerator
680
+ span = tracer.start_span(fn_name)
681
+ token = trace.context_api.attach(trace.context_api.set_span_in_context(span))
682
+
683
+ try:
684
+ if not _setup_span(span, input_data):
685
+ generator = executor()
686
+ trace.context_api.detach(token)
219
687
  span.end()
688
+ return generator
689
+
690
+ generator = executor()
691
+ return TracedGenerator(generator, span, fn_name, filter_output, ignore_output, token)
692
+ except Exception as exception:
693
+ trace.context_api.detach(token)
694
+ _handle_span_exception(span, exception)
695
+ span.end()
696
+ raise
697
+
698
+ async def _execute_generator_async(executor: Callable[[], Any], input_data: Any) -> Any:
699
+ """Execute async generator function, returning a traced async generator."""
700
+ # Ensure tracer provider is initialized before creating spans
701
+ get_aiqa_client()
702
+ # Create span but don't use 'with' - span will be closed by TracedAsyncGenerator
703
+ span = tracer.start_span(fn_name)
704
+ token = trace.context_api.attach(trace.context_api.set_span_in_context(span))
705
+
706
+ try:
707
+ if not _setup_span(span, input_data):
708
+ generator = executor()
709
+ trace.context_api.detach(token)
710
+ span.end()
711
+ return generator
712
+
713
+ generator = executor()
714
+ return TracedAsyncGenerator(generator, span, fn_name, filter_output, ignore_output, token)
715
+ except Exception as exception:
716
+ trace.context_api.detach(token)
717
+ _handle_span_exception(span, exception)
718
+ span.end()
719
+ raise
720
+
721
+ if is_async_generator:
722
+ @wraps(fn)
723
+ async def async_gen_traced_fn(*args, **kwargs):
724
+ input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
725
+ return await _execute_generator_async(
726
+ lambda: fn(*args, **kwargs),
727
+ input_data
728
+ )
729
+
730
+ async_gen_traced_fn._is_traced = True
731
+ logger.debug(f"Function {fn_name} is now traced (async generator)")
732
+ return async_gen_traced_fn
733
+ elif is_generator:
734
+ @wraps(fn)
735
+ def gen_traced_fn(*args, **kwargs):
736
+ input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
737
+ return _execute_generator_sync(
738
+ lambda: fn(*args, **kwargs),
739
+ input_data
740
+ )
741
+
742
+ gen_traced_fn._is_traced = True
743
+ logger.debug(f"Function {fn_name} is now traced (generator)")
744
+ return gen_traced_fn
745
+ elif is_async:
746
+ @wraps(fn)
747
+ async def async_traced_fn(*args, **kwargs):
748
+ input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
749
+ return await _execute_with_span_async(
750
+ lambda: fn(*args, **kwargs),
751
+ input_data
752
+ )
220
753
 
221
754
  async_traced_fn._is_traced = True
222
755
  logger.debug(f"Function {fn_name} is now traced (async)")
@@ -224,48 +757,11 @@ def WithTracing(
224
757
  else:
225
758
  @wraps(fn)
226
759
  def sync_traced_fn(*args, **kwargs):
227
- span = tracer.start_span(fn_name)
228
-
229
- # Prepare input
230
- input_data = _prepare_input(args, kwargs)
231
- if filter_input:
232
- input_data = filter_input(input_data)
233
- if ignore_input and isinstance(input_data, dict):
234
- # TODO: implement ignore_input logic
235
- pass
236
-
237
- if input_data is not None:
238
- # Serialize for span attributes (OpenTelemetry only accepts primitives or JSON strings)
239
- serialized_input = _serialize_for_span(input_data)
240
- span.set_attribute("input", serialized_input)
241
-
242
- try:
243
- # Call the function within the span context
244
- trace_id = format(span.get_span_context().trace_id, "032x")
245
- logger.debug(f"do traceable stuff {fn_name} {trace_id}")
246
-
247
- with trace.use_span(span, end_on_exit=False):
248
- result = fn(*args, **kwargs)
249
-
250
- # Prepare output
251
- output_data = result
252
- if filter_output:
253
- output_data = filter_output(output_data)
254
- if ignore_output and isinstance(output_data, dict):
255
- # TODO: implement ignore_output logic
256
- pass
257
-
258
- span.set_attribute("output", _serialize_for_span(output_data))
259
- span.set_status(Status(StatusCode.OK))
260
-
261
- return result
262
- except Exception as exception:
263
- error = exception if isinstance(exception, Exception) else Exception(str(exception))
264
- span.record_exception(error)
265
- span.set_status(Status(StatusCode.ERROR, str(error)))
266
- raise
267
- finally:
268
- span.end()
760
+ input_data = _prepare_and_filter_input(args, kwargs, filter_input, ignore_input)
761
+ return _execute_with_span_sync(
762
+ lambda: fn(*args, **kwargs),
763
+ input_data
764
+ )
269
765
 
270
766
  sync_traced_fn._is_traced = True
271
767
  logger.debug(f"Function {fn_name} is now traced (sync)")
@@ -287,7 +783,7 @@ def set_span_attribute(attribute_name: str, attribute_value: Any) -> bool:
287
783
  """
288
784
  span = trace.get_current_span()
289
785
  if span and span.is_recording():
290
- span.set_attribute(attribute_name, _serialize_for_span(attribute_value))
786
+ span.set_attribute(attribute_name, serialize_for_span(attribute_value))
291
787
  return True
292
788
  return False
293
789
 
@@ -297,7 +793,7 @@ def set_span_name(span_name: str) -> bool:
297
793
  """
298
794
  span = trace.get_current_span()
299
795
  if span and span.is_recording():
300
- span.set_name(span_name)
796
+ span.update_name(span_name)
301
797
  return True
302
798
  return False
303
799
 
@@ -305,3 +801,456 @@ def get_active_span() -> Optional[trace.Span]:
305
801
  """Get the currently active span."""
306
802
  return trace.get_current_span()
307
803
 
804
+
805
def set_conversation_id(conversation_id: str) -> bool:
    """
    Tag the active span with gen_ai.conversation.id.

    Setting the same conversation id on spans from multiple traces groups them
    together as one conversation. See the OpenTelemetry gen_ai semantic
    conventions (https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-events/).

    Args:
        conversation_id: Unique identifier for the conversation (e.g. a user
            session id or chat id).

    Returns:
        True if the attribute was set, False when there is no active span.

    Example:
        from aiqa import WithTracing, set_conversation_id

        @WithTracing
        def handle_user_request(user_id: str, request: dict):
            set_conversation_id(f"user_{user_id}_session_{request.get('session_id')}")
            # ... rest of function
    """
    attribute_name = "gen_ai.conversation.id"
    return set_span_attribute(attribute_name, conversation_id)
827
+
828
+
829
def set_token_usage(
    input_tokens: Optional[int] = None,
    output_tokens: Optional[int] = None,
    total_tokens: Optional[int] = None,
) -> bool:
    """
    Record token usage on the active span using the gen_ai semantic conventions.

    AIQA tracing auto-detects token usage from standard OpenAI-style responses;
    use this helper when you need to record it explicitly.
    See https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/.

    Args:
        input_tokens: Input token count (gen_ai.usage.input_tokens).
        output_tokens: Generated token count (gen_ai.usage.output_tokens).
        total_tokens: Total token count (gen_ai.usage.total_tokens).

    Returns:
        True if at least one attribute was set, False when there is no active
        span or setting an attribute failed.

    Example:
        from aiqa import WithTracing, set_token_usage

        @WithTracing
        def call_llm(prompt: str):
            response = openai_client.chat.completions.create(...)
            set_token_usage(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens,
                total_tokens=response.usage.total_tokens,
            )
            return response
    """
    span = trace.get_current_span()
    if not span or not span.is_recording():
        return False

    # Attribute name / value pairs; None values are skipped.
    usage_attributes = [
        ("gen_ai.usage.input_tokens", input_tokens),
        ("gen_ai.usage.output_tokens", output_tokens),
        ("gen_ai.usage.total_tokens", total_tokens),
    ]

    set_count = 0
    try:
        for attribute_name, value in usage_attributes:
            if value is not None:
                span.set_attribute(attribute_name, value)
                set_count += 1
    except Exception as e:
        logger.warning(f"Failed to set token usage attributes: {e}")
        return False

    return set_count > 0
882
+
883
+
884
def set_provider_and_model(
    provider: Optional[str] = None,
    model: Optional[str] = None,
) -> bool:
    """
    Record the AI provider and model on the active span (gen_ai conventions).

    AIQA tracing auto-detects provider/model from standard API responses; use
    this helper to record them explicitly.
    See https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/.

    Args:
        provider: Provider name, e.g. "openai", "anthropic", "google"
            (gen_ai.provider.name).
        model: Model name, e.g. "gpt-4", "claude-3-5-sonnet"
            (gen_ai.request.model).

    Returns:
        True if at least one attribute was set, False when there is no active
        span or setting an attribute failed.

    Example:
        from aiqa import WithTracing, set_provider_and_model

        @WithTracing
        def call_llm(prompt: str):
            response = openai_client.chat.completions.create(...)
            set_provider_and_model(provider="openai", model=response.model)
            return response
    """
    span = trace.get_current_span()
    if not span or not span.is_recording():
        return False

    # Values are stringified to guarantee a span-compatible attribute type.
    attributes = [
        ("gen_ai.provider.name", provider),
        ("gen_ai.request.model", model),
    ]

    set_count = 0
    try:
        for attribute_name, value in attributes:
            if value is not None:
                span.set_attribute(attribute_name, str(value))
                set_count += 1
    except Exception as e:
        logger.warning(f"Failed to set provider/model attributes: {e}")
        return False

    return set_count > 0
931
+
932
+
933
def set_component_tag(tag: str) -> None:
    """
    Set the component tag added to every span created by AIQA.

    The tag identifies which component/system produced the spans. It can also
    be supplied via the AIQA_COMPONENT_TAG environment variable; a value set
    here overrides the environment variable.

    Note: when relying on environment variables, call get_aiqa_client() first
    so the client is initialized and the variables are loaded.

    Args:
        tag: Component identifier, e.g. "mynamespace.mysystem" or "backend.api".

    Example:
        from aiqa import get_aiqa_client, set_component_tag, WithTracing

        # Initialize client (loads env vars including AIQA_COMPONENT_TAG)
        get_aiqa_client()

        # Or set the tag programmatically (overrides the env var)
        set_component_tag("mynamespace.mysystem")

        @WithTracing
        def my_function():
            pass
    """
    # Delegate to the client module, which owns the tag state.
    _set_component_tag(tag)
959
+
960
def get_provider() -> Optional[TracerProvider]:
    """Return the tracer provider held by the AIQA client (advanced usage)."""
    return get_aiqa_client().get("provider")
964
+
965
def get_exporter() -> Optional[AIQASpanExporter]:
    """Return the span exporter held by the AIQA client (advanced usage)."""
    return get_aiqa_client().get("exporter")
969
+
970
+
971
def get_trace_id() -> Optional[str]:
    """
    Return the current trace ID as a 32-character hexadecimal string.

    Returns:
        The trace ID hex string, or None when there is no valid active span.

    Example:
        trace_id = get_trace_id()
        # Pass trace_id to another service/agent, e.g. via HTTP headers or
        # message queue metadata.
    """
    current_span = trace.get_current_span()
    if not current_span:
        return None
    span_context = current_span.get_span_context()
    if not span_context.is_valid:
        return None
    return f"{span_context.trace_id:032x}"
987
+
988
+
989
def get_span_id() -> Optional[str]:
    """
    Return the current span ID as a 16-character hexadecimal string.

    Returns:
        The span ID hex string, or None when there is no valid active span.

    Example:
        span_id = get_span_id()
        # Can be used to create child spans in other services.
    """
    current_span = trace.get_current_span()
    if not current_span:
        return None
    span_context = current_span.get_span_context()
    if not span_context.is_valid:
        return None
    return f"{span_context.span_id:016x}"
1004
+
1005
+
1006
def create_span_from_trace_id(
    trace_id: str,
    parent_span_id: Optional[str] = None,
    span_name: str = "continued_span",
) -> trace.Span:
    """
    Create a new span that continues an existing trace.

    Useful for linking traces across different services or agents.

    Args:
        trace_id: The trace ID as a hexadecimal string (32 characters).
        parent_span_id: Optional parent span ID as a hexadecimal string
            (16 characters). If provided, the new span becomes a child of
            that span.
        span_name: Name for the new span (default: "continued_span").

    Returns:
        A new span continuing the trace. Use it in a context manager or call
        end() manually. If the ids cannot be parsed, a fresh root span is
        returned instead of raising.

    Example:
        # In service A: get trace ID
        trace_id = get_trace_id()
        span_id = get_span_id()

        # Send to service B (e.g., via HTTP, message queue, etc.)
        # ...

        # In service B: continue the trace
        with create_span_from_trace_id(trace_id, parent_span_id=span_id, span_name="service_b_operation"):
            pass
    """
    import random  # local import: only needed for the placeholder span id below

    try:
        # Parse trace ID from hex string
        trace_id_int = int(trace_id, 16)

        # Parse parent span ID if provided
        parent_span_id_int = None
        if parent_span_id:
            parent_span_id_int = int(parent_span_id, 16)

        if parent_span_id_int is None:
            # Bug fix: a SpanContext with span_id=0 is invalid, so the SDK
            # would ignore it as a parent and allocate a brand-new trace id,
            # breaking the "continue this trace" contract. Synthesize a
            # non-zero placeholder span id so the remote context stays valid
            # and trace_id is honoured.
            parent_span_id_int = random.getrandbits(64) or 1

        # Remote parent span context carrying the trace id to continue.
        parent_span_context = SpanContext(
            trace_id=trace_id_int,
            span_id=parent_span_id_int,
            is_remote=True,
            trace_flags=TraceFlags(0x01),  # SAMPLED flag
        )

        # Create a context with this span context as the parent
        from opentelemetry.trace import set_span_in_context
        parent_context = set_span_in_context(trace.NonRecordingSpan(parent_span_context))

        # Start a new span in this context (it will be a child of the parent span)
        tracer = get_aiqa_tracer()
        span = tracer.start_span(span_name, context=parent_context)

        # Set component tag if configured
        component_tag = get_component_tag()
        if component_tag:
            span.set_attribute("gen_ai.component.id", component_tag)

        return span
    except (ValueError, AttributeError) as e:
        logger.error(f"Error creating span from trace_id: {e}")
        # Fallback: create a new root span so callers still get a usable span.
        tracer = get_aiqa_tracer()
        span = tracer.start_span(span_name)
        component_tag = get_component_tag()
        if component_tag:
            span.set_attribute("gen_ai.component.id", component_tag)
        return span
1077
+
1078
+
1079
def inject_trace_context(carrier: dict) -> None:
    """
    Inject the current trace context into a carrier such as HTTP headers,
    so the trace can be continued by another service.

    Injection failures are logged as warnings and never raised.

    Args:
        carrier: Dictionary the trace context is written into (e.g. an HTTP
            headers dict).

    Example:
        import requests

        headers = {}
        inject_trace_context(headers)
        response = requests.get("http://other-service/api", headers=headers)
    """
    try:
        inject(carrier)
    except Exception as e:
        logger.warning(f"Error injecting trace context: {e}")
1098
+
1099
+
1100
def extract_trace_context(carrier: dict) -> Any:
    """
    Extract trace context from a carrier such as incoming HTTP headers,
    to continue a trace that was started in another service.

    Extraction failures are logged as warnings and None is returned.

    Args:
        carrier: Dictionary containing propagated trace context (e.g. an HTTP
            headers dict).

    Returns:
        A context object usable as the `context=` argument when starting
        spans, or None on failure.

    Example:
        # Extract context from incoming request headers
        ctx = extract_trace_context(request.headers)

        # Create a span parented on the extracted context
        tracer = get_aiqa_tracer()
        with tracer.start_as_current_span("operation", context=ctx):
            # Your code here
            pass
    """
    try:
        return extract(carrier)
    except Exception as e:
        logger.warning(f"Error extracting trace context: {e}")
        return None
1133
+
1134
+
1135
def get_span(span_id: str, organisation_id: Optional[str] = None, exclude: Optional[List[str]] = None) -> Optional[dict]:
    """
    Get a span by its ID from the AIQA server.

    Expected usage is: re-playing a specific function call in a unit test
    (either a developer debugging an issue, or as part of a test suite).

    Args:
        span_id: The span ID as a hexadecimal string (16 characters) or client span ID.
        organisation_id: Optional organisation ID. If not provided, will try to get from
            AIQA_ORGANISATION_ID environment variable. The organisation is typically
            extracted from the API key during authentication, but the API requires it
            as a query parameter.
        exclude: Optional list of fields to exclude from the span data. By default this
            function WILL return 'attributes' (often large).

    Returns:
        The span data as a dictionary, or None if not found.

    Raises:
        ValueError: if required configuration is missing or the server returns
            an unexpected error status.

    Example:
        from aiqa import get_span

        span = get_span('abc123...')
        if span:
            print(f"Found span: {span['name']}")
            my_function(**span['input'])
    """
    import os
    import requests

    server_url = os.getenv("AIQA_SERVER_URL", "").rstrip("/")
    api_key = os.getenv("AIQA_API_KEY", "")
    org_id = organisation_id or os.getenv("AIQA_ORGANISATION_ID", "")

    if not server_url:
        raise ValueError("AIQA_SERVER_URL is not set. Cannot retrieve span.")
    if not org_id:
        raise ValueError("Organisation ID is required. Provide it as parameter or set AIQA_ORGANISATION_ID environment variable.")
    if not api_key:
        raise ValueError("API key is required. Set AIQA_API_KEY environment variable.")

    # Try both spanId and clientSpanId queries
    for query_field in ["spanId", "clientSpanId"]:
        url = f"{server_url}/span"
        # requests drops params whose value is None, so exactly one of
        # exclude/fields is sent.
        params = {
            "q": f"{query_field}:{span_id}",
            "organisation": org_id,
            "limit": "1",
            "exclude": ",".join(exclude) if exclude else None,
            "fields": "*" if not exclude else None,
        }

        headers = {"Content-Type": "application/json"}
        if api_key:
            headers["Authorization"] = f"ApiKey {api_key}"

        # Bug fix: a timeout prevents this helper (often run inside tests)
        # from hanging forever on an unresponsive server.
        response = requests.get(url, params=params, headers=headers, timeout=30)
        if response.status_code == 200:
            result = response.json()
            hits = result.get("hits", [])
            if hits:
                return hits[0]
        elif response.status_code == 404:
            # Try next query field
            continue
        else:
            error_text = response.text
            raise ValueError(f"Failed to get span: {response.status_code} - {error_text[:500]}")
    # not found
    return None
1203
+
1204
+
1205
async def submit_feedback(
    trace_id: str,
    thumbs_up: Optional[bool] = None,
    comment: Optional[str] = None,
) -> None:
    """
    Submit feedback for a trace by creating a new span with the same trace ID.

    This allows you to add feedback (thumbs-up, thumbs-down, comment) to a
    trace after it has completed.

    Args:
        trace_id: The trace ID as a hexadecimal string (32 characters).
        thumbs_up: True for positive feedback, False for negative feedback,
            None for neutral.
        comment: Optional text comment.

    Raises:
        ValueError: if trace_id is not a 32-character string.

    Example:
        from aiqa import submit_feedback

        # Submit positive feedback
        await submit_feedback('abc123...', thumbs_up=True, comment='Great response!')

        # Submit negative feedback
        await submit_feedback('abc123...', thumbs_up=False, comment='Incorrect answer')
    """
    if not trace_id or len(trace_id) != 32:
        raise ValueError('Invalid trace ID: must be 32 hexadecimal characters')

    # Create a span for feedback with the same trace ID
    span = create_span_from_trace_id(trace_id, span_name='feedback')

    try:
        # Set feedback attributes
        if thumbs_up is not None:
            span.set_attribute('feedback.thumbs_up', thumbs_up)
            span.set_attribute('feedback.type', 'positive' if thumbs_up else 'negative')
        else:
            span.set_attribute('feedback.type', 'neutral')

        if comment:
            span.set_attribute('feedback.comment', comment)

        # Mark as feedback span
        span.set_attribute('aiqa.span_type', 'feedback')
    finally:
        # Bug fix: end the span exactly once. Previously a failure in
        # flush_tracing() (after span.end()) triggered a second span.end()
        # in the except branch.
        span.end()

    # Flush to ensure the feedback span is sent immediately.
    await flush_tracing()
1256
+