aiqa-client 0.3.1__tar.gz → 0.3.5__tar.gz

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aiqa-client
- Version: 0.3.1
+ Version: 0.3.5
  Summary: OpenTelemetry-based Python client for tracing functions and sending traces to the AIQA server
  Author-email: AIQA <info@aiqa.dev>
  License: MIT
@@ -176,10 +176,10 @@ To link traces across different services or agents, you can extract and propagat
  #### Getting Current Trace ID

  ```python
- from aiqa import get_trace_id, get_span_id
+ from aiqa import get_active_trace_id, get_span_id

  # Get the current trace ID and span ID
- trace_id = get_trace_id() # Returns hex string (32 chars) or None
+ trace_id = get_active_trace_id() # Returns hex string (32 chars) or None
  span_id = get_span_id() # Returns hex string (16 chars) or None

  # Pass these to another service (e.g., in HTTP headers, message queue, etc.)
@@ -139,10 +139,10 @@ To link traces across different services or agents, you can extract and propagat
  #### Getting Current Trace ID

  ```python
- from aiqa import get_trace_id, get_span_id
+ from aiqa import get_active_trace_id, get_span_id

  # Get the current trace ID and span ID
- trace_id = get_trace_id() # Returns hex string (32 chars) or None
+ trace_id = get_active_trace_id() # Returns hex string (32 chars) or None
  span_id = get_span_id() # Returns hex string (16 chars) or None

  # Pass these to another service (e.g., in HTTP headers, message queue, etc.)
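The rename of `get_trace_id` to `get_active_trace_id` is a breaking API change. Downstream code that must run against both sides of it can guard the import; the shim below is a hypothetical sketch, not part of the package:

```python
# Hypothetical compatibility shim for code that must run against both
# aiqa-client <= 0.3.1 (get_trace_id) and >= 0.3.5 (get_active_trace_id).
try:
    from aiqa import get_active_trace_id  # 0.3.5 and later
except ImportError:  # older releases only export get_trace_id
    from aiqa import get_trace_id as get_active_trace_id
```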
@@ -28,7 +28,7 @@ from .tracing import (
  get_active_span,
  get_provider,
  get_exporter,
- get_trace_id,
+ get_active_trace_id,
  get_span_id,
  create_span_from_trace_id,
  inject_trace_context,
@@ -40,7 +40,7 @@ from .tracing import (
  from .client import get_aiqa_client
  from .experiment_runner import ExperimentRunner

- __version__ = "0.3.1"
+ __version__ = "0.3.5"

  __all__ = [
  "WithTracing",
@@ -53,7 +53,7 @@ __all__ = [
  "get_exporter",
  "get_aiqa_client",
  "ExperimentRunner",
- "get_trace_id",
+ "get_active_trace_id",
  "get_span_id",
  "create_span_from_trace_id",
  "inject_trace_context",
@@ -27,6 +27,7 @@ class AIQASpanExporter(SpanExporter):
  server_url: Optional[str] = None,
  api_key: Optional[str] = None,
  flush_interval_seconds: float = 5.0,
+ max_batch_size_bytes: int = 5 * 1024 * 1024, # 5 MB default
  ):
  """
  Initialize the AIQA span exporter.
@@ -35,10 +36,12 @@ class AIQASpanExporter(SpanExporter):
  server_url: URL of the AIQA server (defaults to AIQA_SERVER_URL env var)
  api_key: API key for authentication (defaults to AIQA_API_KEY env var)
  flush_interval_seconds: How often to flush spans to the server
+ max_batch_size_bytes: Maximum size of a single batch in bytes (default: 5 MB)
  """
  self._server_url = server_url
  self._api_key = api_key
  self.flush_interval_ms = flush_interval_seconds * 1000
+ self.max_batch_size_bytes = max_batch_size_bytes
  self.buffer: List[Dict[str, Any]] = []
  self.buffer_span_keys: set = set() # Track (traceId, spanId) tuples to prevent duplicates (Python 3.8 compatible)
  self.buffer_lock = threading.Lock()
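The new parameter caps how many bytes of serialized spans go into a single POST. A minimal construction sketch follows; importing AIQASpanExporter from the package root is an assumption (this diff shows only get_exporter being re-exported), and all values are illustrative:

```python
# Sketch only: the import path for AIQASpanExporter is an assumption,
# since this diff does not name the module that defines it.
from aiqa import AIQASpanExporter

exporter = AIQASpanExporter(
    server_url="https://aiqa.example.com",  # or set AIQA_SERVER_URL
    api_key="sk-...",                       # or set AIQA_API_KEY
    flush_interval_seconds=5.0,
    max_batch_size_bytes=2 * 1024 * 1024,   # tighten the 5 MB default to 2 MB
)
```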
@@ -243,6 +246,59 @@ class AIQASpanExporter(SpanExporter):
  self.buffer.clear()
  self.buffer_span_keys.clear()

+ def _split_into_batches(self, spans: List[Dict[str, Any]]) -> List[List[Dict[str, Any]]]:
+ """
+ Split spans into batches based on max_batch_size_bytes.
+ Each batch will be as large as possible without exceeding the limit.
+ If a single span exceeds the limit, it will be sent in its own batch with a warning.
+ """
+ if not spans:
+ return []
+
+ batches = []
+ current_batch = []
+ current_batch_size = 0
+
+ for span in spans:
+ # Estimate size of this span when serialized
+ span_json = json.dumps(span)
+ span_size = len(span_json.encode('utf-8'))
+
+ # Check if this single span exceeds the limit
+ if span_size > self.max_batch_size_bytes:
+ # If we have a current batch, save it first
+ if current_batch:
+ batches.append(current_batch)
+ current_batch = []
+ current_batch_size = 0
+
+ # Log warning about oversized span
+ span_name = span.get('name', 'unknown')
+ span_trace_id = span.get('traceId', 'unknown')
+ logger.warning(
+ f"Span '{span_name}' (traceId={span_trace_id}) exceeds max_batch_size_bytes "
+ f"({span_size} bytes > {self.max_batch_size_bytes} bytes). "
+ f"Will attempt to send it anyway - may fail if server/nginx limit is exceeded."
+ )
+ # Still create a batch with just this span - we'll try to send it
+ batches.append([span])
+ continue
+
+ # If adding this span would exceed the limit, start a new batch
+ if current_batch and current_batch_size + span_size > self.max_batch_size_bytes:
+ batches.append(current_batch)
+ current_batch = []
+ current_batch_size = 0
+
+ current_batch.append(span)
+ current_batch_size += span_size
+
+ # Add the last batch if it has any spans
+ if current_batch:
+ batches.append(current_batch)
+
+ return batches
+
  async def flush(self) -> None:
  """
  Flush buffered spans to the server. Thread-safe: ensures only one flush operation runs at a time.
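The batching added above is a greedy, size-based split: each batch is filled until the next span would push it past max_batch_size_bytes, and a span larger than the limit on its own is sent as a single-span batch with a warning. A standalone sketch of the same logic (not the package's code):

```python
import json
from typing import Any, Dict, List

def split_by_size(spans: List[Dict[str, Any]], max_bytes: int) -> List[List[Dict[str, Any]]]:
    """Greedy size-based batching, mirroring the method above.
    Standalone sketch, not the package's code."""
    batches: List[List[Dict[str, Any]]] = []
    current: List[Dict[str, Any]] = []
    current_size = 0
    for span in spans:
        size = len(json.dumps(span).encode("utf-8"))
        if size > max_bytes:
            if current:  # close the batch in progress first
                batches.append(current)
                current, current_size = [], 0
            batches.append([span])  # oversized span travels alone
            continue
        if current and current_size + size > max_bytes:
            batches.append(current)  # this span would overflow; start fresh
            current, current_size = [], 0
        current.append(span)
        current_size += size
    if current:
        batches.append(current)
    return batches

# Three ~8-byte spans with a 20-byte cap split into batches of 2 and 1.
assert [len(b) for b in split_by_size([{"a": 1}, {"b": 2}, {"c": 3}], 20)] == [2, 1]
```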
@@ -345,77 +401,108 @@
  logger.info(f"Auto-flush thread started: {flush_thread.name} (daemon={flush_thread.daemon})")

  async def _send_spans(self, spans: List[Dict[str, Any]]) -> None:
- """Send spans to the server API (async)."""
+ """Send spans to the server API (async). Batches large payloads automatically."""
  import aiohttp

+ # Split into batches if needed
+ batches = self._split_into_batches(spans)
+ if len(batches) > 1:
+ logger.info(f"_send_spans() splitting {len(spans)} spans into {len(batches)} batches")
+
  url = self._get_span_url()
  headers = self._build_request_headers()
- logger.debug(f"_send_spans() sending {len(spans)} spans to {url}")
+
  if self.api_key:
  logger.debug("_send_spans() using API key authentication")
  else:
  logger.debug("_send_spans() no API key provided")

- try:
- # Pre-serialize JSON to bytes and wrap in BytesIO to avoid blocking event loop
- json_bytes = json.dumps(spans).encode('utf-8')
- data = io.BytesIO(json_bytes)
-
- async with aiohttp.ClientSession() as session:
- logger.debug(f"_send_spans() POST request starting to {url}")
- async with session.post(url, data=data, headers=headers) as response:
- logger.debug(f"_send_spans() received response: status={response.status}")
- if not response.ok:
- error_text = await response.text()
- logger.error(
- f"_send_spans() failed: status={response.status}, "
- f"reason={response.reason}, error={error_text[:200]}"
- )
- raise Exception(
- f"Failed to send spans: {response.status} {response.reason} - {error_text}"
- )
- logger.debug(f"_send_spans() successfully sent {len(spans)} spans")
- except RuntimeError as e:
- if self._is_interpreter_shutdown_error(e):
- if self.shutdown_requested:
- logger.debug(f"_send_spans() skipped due to interpreter shutdown: {e}")
- else:
- logger.warning(f"_send_spans() interrupted by interpreter shutdown: {e}")
- raise
- logger.error(f"_send_spans() RuntimeError: {type(e).__name__}: {e}")
- raise
- except Exception as e:
- logger.error(f"_send_spans() exception: {type(e).__name__}: {e}")
- raise
+ errors = []
+ async with aiohttp.ClientSession() as session:
+ for batch_idx, batch in enumerate(batches):
+ try:
+ logger.debug(f"_send_spans() sending batch {batch_idx + 1}/{len(batches)} with {len(batch)} spans to {url}")
+ # Pre-serialize JSON to bytes and wrap in BytesIO to avoid blocking event loop
+ json_bytes = json.dumps(batch).encode('utf-8')
+ data = io.BytesIO(json_bytes)
+
+ async with session.post(url, data=data, headers=headers) as response:
+ logger.debug(f"_send_spans() batch {batch_idx + 1} received response: status={response.status}")
+ if not response.ok:
+ error_text = await response.text()
+ error_msg = f"Failed to send batch {batch_idx + 1}/{len(batches)}: {response.status} {response.reason} - {error_text[:200]}"
+ logger.error(f"_send_spans() {error_msg}")
+ errors.append((batch_idx + 1, error_msg))
+ # Continue with other batches even if one fails
+ continue
+ logger.debug(f"_send_spans() batch {batch_idx + 1} successfully sent {len(batch)} spans")
+ except RuntimeError as e:
+ if self._is_interpreter_shutdown_error(e):
+ if self.shutdown_requested:
+ logger.debug(f"_send_spans() skipped due to interpreter shutdown: {e}")
+ else:
+ logger.warning(f"_send_spans() interrupted by interpreter shutdown: {e}")
+ raise
+ error_msg = f"RuntimeError in batch {batch_idx + 1}: {type(e).__name__}: {e}"
+ logger.error(f"_send_spans() {error_msg}")
+ errors.append((batch_idx + 1, error_msg))
+ # Continue with other batches
+ except Exception as e:
+ error_msg = f"Exception in batch {batch_idx + 1}: {type(e).__name__}: {e}"
+ logger.error(f"_send_spans() {error_msg}")
+ errors.append((batch_idx + 1, error_msg))
+ # Continue with other batches
+
+ # If any batches failed, raise an exception with details
+ if errors:
+ error_summary = "; ".join([f"batch {idx}: {msg}" for idx, msg in errors])
+ raise Exception(f"Failed to send some spans: {error_summary}")
+
+ logger.debug(f"_send_spans() successfully sent all {len(spans)} spans in {len(batches)} batch(es)")

  def _send_spans_sync(self, spans: List[Dict[str, Any]]) -> None:
- """Send spans to the server API (synchronous, for shutdown scenarios)."""
+ """Send spans to the server API (synchronous, for shutdown scenarios). Batches large payloads automatically."""
  import requests

+ # Split into batches if needed
+ batches = self._split_into_batches(spans)
+ if len(batches) > 1:
+ logger.info(f"_send_spans_sync() splitting {len(spans)} spans into {len(batches)} batches")
+
  url = self._get_span_url()
  headers = self._build_request_headers()
- logger.debug(f"_send_spans_sync() sending {len(spans)} spans to {url}")
+
  if self.api_key:
  logger.debug("_send_spans_sync() using API key authentication")
  else:
  logger.debug("_send_spans_sync() no API key provided")

- try:
- response = requests.post(url, json=spans, headers=headers, timeout=10.0)
- logger.debug(f"_send_spans_sync() received response: status={response.status_code}")
- if not response.ok:
- error_text = response.text[:200] if response.text else ""
- logger.error(
- f"_send_spans_sync() failed: status={response.status_code}, "
- f"reason={response.reason}, error={error_text}"
- )
- raise Exception(
- f"Failed to send spans: {response.status_code} {response.reason} - {error_text}"
- )
- logger.debug(f"_send_spans_sync() successfully sent {len(spans)} spans")
- except Exception as e:
- logger.error(f"_send_spans_sync() exception: {type(e).__name__}: {e}")
- raise
+ errors = []
+ for batch_idx, batch in enumerate(batches):
+ try:
+ logger.debug(f"_send_spans_sync() sending batch {batch_idx + 1}/{len(batches)} with {len(batch)} spans to {url}")
+ response = requests.post(url, json=batch, headers=headers, timeout=10.0)
+ logger.debug(f"_send_spans_sync() batch {batch_idx + 1} received response: status={response.status_code}")
+ if not response.ok:
+ error_text = response.text[:200] if response.text else ""
+ error_msg = f"Failed to send batch {batch_idx + 1}/{len(batches)}: {response.status_code} {response.reason} - {error_text}"
+ logger.error(f"_send_spans_sync() {error_msg}")
+ errors.append((batch_idx + 1, error_msg))
+ # Continue with other batches even if one fails
+ continue
+ logger.debug(f"_send_spans_sync() batch {batch_idx + 1} successfully sent {len(batch)} spans")
+ except Exception as e:
+ error_msg = f"Exception in batch {batch_idx + 1}: {type(e).__name__}: {e}"
+ logger.error(f"_send_spans_sync() {error_msg}")
+ errors.append((batch_idx + 1, error_msg))
+ # Continue with other batches
+
+ # If any batches failed, raise an exception with details
+ if errors:
+ error_summary = "; ".join([f"batch {idx}: {msg}" for idx, msg in errors])
+ raise Exception(f"Failed to send some spans: {error_summary}")
+
+ logger.debug(f"_send_spans_sync() successfully sent all {len(spans)} spans in {len(batches)} batch(es)")

  def shutdown(self) -> None:
  """Shutdown the exporter, flushing any remaining spans. Call before process exit."""
@@ -6,9 +6,12 @@ Handles objects, dataclasses, circular references, and size limits.
  import json
  import os
  import dataclasses
+ import logging
  from datetime import datetime, date, time
  from typing import Any, Callable, Set

+ logger = logging.getLogger("aiqa")
+
  def toNumber(value: str|int|None) -> int:
  """Convert string to number. handling units like g, m, k, (also mb kb gb though these should be avoided)"""
  if value is None:
@@ -195,24 +198,39 @@ def object_to_dict(obj: Any, visited: Set[int], max_depth: int = 10, current_dep
  try:
  result = {}
  for k, v in obj.items():
- key_str = str(k) if not isinstance(k, (str, int, float, bool)) else k
- filtered_value = _apply_data_filters(key_str, v)
- result[key_str] = object_to_dict(filtered_value, visited, max_depth, current_depth + 1)
+ try:
+ key_str = str(k) if not isinstance(k, (str, int, float, bool)) else k
+ filtered_value = _apply_data_filters(key_str, v)
+ result[key_str] = object_to_dict(filtered_value, visited, max_depth, current_depth + 1)
+ except Exception as e:
+ # If one key-value pair fails, log and use string representation for the value
+ key_str = str(k) if not isinstance(k, (str, int, float, bool)) else k
+ logger.debug(f"Failed to convert dict value for key '{key_str}': {e}")
+ result[key_str] = safe_str_repr(v)
  visited.remove(obj_id)
  return result
- except Exception:
+ except Exception as e:
  visited.discard(obj_id)
+ logger.debug(f"Failed to convert dict to dict: {e}")
  return safe_str_repr(obj)

  # Handle list/tuple
  if isinstance(obj, (list, tuple)):
  visited.add(obj_id)
  try:
- result = [object_to_dict(item, visited, max_depth, current_depth + 1) for item in obj]
+ result = []
+ for item in obj:
+ try:
+ result.append(object_to_dict(item, visited, max_depth, current_depth + 1))
+ except Exception as e:
+ # If one item fails, log and use its string representation
+ logger.debug(f"Failed to convert list item {type(item).__name__} to dict: {e}")
+ result.append(safe_str_repr(item))
  visited.remove(obj_id)
  return result
- except Exception:
+ except Exception as e:
  visited.discard(obj_id)
+ logger.debug(f"Failed to convert list/tuple to dict: {e}")
  return safe_str_repr(obj)

  # Handle dataclasses
@@ -221,13 +239,19 @@ def object_to_dict(obj: Any, visited: Set[int], max_depth: int = 10, current_dep
  try:
  result = {}
  for field in dataclasses.fields(obj):
- value = getattr(obj, field.name, None)
- filtered_value = _apply_data_filters(field.name, value)
- result[field.name] = object_to_dict(filtered_value, visited, max_depth, current_depth + 1)
+ try:
+ value = getattr(obj, field.name, None)
+ filtered_value = _apply_data_filters(field.name, value)
+ result[field.name] = object_to_dict(filtered_value, visited, max_depth, current_depth + 1)
+ except Exception as e:
+ # If accessing a field fails, log and store a placeholder
+ logger.debug(f"Failed to access field {field.name} on {type(obj).__name__}: {e}")
+ result[field.name] = "<error accessing field>"
  visited.remove(obj_id)
  return result
- except Exception:
+ except Exception as e:
  visited.discard(obj_id)
+ logger.debug(f"Failed to convert dataclass {type(obj).__name__} to dict: {e}")
  return safe_str_repr(obj)

  # Handle objects with __dict__
@@ -242,8 +266,10 @@ def object_to_dict(obj: Any, visited: Set[int], max_depth: int = 10, current_dep
  result[key] = object_to_dict(filtered_value, visited, max_depth, current_depth + 1)
  visited.remove(obj_id)
  return result
- except Exception:
+ except Exception as e:
  visited.discard(obj_id)
+ # Log the error for debugging, but still return string representation
+ logger.debug(f"Failed to convert object {type(obj).__name__} to dict: {e}")
  return safe_str_repr(obj)

  # Handle objects with __slots__
@@ -252,14 +278,20 @@ def object_to_dict(obj: Any, visited: Set[int], max_depth: int = 10, current_dep
  try:
  result = {}
  for slot in obj.__slots__:
- if hasattr(obj, slot):
- value = getattr(obj, slot, None)
- filtered_value = _apply_data_filters(slot, value)
- result[slot] = object_to_dict(filtered_value, visited, max_depth, current_depth + 1)
+ try:
+ if hasattr(obj, slot):
+ value = getattr(obj, slot, None)
+ filtered_value = _apply_data_filters(slot, value)
+ result[slot] = object_to_dict(filtered_value, visited, max_depth, current_depth + 1)
+ except Exception as e:
+ # If accessing a slot fails, log and store a placeholder
+ logger.debug(f"Failed to access slot {slot} on {type(obj).__name__}: {e}")
+ result[slot] = "<error accessing slot>"
  visited.remove(obj_id)
  return result
- except Exception:
+ except Exception as e:
  visited.discard(obj_id)
+ logger.debug(f"Failed to convert slotted object {type(obj).__name__} to dict: {e}")
  return safe_str_repr(obj)

  # Fallback: try to get a few common attributes
@@ -301,14 +333,16 @@ def safe_json_dumps(value: Any) -> str:
  # across the whole object graph
  try:
  converted = object_to_dict(value, visited)
- except Exception:
+ except Exception as e:
  # If conversion fails, try with a fresh visited set and json default handler
+ logger.debug(f"object_to_dict failed for {type(value).__name__}, trying json.dumps with default handler: {e}")
  try:
  json_str = json.dumps(value, default=json_default_handler_factory(set()))
  if len(json_str) > max_size_chars:
  return f"<object {type(value)} too large: {len(json_str)} chars (limit: {max_size_chars} chars) begins: {json_str[:100]}... conversion error: {e}>"
  return json_str
- except Exception:
+ except Exception as e2:
+ logger.debug(f"json.dumps with default handler also failed for {type(value).__name__}: {e2}")
  return safe_str_repr(value)

  # Try JSON serialization of the converted structure
@@ -318,7 +352,8 @@ def safe_json_dumps(value: Any) -> str:
  if len(json_str) > max_size_chars:
  return f"<object {type(value)} too large: {len(json_str)} chars (limit: {max_size_chars} chars) begins: {json_str[:100]}...>"
  return json_str
- except Exception:
+ except Exception as e:
+ logger.debug(f"json.dumps total fail for {type(value).__name__}: {e}")
  # Final fallback
  return safe_str_repr(value)

@@ -55,7 +55,7 @@ async def shutdown_tracing() -> None:
  __all__ = [
  "get_provider", "get_exporter", "flush_tracing", "shutdown_tracing", "WithTracing",
  "set_span_attribute", "set_span_name", "get_active_span",
- "get_trace_id", "get_span_id", "create_span_from_trace_id", "inject_trace_context", "extract_trace_context",
+ "get_active_trace_id", "get_span_id", "create_span_from_trace_id", "inject_trace_context", "extract_trace_context",
  "set_conversation_id", "set_component_tag", "set_token_usage", "set_provider_and_model", "get_span", "submit_feedback"
  ]

@@ -968,7 +968,7 @@ def get_exporter() -> Optional[AIQASpanExporter]:
  return client.get("exporter")


- def get_trace_id() -> Optional[str]:
+ def get_active_trace_id() -> Optional[str]:
  """
  Get the current trace ID as a hexadecimal string (32 characters).

@@ -976,9 +976,10 @@ def get_trace_id() -> Optional[str]:
  The trace ID as a hex string, or None if no active span exists.

  Example:
- trace_id = get_trace_id()
+ trace_id = get_active_trace_id()
  # Pass trace_id to another service/agent
  # e.g., include in HTTP headers, message queue metadata, etc.
+ # Within a single thread, OpenTelemetry normally does this for you.
  """
  span = trace.get_current_span()
  if span and span.get_span_context().is_valid:
@@ -1023,7 +1024,7 @@ def create_span_from_trace_id(

  Example:
  # In service A: get trace ID
- trace_id = get_trace_id()
+ trace_id = get_active_trace_id()
  span_id = get_span_id()

  # Send to service B (e.g., via HTTP, message queue, etc.)
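Putting the renamed helpers together on the sending side might look like the sketch below. The two getters and their return shapes come from this diff; the header names are illustrative, not an aiqa convention:

```python
from aiqa import get_active_trace_id, get_span_id

def trace_headers() -> dict:
    """Build outgoing headers carrying the current trace context.
    Header names here are illustrative, not an aiqa convention."""
    headers = {}
    trace_id = get_active_trace_id()  # 32-char hex string, or None outside a span
    span_id = get_span_id()           # 16-char hex string, or None outside a span
    if trace_id and span_id:
        headers["x-aiqa-trace-id"] = trace_id
        headers["x-aiqa-parent-span-id"] = span_id
    return headers
```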
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aiqa-client
- Version: 0.3.1
+ Version: 0.3.5
  Summary: OpenTelemetry-based Python client for tracing functions and sending traces to the AIQA server
  Author-email: AIQA <info@aiqa.dev>
  License: MIT
@@ -176,10 +176,10 @@ To link traces across different services or agents, you can extract and propagat
  #### Getting Current Trace ID

  ```python
- from aiqa import get_trace_id, get_span_id
+ from aiqa import get_active_trace_id, get_span_id

  # Get the current trace ID and span ID
- trace_id = get_trace_id() # Returns hex string (32 chars) or None
+ trace_id = get_active_trace_id() # Returns hex string (32 chars) or None
  span_id = get_span_id() # Returns hex string (16 chars) or None

  # Pass these to another service (e.g., in HTTP headers, message queue, etc.)
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "aiqa-client"
- version = "0.3.1"
+ version = "0.3.5"
  description = "OpenTelemetry-based Python client for tracing functions and sending traces to the AIQA server"
  readme = "README.md"
  requires-python = ">=3.8"