netra-sdk 0.1.7__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff shows the changes between two publicly available versions of the package, as released to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Potentially problematic release.

@@ -0,0 +1,554 @@
+ """
+ OpenAI API wrappers for Netra SDK instrumentation.
+ 
+ This module contains wrapper functions for different OpenAI API endpoints with
+ proper span handling for streaming vs non-streaming operations.
+ """
+ 
+ import logging
+ import time
+ from collections.abc import Awaitable
+ from typing import Any, AsyncIterator, Callable, Dict, Iterator, Tuple
+ 
+ from opentelemetry import context as context_api
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+ from opentelemetry.trace import Span, SpanKind, Tracer
+ from opentelemetry.trace.status import Status, StatusCode
+ from wrapt import ObjectProxy
+ 
+ logger = logging.getLogger(__name__)
+ 
+ # Span names
+ CHAT_SPAN_NAME = "openai.chat"
+ COMPLETION_SPAN_NAME = "openai.completion"
+ EMBEDDING_SPAN_NAME = "openai.embedding"
+ RESPONSE_SPAN_NAME = "openai.response"
+ 
+ 
+ def should_suppress_instrumentation() -> bool:
+     """Check if instrumentation should be suppressed"""
+     return context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) is True
+ 
+ 
+ def is_streaming_response(response: Any) -> bool:
+     """Check if response is a streaming response"""
+     return hasattr(response, "__iter__") and not isinstance(response, (str, bytes, dict))
+ 
+ 
+ def model_as_dict(obj: Any) -> Dict[str, Any]:
+     """Convert OpenAI model object to dictionary"""
+     if hasattr(obj, "model_dump"):
+         result = obj.model_dump()
+         return result if isinstance(result, dict) else {}
+     elif hasattr(obj, "to_dict"):
+         result = obj.to_dict()
+         return result if isinstance(result, dict) else {}
+     elif isinstance(obj, dict):
+         return obj
+     else:
+         return {}
+ 
+ 
+ def set_request_attributes(span: Span, kwargs: Dict[str, Any], operation_type: str) -> None:
+     """Set request attributes on span"""
+     if not span.is_recording():
+         return
+ 
+     # Set operation type
+     span.set_attribute("llm.request.type", operation_type)
+ 
+     # Common attributes
+     if kwargs.get("model"):
+         span.set_attribute("llm.request.model", kwargs["model"])
+ 
+     if kwargs.get("temperature") is not None:
+         span.set_attribute("llm.request.temperature", kwargs["temperature"])
+ 
+     if kwargs.get("max_tokens") is not None:
+         span.set_attribute("llm.request.max_tokens", kwargs["max_tokens"])
+ 
+     if kwargs.get("stream") is not None:
+         span.set_attribute("llm.stream", kwargs["stream"])
+ 
+     # Chat-specific attributes
+     if operation_type == "chat" and kwargs.get("messages"):
+         messages = kwargs["messages"]
+         if isinstance(messages, list) and len(messages) > 0:
+             span.set_attribute("llm.prompts.0.role", messages[0].get("role", ""))
+             span.set_attribute("llm.prompts.0.content", str(messages[0].get("content", "")))
+ 
+     # Response-specific attributes
+     if operation_type == "response":
+         if kwargs.get("instructions"):
+             span.set_attribute("llm.instructions", kwargs["instructions"])
+         if kwargs.get("input"):
+             span.set_attribute("llm.input", kwargs["input"])
+ 
+ 
+ def set_response_attributes(span: Span, response_dict: Dict[str, Any]) -> None:
+     """Set response attributes on span"""
+     if not span.is_recording():
+         return
+ 
+     if response_dict.get("model"):
+         span.set_attribute("llm.response.model", response_dict["model"])
+ 
+     if response_dict.get("id"):
+         span.set_attribute("llm.response.id", response_dict["id"])
+ 
+     # Usage information
+     usage = response_dict.get("usage", {})
+     if usage:
+         if usage.get("prompt_tokens"):
+             span.set_attribute("llm.usage.prompt_tokens", usage["prompt_tokens"])
+         if usage.get("completion_tokens"):
+             span.set_attribute("llm.usage.completion_tokens", usage["completion_tokens"])
+         if usage.get("total_tokens"):
+             span.set_attribute("llm.usage.total_tokens", usage["total_tokens"])
+ 
+     # Response content
+     choices = response_dict.get("choices", [])
+     if choices and len(choices) > 0:
+         first_choice = choices[0]
+         if first_choice.get("message", {}).get("content"):
+             span.set_attribute("llm.completions.0.content", first_choice["message"]["content"])
+         if first_choice.get("finish_reason"):
+             span.set_attribute("llm.completions.0.finish_reason", first_choice["finish_reason"])
+ 
+     # For responses.create
+     if response_dict.get("output_text"):
+         span.set_attribute("llm.response.output_text", response_dict["output_text"])
+ 
+ 
+ def chat_wrapper(tracer: Tracer) -> Callable[..., Any]:
+     """Wrapper for chat completions"""
+ 
+     def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+         if should_suppress_instrumentation():
+             return wrapped(*args, **kwargs)
+ 
+         # Check if streaming
+         is_streaming = kwargs.get("stream", False)
+ 
+         if is_streaming:
+             # Use start_span for streaming - returns span directly
+             span = tracer.start_span(CHAT_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"})
+ 
+             set_request_attributes(span, kwargs, "chat")
+ 
+             try:
+                 start_time = time.time()
+                 response = wrapped(*args, **kwargs)
+ 
+                 return StreamingWrapper(span=span, response=response, start_time=start_time, request_kwargs=kwargs)
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 span.record_exception(e)
+                 span.end()
+                 raise
+         else:
+             # Use start_as_current_span for non-streaming - returns context manager
+             with tracer.start_as_current_span(
+                 CHAT_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"}
+             ) as span:
+                 set_request_attributes(span, kwargs, "chat")
+ 
+                 try:
+                     start_time = time.time()
+                     response = wrapped(*args, **kwargs)
+                     end_time = time.time()
+ 
+                     response_dict = model_as_dict(response)
+                     set_response_attributes(span, response_dict)
+ 
+                     span.set_attribute("llm.response.duration", end_time - start_time)
+                     span.set_status(Status(StatusCode.OK))
+ 
+                     return response
+                 except Exception as e:
+                     span.set_status(Status(StatusCode.ERROR, str(e)))
+                     raise
+ 
+     return wrapper
+ 
+ 
+ def achat_wrapper(tracer: Tracer) -> Callable[..., Awaitable[Any]]:
+     """Async wrapper for chat completions"""
+ 
+     async def wrapper(
+         wrapped: Callable[..., Awaitable[Any]], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+     ) -> Any:
+         if should_suppress_instrumentation():
+             return await wrapped(*args, **kwargs)
+ 
+         # Check if streaming
+         is_streaming = kwargs.get("stream", False)
+ 
+         if is_streaming:
+             # Use start_span for streaming - returns span directly
+             span = tracer.start_span(CHAT_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"})
+ 
+             set_request_attributes(span, kwargs, "chat")
+ 
+             try:
+                 start_time = time.time()
+                 response = await wrapped(*args, **kwargs)
+ 
+                 return AsyncStreamingWrapper(span=span, response=response, start_time=start_time, request_kwargs=kwargs)
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 span.record_exception(e)
+                 span.end()
+                 raise
+         else:
+             # Use start_as_current_span for non-streaming - returns context manager
+             with tracer.start_as_current_span(
+                 CHAT_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"}
+             ) as span:
+                 set_request_attributes(span, kwargs, "chat")
+ 
+                 try:
+                     start_time = time.time()
+                     response = await wrapped(*args, **kwargs)
+                     end_time = time.time()
+ 
+                     response_dict = model_as_dict(response)
+                     set_response_attributes(span, response_dict)
+ 
+                     span.set_attribute("llm.response.duration", end_time - start_time)
+                     span.set_status(Status(StatusCode.OK))
+ 
+                     return response
+                 except Exception as e:
+                     span.set_status(Status(StatusCode.ERROR, str(e)))
+                     raise
+ 
+     return wrapper
+ 
+ 
+ def completion_wrapper(tracer: Tracer) -> Callable[..., Any]:
+     """Wrapper for text completions"""
+ 
+     def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+         if should_suppress_instrumentation():
+             return wrapped(*args, **kwargs)
+ 
+         is_streaming = kwargs.get("stream", False)
+ 
+         if is_streaming:
+             # Use start_span for streaming - returns span directly
+             span = tracer.start_span(
+                 COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "completion"}
+             )
+ 
+             set_request_attributes(span, kwargs, "completion")
+ 
+             try:
+                 start_time = time.time()
+                 response = wrapped(*args, **kwargs)
+ 
+                 return StreamingWrapper(span=span, response=response, start_time=start_time, request_kwargs=kwargs)
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 span.record_exception(e)
+                 span.end()
+                 raise
+         else:
+             # Use start_as_current_span for non-streaming - returns context manager
+             with tracer.start_as_current_span(
+                 COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "completion"}
+             ) as span:
+                 set_request_attributes(span, kwargs, "completion")
+ 
+                 try:
+                     start_time = time.time()
+                     response = wrapped(*args, **kwargs)
+                     end_time = time.time()
+ 
+                     response_dict = model_as_dict(response)
+                     set_response_attributes(span, response_dict)
+ 
+                     span.set_attribute("llm.response.duration", end_time - start_time)
+                     span.set_status(Status(StatusCode.OK))
+ 
+                     return response
+                 except Exception as e:
+                     span.set_status(Status(StatusCode.ERROR, str(e)))
+                     raise
+ 
+     return wrapper
+ 
+ 
+ def acompletion_wrapper(tracer: Tracer) -> Callable[..., Awaitable[Any]]:
+     """Async wrapper for text completions"""
+ 
+     async def wrapper(
+         wrapped: Callable[..., Awaitable[Any]], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+     ) -> Any:
+         if should_suppress_instrumentation():
+             return await wrapped(*args, **kwargs)
+ 
+         is_streaming = kwargs.get("stream", False)
+ 
+         if is_streaming:
+             # Use start_span for streaming - returns span directly
+             span = tracer.start_span(
+                 COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "completion"}
+             )
+ 
+             set_request_attributes(span, kwargs, "completion")
+ 
+             try:
+                 start_time = time.time()
+                 response = await wrapped(*args, **kwargs)
+ 
+                 return AsyncStreamingWrapper(span=span, response=response, start_time=start_time, request_kwargs=kwargs)
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 span.record_exception(e)
+                 span.end()
+                 raise
+         else:
+             # Use start_as_current_span for non-streaming - returns context manager
+             with tracer.start_as_current_span(
+                 COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "completion"}
+             ) as span:
+                 set_request_attributes(span, kwargs, "completion")
+ 
+                 try:
+                     start_time = time.time()
+                     response = await wrapped(*args, **kwargs)
+                     end_time = time.time()
+ 
+                     response_dict = model_as_dict(response)
+                     set_response_attributes(span, response_dict)
+ 
+                     span.set_attribute("llm.response.duration", end_time - start_time)
+                     span.set_status(Status(StatusCode.OK))
+ 
+                     return response
+                 except Exception as e:
+                     span.set_status(Status(StatusCode.ERROR, str(e)))
+                     raise
+ 
+     return wrapper
+ 
+ 
+ def embeddings_wrapper(tracer: Tracer) -> Callable[..., Any]:
+     """Wrapper for embeddings"""
+ 
+     def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+         if should_suppress_instrumentation():
+             return wrapped(*args, **kwargs)
+ 
+         # Embeddings are never streaming, always use start_as_current_span
+         with tracer.start_as_current_span(
+             EMBEDDING_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "embedding"}
+         ) as span:
+             set_request_attributes(span, kwargs, "embedding")
+ 
+             try:
+                 start_time = time.time()
+                 response = wrapped(*args, **kwargs)
+                 end_time = time.time()
+ 
+                 response_dict = model_as_dict(response)
+                 set_response_attributes(span, response_dict)
+ 
+                 span.set_attribute("llm.response.duration", end_time - start_time)
+                 span.set_status(Status(StatusCode.OK))
+ 
+                 return response
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 raise
+ 
+     return wrapper
+ 
+ 
+ def aembeddings_wrapper(tracer: Tracer) -> Callable[..., Awaitable[Any]]:
+     """Async wrapper for embeddings"""
+ 
+     async def wrapper(
+         wrapped: Callable[..., Awaitable[Any]], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+     ) -> Any:
+         if should_suppress_instrumentation():
+             return await wrapped(*args, **kwargs)
+ 
+         # Embeddings are never streaming, always use start_as_current_span
+         with tracer.start_as_current_span(
+             EMBEDDING_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "embedding"}
+         ) as span:
+             set_request_attributes(span, kwargs, "embedding")
+ 
+             try:
+                 start_time = time.time()
+                 response = await wrapped(*args, **kwargs)
+                 end_time = time.time()
+ 
+                 response_dict = model_as_dict(response)
+                 set_response_attributes(span, response_dict)
+ 
+                 span.set_attribute("llm.response.duration", end_time - start_time)
+                 span.set_status(Status(StatusCode.OK))
+ 
+                 return response
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 raise
+ 
+     return wrapper
+ 
+ 
+ def responses_wrapper(tracer: Tracer) -> Callable[..., Any]:
+     """Wrapper for responses.create (new OpenAI API)"""
+ 
+     def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+         if should_suppress_instrumentation():
+             return wrapped(*args, **kwargs)
+ 
+         # responses.create is typically not streaming, use start_as_current_span
+         with tracer.start_as_current_span(
+             RESPONSE_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "response"}
+         ) as span:
+             set_request_attributes(span, kwargs, "response")
+ 
+             try:
+                 start_time = time.time()
+                 response = wrapped(*args, **kwargs)
+                 end_time = time.time()
+ 
+                 response_dict = model_as_dict(response)
+                 set_response_attributes(span, response_dict)
+ 
+                 span.set_attribute("llm.response.duration", end_time - start_time)
+                 span.set_status(Status(StatusCode.OK))
+ 
+                 return response
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 raise
+ 
+     return wrapper
+ 
+ 
+ def aresponses_wrapper(tracer: Tracer) -> Callable[..., Awaitable[Any]]:
+     """Async wrapper for responses.create (new OpenAI API)"""
+ 
+     async def wrapper(wrapped: Callable[..., Awaitable[Any]], instance: Any, args: Any, kwargs: Dict[str, Any]) -> Any:
+         if should_suppress_instrumentation():
+             return await wrapped(*args, **kwargs)
+ 
+         # responses.create is typically not streaming, use start_as_current_span
+         with tracer.start_as_current_span(
+             RESPONSE_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "response"}
+         ) as span:
+             set_request_attributes(span, kwargs, "response")
+ 
+             try:
+                 start_time = time.time()
+                 response = await wrapped(*args, **kwargs)
+                 end_time = time.time()
+ 
+                 response_dict = model_as_dict(response)
+                 set_response_attributes(span, response_dict)
+ 
+                 span.set_attribute("llm.response.duration", end_time - start_time)
+                 span.set_status(Status(StatusCode.OK))
+ 
+                 return response
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 raise
+ 
+     return wrapper
+ 
+ 
+ class StreamingWrapper(ObjectProxy): # type: ignore[misc]
+     """Wrapper for streaming responses"""
+ 
+     def __init__(self, span: Span, response: Iterator[Any], start_time: float, request_kwargs: Dict[str, Any]) -> None:
+         super().__init__(response)
+         self._span = span
+         self._start_time = start_time
+         self._request_kwargs = request_kwargs
+         self._complete_response: Dict[str, Any] = {"choices": [], "model": ""}
+ 
+     def __iter__(self) -> Iterator[Any]:
+         return self
+ 
+     def __next__(self) -> Any:
+         try:
+             chunk = self.__wrapped__.__next__()
+             self._process_chunk(chunk)
+             return chunk
+         except StopIteration:
+             self._finalize_span()
+             raise
+ 
+     def _process_chunk(self, chunk: Any) -> None:
+         """Process streaming chunk"""
+         chunk_dict = model_as_dict(chunk)
+ 
+         # Accumulate response data
+         if chunk_dict.get("model"):
+             self._complete_response["model"] = chunk_dict["model"]
+ 
+         # Add chunk event
+         self._span.add_event("llm.content.completion.chunk")
+ 
+     def _finalize_span(self) -> None:
+         """Finalize span when streaming is complete"""
+         end_time = time.time()
+         duration = end_time - self._start_time
+ 
+         set_response_attributes(self._span, self._complete_response)
+         self._span.set_attribute("llm.response.duration", duration)
+         self._span.set_status(Status(StatusCode.OK))
+         self._span.end()
+ 
+ 
+ class AsyncStreamingWrapper(ObjectProxy): # type: ignore[misc]
+     """Async wrapper for streaming responses"""
+ 
+     def __init__(
+         self, span: Span, response: AsyncIterator[Any], start_time: float, request_kwargs: Dict[str, Any]
+     ) -> None:
+         super().__init__(response)
+         self._span = span
+         self._start_time = start_time
+         self._request_kwargs = request_kwargs
+         self._complete_response: Dict[str, Any] = {"choices": [], "model": ""}
+ 
+     def __aiter__(self) -> AsyncIterator[Any]:
+         return self
+ 
+     async def __anext__(self) -> Any:
+         try:
+             chunk = await self.__wrapped__.__anext__()
+             self._process_chunk(chunk)
+             return chunk
+         except StopAsyncIteration:
+             self._finalize_span()
+             raise
+ 
+     def _process_chunk(self, chunk: Any) -> None:
+         """Process streaming chunk"""
+         chunk_dict = model_as_dict(chunk)
+ 
+         # Accumulate response data
+         if chunk_dict.get("model"):
+             self._complete_response["model"] = chunk_dict["model"]
+ 
+         # Add chunk event
+         self._span.add_event("llm.content.completion.chunk")
+ 
+     def _finalize_span(self) -> None:
+         """Finalize span when streaming is complete"""
+         end_time = time.time()
+         duration = end_time - self._start_time
+ 
+         set_response_attributes(self._span, self._complete_response)
+         self._span.set_attribute("llm.response.duration", duration)
+         self._span.set_status(Status(StatusCode.OK))
+         self._span.end()
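
Note that these factories only build wrapt-style wrappers; the file above does not attach them to the OpenAI client. Below is a minimal sketch of how they could be wired up with `wrapt.wrap_function_wrapper`. The module and attribute paths are assumptions based on the openai>=1.x client layout, not something this diff shows; the SDK presumably does the equivalent elsewhere.

```python
# Hypothetical wiring sketch; paths assume the openai>=1.x package layout.
from opentelemetry import trace
from wrapt import wrap_function_wrapper

tracer = trace.get_tracer("netra.instrumentation.openai")  # assumed tracer name

# Patch the sync chat-completions entry point with the chat wrapper.
wrap_function_wrapper(
    "openai.resources.chat.completions",  # assumed module path
    "Completions.create",                 # assumed attribute path
    chat_wrapper(tracer),
)
```

The other factories (`completion_wrapper`, `embeddings_wrapper`, `responses_wrapper`, and the async variants) would be attached to their respective endpoints in the same way.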
@@ -1,4 +1,3 @@
- from netra.processors.error_detection_processor import ErrorDetectionProcessor
  from netra.processors.session_span_processor import SessionSpanProcessor
  
- __all__ = ["ErrorDetectionProcessor", "SessionSpanProcessor"]
+ __all__ = ["SessionSpanProcessor"]
netra/span_wrapper.py CHANGED
@@ -16,6 +16,14 @@ logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)
  
  
+ class ActionModel(BaseModel): # type: ignore[misc]
+     action: str
+     action_type: str
+     success: bool
+     affected_records: Optional[List[Dict[str, str]]] = None
+     metadata: Optional[Dict[str, str]] = None
+ 
+ 
  class UsageModel(BaseModel): # type: ignore[misc]
      model: str
      usage_type: str
@@ -32,6 +40,7 @@ class ATTRIBUTE:
      STATUS = "status"
      DURATION_MS = "duration_ms"
      ERROR_MESSAGE = "error_message"
+     ACTION = "action"
  
  
  class SpanWrapper:
@@ -144,6 +153,12 @@ class SpanWrapper:
          usage_json = json.dumps(usage_dict)
          return self.set_attribute(f"{Config.LIBRARY_NAME}.{ATTRIBUTE.USAGE}", usage_json)
  
+     def set_action(self, action: List[ActionModel]) -> "SpanWrapper":
+         """Set the action data as a JSON string."""
+         action_dict = [a.model_dump() for a in action]
+         action_json = json.dumps(action_dict)
+         return self.set_attribute(f"{Config.LIBRARY_NAME}.{ATTRIBUTE.ACTION}", action_json)
+ 
      def set_model(self, model: str) -> "SpanWrapper":
          """Set the model used."""
          return self.set_attribute(f"{Config.LIBRARY_NAME}.{ATTRIBUTE.MODEL}", model)
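
For reference, a minimal sketch of what `set_action` serializes onto the span. The `"netra"` attribute prefix is an assumption about `Config.LIBRARY_NAME`; only the `model_dump`/`json.dumps` behavior is taken from the code above.

```python
import json

from netra.span_wrapper import ActionModel

action = ActionModel(action="DB", action_type="INSERT", success=True,
                     metadata={"table": "users"})

# set_action([action]) stores the whole list as one JSON string attribute,
# e.g. span.set_attribute("netra.action", json.dumps([action.model_dump()]))
print(json.dumps([action.model_dump()]))
# [{"action": "DB", "action_type": "INSERT", "success": true,
#   "affected_records": null, "metadata": {"table": "users"}}]
```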
netra/tracer.py CHANGED
@@ -66,10 +66,9 @@ class Tracer:
              headers=self.cfg.headers,
          )
          # Add span processors for session span processing and data aggregation processing
-         from netra.processors import ErrorDetectionProcessor, SessionSpanProcessor
+         from netra.processors import SessionSpanProcessor
  
          provider.add_span_processor(SessionSpanProcessor())
-         provider.add_span_processor(ErrorDetectionProcessor())
  
          # Install appropriate span processor
          if self.cfg.disable_batch:
netra/version.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.7"
+ __version__ = "0.1.10"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: netra-sdk
- Version: 0.1.7
+ Version: 0.1.10
  Summary: A Python SDK for AI application observability that provides OpenTelemetry-based monitoring, tracing, and PII protection for LLM and vector database applications. Enables easy instrumentation, session tracking, and privacy-focused data collection for AI systems in production environments.
  License: Apache-2.0
  Keywords: netra,tracing,observability,sdk,ai,llm,vector,database
@@ -440,6 +440,58 @@ with Netra.start_span("image_generation") as span:
  
  # Get the current active open telemetry span
  current_span = span.get_current_span()
+ 
+ # Track database operations and other actions
+ action = ActionModel(
+     action="DB",
+     action_type="INSERT",
+     affected_records=[
+         {"record_id": "user_123", "record_type": "user"},
+         {"record_id": "profile_456", "record_type": "profile"}
+     ],
+     metadata={
+         "table": "users",
+         "operation_id": "tx_789",
+         "duration_ms": "45"
+     },
+     success=True
+ )
+ span.set_action([action])
+ 
+ # Record API calls
+ api_action = ActionModel(
+     action="API",
+     action_type="CALL",
+     metadata={
+         "endpoint": "/api/v1/process",
+         "method": "POST",
+         "status_code": "200",
+         "duration_ms": "120"
+     },
+     success=True
+ )
+ span.set_action([api_action])
+ ```
+ 
+ ### Action Tracking Schema
+ 
+ Action tracking follows this schema:
+ 
+ ```python
+ [
+     {
+         "action": str,  # Type of action (e.g., "DB", "API", "CACHE")
+         "action_type": str,  # Action subtype (e.g., "INSERT", "SELECT", "CALL")
+         "affected_records": [  # Optional: List of records affected
+             {
+                 "record_id": str,  # ID of the affected record
+                 "record_type": str  # Type of the record
+             }
+         ],
+         "metadata": Dict[str, str],  # Additional metadata as key-value pairs
+         "success": bool  # Whether the action succeeded
+     }
+ ]
  ```
  
  ## 🔧 Advanced Configuration