netra-sdk 0.1.30__py3-none-any.whl → 0.1.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of netra-sdk might be problematic.
- netra/__init__.py +5 -1
- netra/config.py +16 -1
- netra/instrumentation/__init__.py +24 -0
- netra/instrumentation/instruments.py +2 -0
- netra/instrumentation/litellm/__init__.py +161 -0
- netra/instrumentation/litellm/version.py +1 -0
- netra/instrumentation/litellm/wrappers.py +557 -0
- netra/processors/__init__.py +2 -1
- netra/processors/scrubbing_span_processor.py +178 -0
- netra/span_wrapper.py +15 -20
- netra/tracer.py +86 -3
- netra/version.py +1 -1
- {netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/METADATA +3 -2
- {netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/RECORD +16 -12
- {netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/LICENCE +0 -0
- {netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/WHEEL +0 -0
netra/__init__.py
CHANGED

```diff
@@ -1,7 +1,7 @@
 import atexit
 import logging
 import threading
-from typing import Any, Dict, Optional, Set
+from typing import Any, Dict, List, Optional, Set
 
 from opentelemetry import context as context_api
 from opentelemetry import trace
@@ -56,6 +56,8 @@ class Netra:
         enable_root_span: Optional[bool] = None,
         resource_attributes: Optional[Dict[str, Any]] = None,
         environment: Optional[str] = None,
+        enable_scrubbing: Optional[bool] = None,
+        blocked_spans: Optional[List[str]] = None,
         instruments: Optional[Set[NetraInstruments]] = None,
         block_instruments: Optional[Set[NetraInstruments]] = None,
     ) -> None:
@@ -77,6 +79,8 @@ class Netra:
             enable_root_span=enable_root_span,
             resource_attributes=resource_attributes,
             environment=environment,
+            enable_scrubbing=enable_scrubbing,
+            blocked_spans=blocked_spans,
         )
 
         # Configure package logging based on debug mode
```
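For illustration, a minimal sketch of how the two new options might be passed at initialization. This assumes `Netra.init()` is the public entry point behind the signature shown above (the method name and the other parameter defaults are not visible in this diff):

```python
from netra import Netra

# Hypothetical initialization showing the two options added in this release.
# `enable_scrubbing` turns on the new ScrubbingSpanProcessor; `blocked_spans`
# feeds the FilteringSpanExporter pattern list (see netra/tracer.py below).
Netra.init(
    app_name="demo-app",  # assumed existing parameter
    environment="prod",
    enable_scrubbing=True,  # scrub sensitive span attributes before export
    blocked_spans=["HealthCheck", "CloudSpanner.*"],  # drop noisy spans by name
)
```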
netra/config.py
CHANGED

```diff
@@ -1,6 +1,6 @@
 import json
 import os
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional
 
 from opentelemetry.util.re import parse_env_headers
 
@@ -19,6 +19,8 @@ class Config:
     - debug_mode: Whether to enable SDK logging; default False (bool)
     - enable_root_span: Whether to create a process root span; default False (bool)
     - resource_attributes: Custom resource attributes dict (e.g., {'env': 'prod', 'version': '1.0.0'})
+    - enable_scrubbing: Whether to enable pydantic logfire scrubbing; default False (bool)
+    - blocked_spans: List of span names (prefix/suffix patterns) to block from being exported to the tracing backend
     """
 
     # SDK Constants
@@ -38,6 +40,8 @@ class Config:
         enable_root_span: Optional[bool] = None,
         resource_attributes: Optional[Dict[str, Any]] = None,
         environment: Optional[str] = None,
+        enable_scrubbing: Optional[bool] = None,
+        blocked_spans: Optional[List[str]] = None,
     ):
         # Application name: from param, else env
         self.app_name = (
@@ -134,3 +138,14 @@ class Config:
             self.resource_attributes = {}
         else:
             self.resource_attributes = {}
+
+        # Enable scrubbing with pydantic logfire? Default False.
+        if enable_scrubbing is not None:
+            self.enable_scrubbing = enable_scrubbing
+        else:
+            env_scrub = os.getenv("NETRA_ENABLE_SCRUBBING")
+            self.enable_scrubbing = True if (env_scrub is not None and env_scrub.lower() in ("1", "true")) else False
+
+        # Blocked span names/prefix patterns
+        if blocked_spans is not None:
+            self.blocked_spans = blocked_spans
```
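The environment fallback for `enable_scrubbing` accepts `"1"` or `"true"` (case-insensitive), and an explicit parameter always wins. A standalone sketch of the same resolution logic:

```python
import os
from typing import Optional


def resolve_enable_scrubbing(param: Optional[bool]) -> bool:
    # Mirrors the Config logic above: an explicit parameter wins; otherwise
    # fall back to the NETRA_ENABLE_SCRUBBING env var ("1"/"true" => True).
    if param is not None:
        return param
    env_scrub = os.getenv("NETRA_ENABLE_SCRUBBING")
    return env_scrub is not None and env_scrub.lower() in ("1", "true")


os.environ["NETRA_ENABLE_SCRUBBING"] = "TRUE"
assert resolve_enable_scrubbing(None) is True
assert resolve_enable_scrubbing(False) is False  # explicit param overrides env
```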
netra/instrumentation/__init__.py
CHANGED

```diff
@@ -93,6 +93,10 @@ def init_instrumentations(
     if CustomInstruments.MISTRALAI in netra_custom_instruments:
         init_mistral_instrumentor()
 
+    # Initialize LiteLLM instrumentation.
+    if CustomInstruments.LITELLM in netra_custom_instruments:
+        init_litellm_instrumentation()
+
     # Initialize OpenAI instrumentation.
     if CustomInstruments.OPENAI in netra_custom_instruments:
         init_openai_instrumentation()
@@ -435,6 +439,26 @@ def init_mistral_instrumentor() -> bool:
         return False
 
 
+def init_litellm_instrumentation() -> bool:
+    """Initialize LiteLLM instrumentation.
+
+    Returns:
+        bool: True if initialization was successful, False otherwise.
+    """
+    try:
+        if is_package_installed("litellm"):
+            from netra.instrumentation.litellm import LiteLLMInstrumentor
+
+            instrumentor = LiteLLMInstrumentor()
+            if not instrumentor.is_instrumented_by_opentelemetry:
+                instrumentor.instrument()
+        return True
+    except Exception as e:
+        logging.error(f"Error initializing LiteLLM instrumentor: {e}")
+        Telemetry().log_exception(e)
+        return False
+
+
 def init_openai_instrumentation() -> bool:
     """Initialize OpenAI instrumentation.
 
```
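If the guarded initializer above is importable from `netra.instrumentation` (an assumption; the module's public exports are not shown in this diff), manual use would look like:

```python
from netra.instrumentation import init_litellm_instrumentation

# Returns True when litellm is importable and instrumentation succeeded
# (or was already applied); False if anything raised along the way.
ok = init_litellm_instrumentation()
if not ok:
    print("LiteLLM instrumentation unavailable; traces will not include litellm spans")
```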
netra/instrumentation/instruments.py
CHANGED

```diff
@@ -8,6 +8,7 @@ class CustomInstruments(Enum):
     AIOHTTP = "aiohttp"
     COHEREAI = "cohere_ai"
     HTTPX = "httpx"
+    LITELLM = "litellm"
     MISTRALAI = "mistral_ai"
     OPENAI = "openai"
     PYDANTIC_AI = "pydantic_ai"
@@ -127,6 +128,7 @@ class InstrumentSet(Enum):
     KAFKA_PYTHON = "kafka_python"
     LANCEDB = "lancedb"
     LANGCHAIN = "langchain"
+    LITELLM = "litellm"
     LLAMA_INDEX = "llama_index"
     LOGGING = "logging"
     MARQO = "marqo"
```
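The new enum member makes LiteLLM selectable (or blockable) like any other instrument. A hypothetical selection, assuming these enum members are what the `instruments`/`block_instruments` parameters ultimately accept (the mapping from `NetraInstruments` to these enums is not shown in this diff):

```python
from netra.instrumentation.instruments import CustomInstruments

# Hypothetical subset: any CustomInstruments member present in
# netra_custom_instruments is initialized by init_instrumentations.
selected = {CustomInstruments.LITELLM, CustomInstruments.OPENAI}
```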
netra/instrumentation/litellm/__init__.py
ADDED

```diff
@@ -0,0 +1,161 @@
+import logging
+import time
+from typing import Any, Collection, Dict, Optional
+
+from opentelemetry import context as context_api
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
+from opentelemetry.trace import SpanKind, Tracer, get_tracer
+from opentelemetry.trace.status import Status, StatusCode
+from wrapt import wrap_function_wrapper
+
+from netra.instrumentation.litellm.version import __version__
+from netra.instrumentation.litellm.wrappers import (
+    acompletion_wrapper,
+    aembedding_wrapper,
+    aimage_generation_wrapper,
+    completion_wrapper,
+    embedding_wrapper,
+    image_generation_wrapper,
+)
+
+logger = logging.getLogger(__name__)
+
+_instruments = ("litellm >= 1.0.0",)
+
+
+class LiteLLMInstrumentor(BaseInstrumentor):  # type: ignore[misc]
+    """
+    Custom LiteLLM instrumentor for Netra SDK with enhanced support for:
+    - completion() and acompletion() methods
+    - embedding() and aembedding() methods
+    - image_generation() and aimage_generation() methods
+    - Proper streaming/non-streaming span handling
+    - Integration with Netra tracing
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):  # type: ignore[no-untyped-def]
+        """Instrument LiteLLM methods"""
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, __version__, tracer_provider)
+
+        logger.debug("Starting LiteLLM instrumentation...")
+
+        # Force import litellm to ensure it's available for wrapping
+        try:
+            import litellm
+        except ImportError as e:
+            logger.error(f"Failed to import litellm: {e}")
+            return
+
+        # Store original functions for uninstrumentation
+        self._original_completion = getattr(litellm, "completion", None)
+        self._original_acompletion = getattr(litellm, "acompletion", None)
+        self._original_embedding = getattr(litellm, "embedding", None)
+        self._original_aembedding = getattr(litellm, "aembedding", None)
+        self._original_image_generation = getattr(litellm, "image_generation", None)
+        self._original_aimage_generation = getattr(litellm, "aimage_generation", None)
+
+        # Chat completions - use direct monkey patching with proper function wrapping
+        if self._original_completion:
+            try:
+
+                def instrumented_completion(*args, **kwargs):  # type: ignore[no-untyped-def]
+                    wrapper = completion_wrapper(tracer)
+                    return wrapper(self._original_completion, None, args, kwargs)
+
+                litellm.completion = instrumented_completion
+            except Exception as e:
+                logger.error(f"Failed to monkey-patch litellm.completion: {e}")
+
+        if self._original_acompletion:
+            try:
+
+                async def instrumented_acompletion(*args, **kwargs):  # type: ignore[no-untyped-def]
+                    wrapper = acompletion_wrapper(tracer)
+                    return await wrapper(self._original_acompletion, None, args, kwargs)
+
+                litellm.acompletion = instrumented_acompletion
+            except Exception as e:
+                logger.error(f"Failed to monkey-patch litellm.acompletion: {e}")
+
+        # Embeddings
+        if self._original_embedding:
+            try:
+
+                def instrumented_embedding(*args, **kwargs):  # type: ignore[no-untyped-def]
+                    wrapper = embedding_wrapper(tracer)
+                    return wrapper(self._original_embedding, None, args, kwargs)
+
+                litellm.embedding = instrumented_embedding
+            except Exception as e:
+                logger.error(f"Failed to monkey-patch litellm.embedding: {e}")
+
+        if self._original_aembedding:
+            try:
+
+                async def instrumented_aembedding(*args, **kwargs):  # type: ignore[no-untyped-def]
+                    wrapper = aembedding_wrapper(tracer)
+                    return await wrapper(self._original_aembedding, None, args, kwargs)
+
+                litellm.aembedding = instrumented_aembedding
+            except Exception as e:
+                logger.error(f"Failed to monkey-patch litellm.aembedding: {e}")
+
+        # Image generation
+        if self._original_image_generation:
+            try:
+
+                def instrumented_image_generation(*args, **kwargs):  # type: ignore[no-untyped-def]
+                    wrapper = image_generation_wrapper(tracer)
+                    return wrapper(self._original_image_generation, None, args, kwargs)
+
+                litellm.image_generation = instrumented_image_generation
+            except Exception as e:
+                logger.error(f"Failed to monkey-patch litellm.image_generation: {e}")
+
+        if self._original_aimage_generation:
+            try:
+
+                async def instrumented_aimage_generation(*args, **kwargs):  # type: ignore[no-untyped-def]
+                    wrapper = aimage_generation_wrapper(tracer)
+                    return await wrapper(self._original_aimage_generation, None, args, kwargs)
+
+                litellm.aimage_generation = instrumented_aimage_generation
+            except Exception as e:
+                logger.error(f"Failed to monkey-patch litellm.aimage_generation: {e}")
+
+    def _uninstrument(self, **kwargs):  # type: ignore[no-untyped-def]
+        """Uninstrument LiteLLM methods"""
+        try:
+            import litellm
+
+            # Restore original functions
+            if hasattr(self, "_original_completion") and self._original_completion:
+                litellm.completion = self._original_completion
+
+            if hasattr(self, "_original_acompletion") and self._original_acompletion:
+                litellm.acompletion = self._original_acompletion
+
+            if hasattr(self, "_original_embedding") and self._original_embedding:
+                litellm.embedding = self._original_embedding
+
+            if hasattr(self, "_original_aembedding") and self._original_aembedding:
+                litellm.aembedding = self._original_aembedding
+
+            if hasattr(self, "_original_image_generation") and self._original_image_generation:
+                litellm.image_generation = self._original_image_generation
+
+            if hasattr(self, "_original_aimage_generation") and self._original_aimage_generation:
+                litellm.aimage_generation = self._original_aimage_generation
+
+        except ImportError:
+            pass
+
+
+def should_suppress_instrumentation() -> bool:
+    """Check if instrumentation should be suppressed"""
+    return context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) is True
```
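Because `LiteLLMInstrumentor` subclasses OpenTelemetry's `BaseInstrumentor`, it can also be driven directly; a sketch:

```python
from netra.instrumentation.litellm import LiteLLMInstrumentor

instrumentor = LiteLLMInstrumentor()
if not instrumentor.is_instrumented_by_opentelemetry:
    instrumentor.instrument()  # patches litellm.completion/acompletion/embedding/...

# ... application code using litellm ...

instrumentor.uninstrument()  # restores the saved originals
```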
netra/instrumentation/litellm/version.py
ADDED

```diff
@@ -0,0 +1 @@
+__version__ = "1.0.0"
```
netra/instrumentation/litellm/wrappers.py
ADDED

```diff
@@ -0,0 +1,557 @@
+import logging
+import time
+from collections.abc import Awaitable
+from typing import Any, AsyncIterator, Callable, Dict, Iterator, Tuple
+
+from opentelemetry import context as context_api
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+from opentelemetry.semconv_ai import (
+    SpanAttributes,
+)
+from opentelemetry.trace import Span, SpanKind, Tracer
+from opentelemetry.trace.status import Status, StatusCode
+from wrapt import ObjectProxy
+
+logger = logging.getLogger(__name__)
+
+COMPLETION_SPAN_NAME = "litellm.completion"
+EMBEDDING_SPAN_NAME = "litellm.embedding"
+IMAGE_GENERATION_SPAN_NAME = "litellm.image_generation"
+
+
+def should_suppress_instrumentation() -> bool:
+    """Check if instrumentation should be suppressed"""
+    return context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) is True
+
+
+def is_streaming_response(response: Any) -> bool:
+    """Check if response is a streaming response"""
+    return hasattr(response, "__iter__") and not isinstance(response, (str, bytes, dict))
+
+
+def model_as_dict(obj: Any) -> Dict[str, Any]:
+    """Convert LiteLLM model object to dictionary"""
+    if hasattr(obj, "model_dump"):
+        result = obj.model_dump()
+        return result if isinstance(result, dict) else {}
+    elif hasattr(obj, "to_dict"):
+        result = obj.to_dict()
+        return result if isinstance(result, dict) else {}
+    elif isinstance(obj, dict):
+        return obj
+    else:
+        return {}
+
+
+def set_request_attributes(span: Span, kwargs: Dict[str, Any], operation_type: str) -> None:
+    """Set request attributes on span"""
+    if not span.is_recording():
+        return
+
+    # Set operation type
+    span.set_attribute(f"{SpanAttributes.LLM_REQUEST_TYPE}", operation_type)
+    span.set_attribute(f"{SpanAttributes.LLM_SYSTEM}", "LiteLLM")
+
+    # Common attributes
+    if kwargs.get("model"):
+        span.set_attribute(f"{SpanAttributes.LLM_REQUEST_MODEL}", kwargs["model"])
+
+    if kwargs.get("temperature") is not None:
+        span.set_attribute(f"{SpanAttributes.LLM_REQUEST_TEMPERATURE}", kwargs["temperature"])
+
+    if kwargs.get("max_tokens") is not None:
+        span.set_attribute(f"{SpanAttributes.LLM_REQUEST_MAX_TOKENS}", kwargs["max_tokens"])
+
+    if kwargs.get("stream") is not None:
+        span.set_attribute("gen_ai.stream", kwargs["stream"])
+
+    # Chat completion specific attributes
+    if operation_type == "chat" and kwargs.get("messages"):
+        messages = kwargs["messages"]
+        if isinstance(messages, list) and len(messages) > 0:
+            for index, message in enumerate(messages):
+                if isinstance(message, dict):
+                    span.set_attribute(f"{SpanAttributes.LLM_PROMPTS}.{index}.role", message.get("role", "user"))
+                    span.set_attribute(f"{SpanAttributes.LLM_PROMPTS}.{index}.content", str(message.get("content", "")))
+
+    # Embedding specific attributes
+    if operation_type == "embedding" and kwargs.get("input"):
+        input_data = kwargs["input"]
+        if isinstance(input_data, str):
+            span.set_attribute(f"{SpanAttributes.LLM_PROMPTS}.0.content", input_data)
+        elif isinstance(input_data, list):
+            for index, text in enumerate(input_data):
+                if isinstance(text, str):
+                    span.set_attribute(f"{SpanAttributes.LLM_PROMPTS}.{index}.content", text)
+
+    # Image generation specific attributes
+    if operation_type == "image_generation":
+        if kwargs.get("prompt"):
+            span.set_attribute("gen_ai.prompt", kwargs["prompt"])
+        if kwargs.get("n"):
+            span.set_attribute("gen_ai.request.n", kwargs["n"])
+        if kwargs.get("size"):
+            span.set_attribute("gen_ai.request.size", kwargs["size"])
+        if kwargs.get("quality"):
+            span.set_attribute("gen_ai.request.quality", kwargs["quality"])
+        if kwargs.get("style"):
+            span.set_attribute("gen_ai.request.style", kwargs["style"])
+
+
+def set_response_attributes(span: Span, response_dict: Dict[str, Any], operation_type: str) -> None:
+    """Set response attributes on span"""
+    if not span.is_recording():
+        return
+
+    if response_dict.get("model"):
+        span.set_attribute(f"{SpanAttributes.LLM_RESPONSE_MODEL}", response_dict["model"])
+
+    if response_dict.get("id"):
+        span.set_attribute("gen_ai.response.id", response_dict["id"])
+
+    # Usage information
+    usage = response_dict.get("usage", {})
+    if usage:
+        if usage.get("prompt_tokens"):
+            span.set_attribute(f"{SpanAttributes.LLM_USAGE_PROMPT_TOKENS}", usage["prompt_tokens"])
+        if usage.get("completion_tokens"):
+            span.set_attribute(f"{SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}", usage["completion_tokens"])
+        if usage.get("cache_read_input_tokens"):
+            span.set_attribute(f"{SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS}", usage["cache_read_input_tokens"])
+        if usage.get("cache_creation_input_tokens"):
+            span.set_attribute("gen_ai.usage.cache_creation_input_tokens", usage["cache_creation_input_tokens"])
+        if usage.get("total_tokens"):
+            span.set_attribute(f"{SpanAttributes.LLM_USAGE_TOTAL_TOKENS}", usage["total_tokens"])
+
+    # Chat completion response content
+    if operation_type == "chat":
+        choices = response_dict.get("choices", [])
+        for index, choice in enumerate(choices):
+            if choice.get("message", {}).get("role"):
+                span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.{index}.role", choice["message"]["role"])
+            if choice.get("message", {}).get("content"):
+                span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.{index}.content", choice["message"]["content"])
+            if choice.get("finish_reason"):
+                span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.{index}.finish_reason", choice["finish_reason"])
+
+    # Embedding response content
+    elif operation_type == "embedding":
+        data = response_dict.get("data", [])
+        for index, embedding_data in enumerate(data):
+            if embedding_data.get("index") is not None:
+                span.set_attribute(f"gen_ai.response.embeddings.{index}.index", embedding_data["index"])
+            if embedding_data.get("embedding"):
+                # Don't log the actual embedding vector, just its dimensions
+                embedding_vector = embedding_data["embedding"]
+                if isinstance(embedding_vector, list):
+                    span.set_attribute(f"gen_ai.response.embeddings.{index}.dimensions", len(embedding_vector))
+
+    # Image generation response content
+    elif operation_type == "image_generation":
+        data = response_dict.get("data", [])
+        for index, image_data in enumerate(data):
+            if image_data.get("url"):
+                span.set_attribute(f"gen_ai.response.images.{index}.url", image_data["url"])
+            if image_data.get("b64_json"):
+                span.set_attribute(f"gen_ai.response.images.{index}.has_b64_json", True)
+            if image_data.get("revised_prompt"):
+                span.set_attribute(f"gen_ai.response.images.{index}.revised_prompt", image_data["revised_prompt"])
+
+
+def completion_wrapper(tracer: Tracer) -> Callable[..., Any]:
+    """Wrapper for LiteLLM completion function"""
+
+    def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+        logger.debug(f"LiteLLM completion wrapper called with model: {kwargs.get('model')}")
+
+        if should_suppress_instrumentation():
+            logger.debug("LiteLLM instrumentation suppressed")
+            return wrapped(*args, **kwargs)
+
+        # Check if streaming
+        is_streaming = kwargs.get("stream", False)
+
+        if is_streaming:
+            # Use start_span for streaming - returns span directly
+            span = tracer.start_span(
+                COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"}
+            )
+
+            set_request_attributes(span, kwargs, "chat")
+
+            try:
+                start_time = time.time()
+                response = wrapped(*args, **kwargs)
+
+                return StreamingWrapper(span=span, response=response, start_time=start_time, request_kwargs=kwargs)
+            except Exception as e:
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                span.record_exception(e)
+                span.end()
+                raise
+        else:
+            # Use start_as_current_span for non-streaming - returns context manager
+            with tracer.start_as_current_span(
+                COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"}
+            ) as span:
+                set_request_attributes(span, kwargs, "chat")
+
+                try:
+                    start_time = time.time()
+                    response = wrapped(*args, **kwargs)
+                    end_time = time.time()
+
+                    response_dict = model_as_dict(response)
+                    set_response_attributes(span, response_dict, "chat")
+
+                    span.set_attribute("llm.response.duration", end_time - start_time)
+                    span.set_status(Status(StatusCode.OK))
+
+                    return response
+                except Exception as e:
+                    span.set_status(Status(StatusCode.ERROR, str(e)))
+                    raise
+
+    return wrapper
+
+
+def acompletion_wrapper(tracer: Tracer) -> Callable[..., Awaitable[Any]]:
+    """Async wrapper for LiteLLM acompletion function"""
+
+    async def wrapper(
+        wrapped: Callable[..., Awaitable[Any]], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+    ) -> Any:
+        if should_suppress_instrumentation():
+            return await wrapped(*args, **kwargs)
+
+        # Check if streaming
+        is_streaming = kwargs.get("stream", False)
+
+        if is_streaming:
+            # Use start_span for streaming - returns span directly
+            span = tracer.start_span(
+                COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"}
+            )
+
+            set_request_attributes(span, kwargs, "chat")
+
+            try:
+                start_time = time.time()
+                response = await wrapped(*args, **kwargs)
+
+                return AsyncStreamingWrapper(span=span, response=response, start_time=start_time, request_kwargs=kwargs)
+            except Exception as e:
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                span.record_exception(e)
+                span.end()
+                raise
+        else:
+            # Use start_as_current_span for non-streaming - returns context manager
+            with tracer.start_as_current_span(
+                COMPLETION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "chat"}
+            ) as span:
+                set_request_attributes(span, kwargs, "chat")
+
+                try:
+                    start_time = time.time()
+                    response = await wrapped(*args, **kwargs)
+                    end_time = time.time()
+
+                    response_dict = model_as_dict(response)
+                    set_response_attributes(span, response_dict, "chat")
+
+                    span.set_attribute("llm.response.duration", end_time - start_time)
+                    span.set_status(Status(StatusCode.OK))
+
+                    return response
+                except Exception as e:
+                    span.set_status(Status(StatusCode.ERROR, str(e)))
+                    raise
+
+    return wrapper
+
+
+def embedding_wrapper(tracer: Tracer) -> Callable[..., Any]:
+    """Wrapper for LiteLLM embedding function"""
+
+    def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+        if should_suppress_instrumentation():
+            return wrapped(*args, **kwargs)
+
+        # Embeddings are never streaming, always use start_as_current_span
+        with tracer.start_as_current_span(
+            EMBEDDING_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "embedding"}
+        ) as span:
+            set_request_attributes(span, kwargs, "embedding")
+
+            try:
+                start_time = time.time()
+                response = wrapped(*args, **kwargs)
+                end_time = time.time()
+
+                response_dict = model_as_dict(response)
+                set_response_attributes(span, response_dict, "embedding")
+
+                span.set_attribute("llm.response.duration", end_time - start_time)
+                span.set_status(Status(StatusCode.OK))
+
+                return response
+            except Exception as e:
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
+
+    return wrapper
+
+
+def aembedding_wrapper(tracer: Tracer) -> Callable[..., Awaitable[Any]]:
+    """Async wrapper for LiteLLM aembedding function"""
+
+    async def wrapper(
+        wrapped: Callable[..., Awaitable[Any]], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+    ) -> Any:
+        if should_suppress_instrumentation():
+            return await wrapped(*args, **kwargs)
+
+        # Embeddings are never streaming, always use start_as_current_span
+        with tracer.start_as_current_span(
+            EMBEDDING_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "embedding"}
+        ) as span:
+            set_request_attributes(span, kwargs, "embedding")
+
+            try:
+                start_time = time.time()
+                response = await wrapped(*args, **kwargs)
+                end_time = time.time()
+
+                response_dict = model_as_dict(response)
+                set_response_attributes(span, response_dict, "embedding")
+
+                span.set_attribute("llm.response.duration", end_time - start_time)
+                span.set_status(Status(StatusCode.OK))
+
+                return response
+            except Exception as e:
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
+
+    return wrapper
+
+
+def image_generation_wrapper(tracer: Tracer) -> Callable[..., Any]:
+    """Wrapper for LiteLLM image_generation function"""
+
+    def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+        if should_suppress_instrumentation():
+            return wrapped(*args, **kwargs)
+
+        # Image generation is never streaming, always use start_as_current_span
+        with tracer.start_as_current_span(
+            IMAGE_GENERATION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "image_generation"}
+        ) as span:
+            set_request_attributes(span, kwargs, "image_generation")
+
+            try:
+                start_time = time.time()
+                response = wrapped(*args, **kwargs)
+                end_time = time.time()
+
+                response_dict = model_as_dict(response)
+                set_response_attributes(span, response_dict, "image_generation")
+
+                span.set_attribute("llm.response.duration", end_time - start_time)
+                span.set_status(Status(StatusCode.OK))
+
+                return response
+            except Exception as e:
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
+
+    return wrapper
+
+
+def aimage_generation_wrapper(tracer: Tracer) -> Callable[..., Awaitable[Any]]:
+    """Async wrapper for LiteLLM aimage_generation function"""
+
+    async def wrapper(
+        wrapped: Callable[..., Awaitable[Any]], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+    ) -> Any:
+        if should_suppress_instrumentation():
+            return await wrapped(*args, **kwargs)
+
+        # Image generation is never streaming, always use start_as_current_span
+        with tracer.start_as_current_span(
+            IMAGE_GENERATION_SPAN_NAME, kind=SpanKind.CLIENT, attributes={"llm.request.type": "image_generation"}
+        ) as span:
+            set_request_attributes(span, kwargs, "image_generation")
+
+            try:
+                start_time = time.time()
+                response = await wrapped(*args, **kwargs)
+                end_time = time.time()
+
+                response_dict = model_as_dict(response)
+                set_response_attributes(span, response_dict, "image_generation")
+
+                span.set_attribute("llm.response.duration", end_time - start_time)
+                span.set_status(Status(StatusCode.OK))
+
+                return response
+            except Exception as e:
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
+
+    return wrapper
+
+
+class StreamingWrapper(ObjectProxy):  # type: ignore[misc]
+    """Wrapper for streaming responses"""
+
+    def __init__(self, span: Span, response: Iterator[Any], start_time: float, request_kwargs: Dict[str, Any]) -> None:
+        super().__init__(response)
+        self._span = span
+        self._start_time = start_time
+        self._request_kwargs = request_kwargs
+        self._complete_response: Dict[str, Any] = {"choices": [], "model": ""}
+        self._content_parts: list[str] = []
+
+    def __iter__(self) -> Iterator[Any]:
+        return self
+
+    def __next__(self) -> Any:
+        try:
+            chunk = self.__wrapped__.__next__()
+            self._process_chunk(chunk)
+            return chunk
+        except StopIteration:
+            self._finalize_span()
+            raise
+
+    def _process_chunk(self, chunk: Any) -> None:
+        """Process streaming chunk"""
+        chunk_dict = model_as_dict(chunk)
+
+        # Accumulate response data
+        if chunk_dict.get("model"):
+            self._complete_response["model"] = chunk_dict["model"]
+
+        # Accumulate usage information from chunks
+        if chunk_dict.get("usage"):
+            self._complete_response["usage"] = chunk_dict["usage"]
+
+        # Collect content from delta
+        choices = chunk_dict.get("choices", [])
+        for choice in choices:
+            delta = choice.get("delta", {})
+            if delta.get("content"):
+                self._content_parts.append(delta["content"])
+
+            # Collect finish_reason from choices
+            if choice.get("finish_reason"):
+                if "choices" not in self._complete_response:
+                    self._complete_response["choices"] = []
+                # Ensure we have enough choice entries
+                while len(self._complete_response["choices"]) <= len(choices) - 1:
+                    self._complete_response["choices"].append(
+                        {"message": {"role": "assistant", "content": ""}, "finish_reason": None}
+                    )
+
+                choice_index = choice.get("index", 0)
+                if choice_index < len(self._complete_response["choices"]):
+                    self._complete_response["choices"][choice_index]["finish_reason"] = choice["finish_reason"]
+
+        # Add chunk event
+        self._span.add_event("llm.content.completion.chunk")
+
+    def _finalize_span(self) -> None:
+        """Finalize span when streaming is complete"""
+        end_time = time.time()
+        duration = end_time - self._start_time
+
+        # Set accumulated content
+        if self._content_parts:
+            full_content = "".join(self._content_parts)
+            self._span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.content", full_content)
+            self._span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant")
+
+        set_response_attributes(self._span, self._complete_response, "chat")
+        self._span.set_attribute("llm.response.duration", duration)
+        self._span.set_status(Status(StatusCode.OK))
+        self._span.end()
+
+
+class AsyncStreamingWrapper(ObjectProxy):  # type: ignore[misc]
+    """Async wrapper for streaming responses"""
+
+    def __init__(
+        self, span: Span, response: AsyncIterator[Any], start_time: float, request_kwargs: Dict[str, Any]
+    ) -> None:
+        super().__init__(response)
+        self._span = span
+        self._start_time = start_time
+        self._request_kwargs = request_kwargs
+        self._complete_response: Dict[str, Any] = {"choices": [], "model": ""}
+        self._content_parts: list[str] = []
+
+    def __aiter__(self) -> AsyncIterator[Any]:
+        return self
+
+    async def __anext__(self) -> Any:
+        try:
+            chunk = await self.__wrapped__.__anext__()
+            self._process_chunk(chunk)
+            return chunk
+        except StopAsyncIteration:
+            self._finalize_span()
+            raise
+
+    def _process_chunk(self, chunk: Any) -> None:
+        """Process streaming chunk"""
+        chunk_dict = model_as_dict(chunk)
+
+        # Accumulate response data
+        if chunk_dict.get("model"):
+            self._complete_response["model"] = chunk_dict["model"]
+
+        # Accumulate usage information from chunks
+        if chunk_dict.get("usage"):
+            self._complete_response["usage"] = chunk_dict["usage"]
+
+        # Collect content from delta
+        choices = chunk_dict.get("choices", [])
+        for choice in choices:
+            delta = choice.get("delta", {})
+            if delta.get("content"):
+                self._content_parts.append(delta["content"])
+
+            # Collect finish_reason from choices
+            if choice.get("finish_reason"):
+                if "choices" not in self._complete_response:
+                    self._complete_response["choices"] = []
+                # Ensure we have enough choice entries
+                while len(self._complete_response["choices"]) <= len(choices) - 1:
+                    self._complete_response["choices"].append(
+                        {"message": {"role": "assistant", "content": ""}, "finish_reason": None}
+                    )
+
+                choice_index = choice.get("index", 0)
+                if choice_index < len(self._complete_response["choices"]):
+                    self._complete_response["choices"][choice_index]["finish_reason"] = choice["finish_reason"]
+
+        # Add chunk event
+        self._span.add_event("llm.content.completion.chunk")
+
+    def _finalize_span(self) -> None:
+        """Finalize span when streaming is complete"""
+        end_time = time.time()
+        duration = end_time - self._start_time
+
+        # Set accumulated content
+        if self._content_parts:
+            full_content = "".join(self._content_parts)
+            self._span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.content", full_content)
+            self._span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant")
+
+        set_response_attributes(self._span, self._complete_response, "chat")
+        self._span.set_attribute("llm.response.duration", duration)
+        self._span.set_status(Status(StatusCode.OK))
+        self._span.end()
```
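The proxy classes mean a streamed call keeps its usual iterator shape; the span is only ended once the stream is exhausted. A hedged usage sketch (the model name and message are placeholders):

```python
import litellm  # assumed to be instrumented as above

# Streaming: the patched litellm.completion returns a StreamingWrapper that
# proxies the real iterator. Each chunk fires an "llm.content.completion.chunk"
# event, and StopIteration triggers _finalize_span(), which records the joined
# content plus usage and ends the span.
stream = litellm.completion(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
)
for chunk in stream:
    pass  # consume fully so the span is finalized
```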
netra/processors/__init__.py
CHANGED

```diff
@@ -1,4 +1,5 @@
 from netra.processors.instrumentation_span_processor import InstrumentationSpanProcessor
+from netra.processors.scrubbing_span_processor import ScrubbingSpanProcessor
 from netra.processors.session_span_processor import SessionSpanProcessor
 
-__all__ = ["SessionSpanProcessor", "InstrumentationSpanProcessor"]
+__all__ = ["SessionSpanProcessor", "InstrumentationSpanProcessor", "ScrubbingSpanProcessor"]
```
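Exporting `ScrubbingSpanProcessor` from the package lets it be attached like any other span processor; the SDK does exactly this in `netra/tracer.py` (below) when `enable_scrubbing` is set:

```python
from opentelemetry.sdk.trace import TracerProvider

from netra.processors import ScrubbingSpanProcessor

provider = TracerProvider()
# Attributes are scrubbed in on_end, i.e. after the span completes but
# before any exporter sees it.
provider.add_span_processor(ScrubbingSpanProcessor())
```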
netra/processors/scrubbing_span_processor.py
ADDED

```diff
@@ -0,0 +1,178 @@
+import logging
+import re
+from typing import Any, Dict, Optional, Union
+
+from opentelemetry import context as otel_context
+from opentelemetry import trace
+from opentelemetry.sdk.trace import SpanProcessor
+
+logger = logging.getLogger(__name__)
+
+
+class ScrubbingSpanProcessor(SpanProcessor):  # type: ignore[misc]
+    """OpenTelemetry span processor that scrubs sensitive data from span attributes using pydantic logfire patterns."""
+
+    # Common patterns for sensitive data detection (based on pydantic logfire scrubbing)
+    SENSITIVE_PATTERNS = {
+        # API keys first to avoid other patterns interfering
+        "api_key": re.compile(
+            r"(?:Token:\s*\S{32,})"  # scrub entire "Token: <value>" where value is 32+ non-space
+            r"|(?:sk-[A-Za-z0-9]{16,})"  # scrub only the sk-... token (keep labels like "API Key:")
+        ),
+        "email": re.compile(r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b", re.IGNORECASE),
+        "phone": re.compile(r"(?:\+?1[-.\s]?)?\(?[0-9]{3}\)?[-.\s]?[0-9]{3}[-.\s]?[0-9]{4}"),
+        # Run credit card BEFORE SSN to avoid SSN partially matching inside card numbers
+        "credit_card": re.compile(r"(?<!\d)(?:4\d{15}|5[1-5]\d{14}|3[47]\d{13}|6(?:011|5\d{2})\d{12})(?!\d)"),
+        "ssn": re.compile(r"\b\d{3}-?\d{2}-?\d{4}\b"),
+        "password": re.compile(r"(?i)(?:password|passwd|pwd|secret|token)\s*[:=]\s*\S+"),
+        "bearer_token": re.compile(r"(?i)(?:authorization:\s*)?bearer\s+[A-Za-z0-9\-._~+/]+=*"),
+        "authorization": re.compile(r"(?i)authorization\s*:\s*\S+"),
+    }
+
+    # Sensitive attribute keys that should be scrubbed
+    SENSITIVE_KEYS = {
+        "password",
+        "passwd",
+        "pwd",
+        "secret",
+        "token",
+        "key",
+        "api_key",
+        "auth",
+        "authorization",
+        "bearer",
+        "credential",
+        "private_key",
+        "access_token",
+        "refresh_token",
+        "session_token",
+        "x-api-key",
+        "x-auth-token",
+        "cookie",
+        "set-cookie",
+    }
+
+    def __init__(self):  # type: ignore[no-untyped-def]
+        """Initialize the scrubbing span processor."""
+        self.scrub_replacement = "[SCRUBBED]"
+
+    def on_start(self, span: trace.Span, parent_context: Optional[otel_context.Context] = None) -> None:
+        """Process span when it starts - no scrubbing needed here."""
+
+    def on_end(self, span: trace.Span) -> None:
+        """Scrub sensitive data from span attributes when span ends."""
+        try:
+            # Get span attributes
+            if hasattr(span, "_attributes") and span._attributes:
+                scrubbed_attributes = {}
+                for key, value in span._attributes.items():
+                    scrubbed_key, scrubbed_value = self._scrub_key_value(key, value)
+                    scrubbed_attributes[scrubbed_key] = scrubbed_value
+
+                # Replace the attributes with scrubbed versions
+                span._attributes = scrubbed_attributes
+
+        except Exception as e:
+            logger.exception(f"Error scrubbing span attributes: {e}")
+
+    def _scrub_key_value(self, key: str, value: Any) -> tuple[str, Any]:
+        """Scrub sensitive data from a key-value pair.
+
+        Args:
+            key: The attribute key
+            value: The attribute value
+
+        Returns:
+            Tuple of (scrubbed_key, scrubbed_value)
+        """
+        # Check if key itself is sensitive and value is a simple type (string, number, etc.)
+        if self._is_sensitive_key(key) and not isinstance(value, (dict, list, tuple)):
+            return key, self.scrub_replacement
+
+        # Scrub value based on its type
+        if isinstance(value, str):
+            scrubbed_value = self._scrub_string_value(value)
+            return key, scrubbed_value
+        elif isinstance(value, dict):
+            return key, self._scrub_dict_value(value)
+        elif isinstance(value, (list, tuple)):
+            return key, self._scrub_list_value(value)
+
+        return key, value
+
+    def _is_sensitive_key(self, key: str) -> bool:
+        """Check if a key is considered sensitive.
+
+        Args:
+            key: The key to check
+
+        Returns:
+            True if the key is sensitive, False otherwise
+        """
+        key_lower = key.lower()
+        return any(sensitive_key in key_lower for sensitive_key in self.SENSITIVE_KEYS)
+
+    def _scrub_string_value(self, value: str) -> str:
+        """Scrub sensitive patterns from a string value.
+
+        Args:
+            value: The string value to scrub
+
+        Returns:
+            The scrubbed string value
+        """
+        scrubbed_value = value
+
+        # Early catch-all for contiguous 13-19 digit sequences (credit/debit cards)
+        scrubbed_value = re.sub(r"(?<!\d)\d{13,19}(?!\d)", self.scrub_replacement, scrubbed_value)
+
+        for pattern_name, pattern in self.SENSITIVE_PATTERNS.items():
+            if pattern.search(scrubbed_value):
+                scrubbed_value = pattern.sub(self.scrub_replacement, scrubbed_value)
+
+        # No extra fallback required now that we pre-scrub 13-19 digit sequences
+
+        return scrubbed_value
+
+    def _scrub_dict_value(self, value: Dict[str, Any]) -> Dict[str, Any]:
+        """Recursively scrub sensitive data from a dictionary value.
+
+        Args:
+            value: The dictionary value to scrub
+
+        Returns:
+            The scrubbed dictionary value
+        """
+        scrubbed_dict = {}
+        for k, v in value.items():
+            scrubbed_k, scrubbed_v = self._scrub_key_value(k, v)
+            scrubbed_dict[scrubbed_k] = scrubbed_v
+        return scrubbed_dict
+
+    def _scrub_list_value(self, value: Union[list, tuple]) -> Union[list, tuple] | None:  # type: ignore[type-arg]
+        """Recursively scrub sensitive data from a list/tuple value.
+
+        Args:
+            value: The list/tuple value to scrub
+
+        Returns:
+            The scrubbed list/tuple value
+        """
+        scrubbed_items = []
+        for item in value:
+            if isinstance(item, str):
+                scrubbed_items.append(self._scrub_string_value(item))
+            elif isinstance(item, dict):
+                scrubbed_items.append(self._scrub_dict_value(item))  # type: ignore[arg-type]
+            elif isinstance(item, (list, tuple)):
+                scrubbed_items.append(self._scrub_list_value(item))  # type: ignore[arg-type]
+            else:
+                scrubbed_items.append(item)
+
+        return type(value)(scrubbed_items)
+
+    def force_flush(self, timeout_millis: int = 30000) -> None:
+        """Force flush - no-op for scrubbing processor."""
+
+    def shutdown(self) -> None:
+        """Shutdown - no-op for scrubbing processor."""
```
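To see the string-scrubbing order in action, the catch-all 13-19 digit pre-scrub runs before the named patterns. A small standalone check using three of the regexes verbatim (the sample text is illustrative):

```python
import re

SCRUB = "[SCRUBBED]"
text = "password: hunter2, card 4111111111111111, mail a@b.co"

# Step 1: catch-all for contiguous 13-19 digit runs (same regex as the pre-scrub above).
text = re.sub(r"(?<!\d)\d{13,19}(?!\d)", SCRUB, text)
# Step 2: the password/token rule from SENSITIVE_PATTERNS.
text = re.sub(r"(?i)(?:password|passwd|pwd|secret|token)\s*[:=]\s*\S+", SCRUB, text)
# Step 3: the email rule.
text = re.sub(r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b", SCRUB, text, flags=re.IGNORECASE)

print(text)  # -> "[SCRUBBED] card [SCRUBBED], mail [SCRUBBED]"
```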
netra/span_wrapper.py
CHANGED

```diff
@@ -4,10 +4,8 @@ import time
 from datetime import datetime
 from typing import Any, Dict, List, Literal, Optional
 
-from opentelemetry import context as context_api
 from opentelemetry import trace
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.trace.propagation import set_span_in_context
 from pydantic import BaseModel
 
 from netra.config import Config
@@ -72,18 +70,19 @@ class SpanWrapper:
         # OpenTelemetry span management
         self.tracer = trace.get_tracer(module_name)
         self.span: Optional[trace.Span] = None
-
+        # Internal context manager to manage current-span scope safely
+        self._span_cm: Optional[Any] = None
 
     def __enter__(self) -> "SpanWrapper":
         """Start the span wrapper, begin time tracking, and create OpenTelemetry span."""
         self.start_time = time.time()
 
-        # Create OpenTelemetry span
-
-
-
-
-        self.
+        # Create OpenTelemetry span and make it current using OTel's context manager
+        # Store the context manager so we can close it in __exit__
+        self._span_cm = self.tracer.start_as_current_span(
+            name=self.name, kind=SpanKind.CLIENT, attributes=self.attributes
+        )
+        self.span = self._span_cm.__enter__()
 
         # Register with SessionManager for name-based lookup
         try:
@@ -93,7 +92,6 @@ class SpanWrapper:
         except Exception:
             logger.exception("Failed to register span '%s' with SessionManager", self.name)
 
-        logger.info(f"Started span wrapper: {self.name}")
         return self
 
     def __exit__(self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Any) -> Literal[False]:
@@ -127,22 +125,19 @@ class SpanWrapper:
         for key, value in self.attributes.items():
             self.span.set_attribute(key, value)
 
-        # End OpenTelemetry span
+        # End OpenTelemetry span via the context manager (also clears current context)
         if self.span:
            # Unregister from SessionManager before ending span
            try:
                SessionManager.unregister_span(self.name, self.span)
            except Exception:
                logger.exception("Failed to unregister span '%s' from SessionManager", self.name)
-
-
-
-
-
-
-            if duration_ms is not None
-            else f"Ended span wrapper: {self.name} (Status: {self.status})"
-        )
+            if self._span_cm is not None:
+                try:
+                    # Delegate to OTel CM to properly end span and restore context
+                    self._span_cm.__exit__(exc_type, exc_val, exc_tb)
+                finally:
+                    self._span_cm = None
 
         # Don't suppress exceptions
         return False
```

(Several deleted lines in the old side of this file were truncated by the diff viewer and are preserved as-is above.)
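The refactor replaces manual `start_span` bookkeeping with delegation to OpenTelemetry's own context manager, which both ends the span and restores the previous context. The pattern in isolation, as a minimal sketch (class name is hypothetical):

```python
from opentelemetry import trace


class ScopedSpan:
    """Minimal sketch of the delegation pattern used by SpanWrapper."""

    def __init__(self, name: str) -> None:
        self._tracer = trace.get_tracer(__name__)
        self._name = name
        self._cm = None

    def __enter__(self) -> "ScopedSpan":
        # start_as_current_span returns a context manager; entering it
        # starts the span AND makes it the current span.
        self._cm = self._tracer.start_as_current_span(self._name)
        self.span = self._cm.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> bool:
        if self._cm is not None:
            try:
                # Ends the span and restores the prior context in one call.
                self._cm.__exit__(exc_type, exc_val, exc_tb)
            finally:
                self._cm = None
        return False  # never suppress exceptions
```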
netra/tracer.py
CHANGED

```diff
@@ -5,16 +5,18 @@ including exporter setup and span processor configuration.
 """
 
 import logging
-from typing import Any, Dict
+from typing import Any, Dict, List, Sequence
 
 from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.sdk.resources import DEPLOYMENT_ENVIRONMENT, SERVICE_NAME, Resource
-from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace import ReadableSpan, TracerProvider
 from opentelemetry.sdk.trace.export import (
     BatchSpanProcessor,
     ConsoleSpanExporter,
     SimpleSpanProcessor,
+    SpanExporter,
+    SpanExportResult,
 )
 
 from netra.config import Config
@@ -22,6 +24,75 @@ from netra.config import Config
 logger = logging.getLogger(__name__)
 
 
+class FilteringSpanExporter(SpanExporter):  # type: ignore[misc]
+    """
+    SpanExporter wrapper that filters out spans by name.
+
+    Matching rules:
+    - Exact match: pattern "Foo" blocks span.name == "Foo".
+    - Prefix match: pattern ending with '*' (e.g., "CloudSpanner.*") blocks spans whose
+      names start with the prefix before '*', e.g., "CloudSpanner.", "CloudSpanner.Query".
+    - Suffix match: pattern starting with '*' (e.g., "*.Query") blocks spans whose
+      names end with the suffix after '*', e.g., "DB.Query", "Search.Query".
+    """
+
+    def __init__(self, exporter: SpanExporter, patterns: Sequence[str]) -> None:
+        self._exporter = exporter
+        # Normalize once for efficient checks
+        exact: List[str] = []
+        prefixes: List[str] = []
+        suffixes: List[str] = []
+        for p in patterns:
+            if not p:
+                continue
+            if p.endswith("*") and not p.startswith("*"):
+                prefixes.append(p[:-1])
+            elif p.startswith("*") and not p.endswith("*"):
+                suffixes.append(p[1:])
+            else:
+                exact.append(p)
+        self._exact = set(exact)
+        self._prefixes = prefixes
+        self._suffixes = suffixes
+
+    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
+        filtered: List[ReadableSpan] = []
+        for s in spans:
+            name = getattr(s, "name", None)
+            if name is None:
+                filtered.append(s)
+                continue
+            if name in self._exact:
+                continue
+            blocked = False
+            for pref in self._prefixes:
+                if name.startswith(pref):
+                    blocked = True
+                    break
+            if not blocked and self._suffixes:
+                for suf in self._suffixes:
+                    if name.endswith(suf):
+                        blocked = True
+                        break
+            if not blocked:
+                filtered.append(s)
+        if not filtered:
+            return SpanExportResult.SUCCESS
+        return self._exporter.export(filtered)
+
+    def shutdown(self) -> None:
+        try:
+            self._exporter.shutdown()
+        except Exception:
+            pass
+
+    def force_flush(self, timeout_millis: int = 30000) -> Any:
+        try:
+            return self._exporter.force_flush(timeout_millis)
+        except Exception:
+            return True
+
+
 class Tracer:
     """
     Configures Netra's OpenTelemetry tracer with OTLP exporter (or Console exporter as fallback)
@@ -65,12 +136,24 @@ class Tracer:
             endpoint=self._format_endpoint(self.cfg.otlp_endpoint),
             headers=self.cfg.headers,
         )
+        # Wrap exporter with filtering if blocked span patterns are provided
+        try:
+            patterns = getattr(self.cfg, "blocked_spans", None)
+            if patterns:
+                exporter = FilteringSpanExporter(exporter, patterns)
+                logger.info("Enabled FilteringSpanExporter with %d pattern(s)", len(patterns))
+        except Exception as e:
+            logger.warning("Failed to enable FilteringSpanExporter: %s", e)
         # Add span processors: first instrumentation wrapper, then session processor
-        from netra.processors import InstrumentationSpanProcessor, SessionSpanProcessor
+        from netra.processors import InstrumentationSpanProcessor, ScrubbingSpanProcessor, SessionSpanProcessor
 
        provider.add_span_processor(InstrumentationSpanProcessor())
        provider.add_span_processor(SessionSpanProcessor())
 
+        # Add scrubbing processor if enabled
+        if self.cfg.enable_scrubbing:
+            provider.add_span_processor(ScrubbingSpanProcessor())  # type: ignore[no-untyped-call]
+
         # Install appropriate span processor
         if self.cfg.disable_batch:
             provider.add_span_processor(SimpleSpanProcessor(exporter))
```
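The matching rules can be exercised without a real exporter; a small standalone check mirroring the normalization above (the pattern strings are illustrative):

```python
from typing import List


def is_blocked(name: str, patterns: List[str]) -> bool:
    # Same three rules as FilteringSpanExporter: exact, "prefix*", "*suffix".
    for p in patterns:
        if p.endswith("*") and not p.startswith("*"):
            if name.startswith(p[:-1]):
                return True
        elif p.startswith("*") and not p.endswith("*"):
            if name.endswith(p[1:]):
                return True
        elif name == p:
            return True
    return False


patterns = ["HealthCheck", "CloudSpanner.*", "*.Query"]
assert is_blocked("HealthCheck", patterns)          # exact match
assert is_blocked("CloudSpanner.Commit", patterns)  # prefix match
assert is_blocked("DB.Query", patterns)             # suffix match
assert not is_blocked("UserLogin", patterns)        # exported normally
```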
netra/version.py
CHANGED

```diff
@@ -1 +1 @@
-__version__ = "0.1.30"
+__version__ = "0.1.33"
```
{netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: netra-sdk
-Version: 0.1.30
+Version: 0.1.33
 Summary: A Python SDK for AI application observability that provides OpenTelemetry-based monitoring, tracing, and PII protection for LLM and vector database applications. Enables easy instrumentation, session tracking, and privacy-focused data collection for AI systems in production environments.
 License: Apache-2.0
 Keywords: netra,tracing,observability,sdk,ai,llm,vector,database
@@ -72,7 +72,7 @@ Requires-Dist: presidio-anonymizer (==2.2.358) ; extra == "presidio"
 Requires-Dist: stanza (>=1.10.1,<2.0.0) ; extra == "presidio"
 Requires-Dist: traceloop-sdk (>=0.40.7,<0.43.0)
 Requires-Dist: transformers (==4.51.3) ; extra == "presidio"
-Project-URL:
+Project-URL: Changelog, https://github.com/KeyValueSoftwareSystems/netra-sdk-py/blob/main/CHANGELOG.md
 Project-URL: Documentation, https://github.com/KeyValueSoftwareSystems/netra-sdk-py/blob/main/README.md
 Project-URL: Homepage, https://github.com/KeyValueSoftwareSystems/netra-sdk-py
 Project-URL: Repository, https://github.com/KeyValueSoftwareSystems/netra-sdk-py
@@ -303,6 +303,7 @@ async def async_span(data):
 - **CrewAI** - Multi-agent AI systems
 - **Pydantic AI** - AI model communication standard
 - **MCP (Model Context Protocol)** - AI model communication standard
+- **LiteLLM** - LLM provider agnostic client
 
 ## 🛡️ Privacy Protection & Security
 
```
{netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/RECORD
CHANGED

```diff
@@ -1,15 +1,15 @@
-netra/__init__.py,sha256=
+netra/__init__.py,sha256=bBnv8InguoqBleHuA5KNU145eozdPOooxQrIMaKgJ5c,10942
 netra/anonymizer/__init__.py,sha256=KeGPPZqKVZbtkbirEKYTYhj6aZHlakjdQhD7QHqBRio,133
 netra/anonymizer/anonymizer.py,sha256=IcrYkdwWrFauGWUeAW-0RwrSUM8VSZCFNtoywZhvIqU,3778
 netra/anonymizer/base.py,sha256=ytPxHCUD2OXlEY6fNTuMmwImNdIjgj294I41FIgoXpU,5946
 netra/anonymizer/fp_anonymizer.py,sha256=_6svIYmE0eejdIMkhKBUWCNjGtGimtrGtbLvPSOp8W4,6493
-netra/config.py,sha256
+netra/config.py,sha256=51m8R0NoOrw58gMV7arOniEuFdJ7EIu3PNdFtIQ5xfg,6893
 netra/decorators.py,sha256=yuQP02sdvTRIYkv-myNcP8q7dmPq3ME1AJZxJtryayI,8720
 netra/exceptions/__init__.py,sha256=uDgcBxmC4WhdS7HRYQk_TtJyxH1s1o6wZmcsnSHLAcM,174
 netra/exceptions/injection.py,sha256=ke4eUXRYUFJkMZgdSyPPkPt5PdxToTI6xLEBI0hTWUQ,1332
 netra/exceptions/pii.py,sha256=MT4p_x-zH3VtYudTSxw1Z9qQZADJDspq64WrYqSWlZc,2438
 netra/input_scanner.py,sha256=At6N9gNY8cR0O6S8x3K6swWBV3P1a_9O-XBNM_pcKz4,5348
-netra/instrumentation/__init__.py,sha256=
+netra/instrumentation/__init__.py,sha256=HdG3n5TxPRUNlOxsqjlvwDmBcnm3UtYx1OecLhnLeQM,41578
 netra/instrumentation/aiohttp/__init__.py,sha256=M1kuF0R3gKY5rlbhEC1AR13UWHelmfokluL2yFysKWc,14398
 netra/instrumentation/aiohttp/version.py,sha256=Zy-0Aukx-HS_Mo3NKPWg-hlUoWKDzS0w58gLoVtJec8,24
 netra/instrumentation/cohere/__init__.py,sha256=3XwmCAZwZiMkHdNN3YvcBOLsNCx80ymbU31TyMzv1IY,17685
@@ -22,7 +22,10 @@ netra/instrumentation/google_genai/utils.py,sha256=2OeSN5jUaMKF4x5zWiW65R1LB_a44
 netra/instrumentation/google_genai/version.py,sha256=Hww1duZrC8kYK7ThBSQVyz0HNOb0ys_o8Pln-wVQ1hI,23
 netra/instrumentation/httpx/__init__.py,sha256=w1su_eQP_w5ZJHq0Lf-4miF5zM4OOW0ItmRp0wi85Ew,19388
 netra/instrumentation/httpx/version.py,sha256=ZRQKbgDaGz_yuLk-cUKuk6ZBKCSRKZC8nQd041NRNXk,23
-netra/instrumentation/instruments.py,sha256=
+netra/instrumentation/instruments.py,sha256=O6MI_BO-5EBkVqI-dr5eqhYnk8mP5QEpI0RWJ7Fe3FQ,4349
+netra/instrumentation/litellm/__init__.py,sha256=H9FsdEq-CL39zbl_dLm8D43-D1vAjoNqFTBpbmZsVXs,6740
+netra/instrumentation/litellm/version.py,sha256=J-j-u0itpEFT6irdmWmixQqYMadNl1X91TxUmoiLHMI,22
+netra/instrumentation/litellm/wrappers.py,sha256=H_UG0et6PUmj6CQagvNzbs_WodNTMruzzGOHhedmTko,22840
 netra/instrumentation/mistralai/__init__.py,sha256=RE0b-rS6iXdoynJMFKHL9s97eYo5HghrJa013fR4ZhI,18910
 netra/instrumentation/mistralai/config.py,sha256=XCyo3mk30qkvqyCqeTrKwROahu0gcOEwmbDLOo53J5k,121
 netra/instrumentation/mistralai/utils.py,sha256=nhdIer5gJFxuGwg8FCT222hggDHeMQDhJctnDSwLqcc,894
@@ -37,15 +40,16 @@ netra/instrumentation/pydantic_ai/wrappers.py,sha256=6cfIRvELBS4d9G9TttNYcHGueNI
 netra/instrumentation/weaviate/__init__.py,sha256=EOlpWxobOLHYKqo_kMct_7nu26x1hr8qkeG5_h99wtg,4330
 netra/instrumentation/weaviate/version.py,sha256=PiCZHjonujPbnIn0KmD3Yl68hrjPRG_oKe5vJF3mmG8,24
 netra/pii.py,sha256=Rn4SjgTJW_aw9LcbjLuMqF3fKd9b1ndlYt1CaK51Ge0,33125
-netra/processors/__init__.py,sha256=
+netra/processors/__init__.py,sha256=TLVBKk4Bli7MOyHTy_F-4NSm0thzIcJcZAVVNoq6gK8,333
 netra/processors/instrumentation_span_processor.py,sha256=Ef5FTr8O5FLHcIkBAW3ueU1nlkV2DuOi-y5iIwHzldQ,4252
+netra/processors/scrubbing_span_processor.py,sha256=dJ86Ncmjvmrhm_uAdGTwcGvRpZbVVWqD9AOFwEMWHZY,6701
 netra/processors/session_span_processor.py,sha256=qcsBl-LnILWefsftI8NQhXDGb94OWPc8LvzhVA0JS_c,2432
 netra/scanner.py,sha256=kyDpeZiscCPb6pjuhS-sfsVj-dviBFRepdUWh0sLoEY,11554
 netra/session_manager.py,sha256=AoQa-k4dFcq7PeOD8G8DNzhLzL1JrHUW6b_y8mRyTQo,10255
-netra/span_wrapper.py,sha256=
-netra/tracer.py,sha256=
-netra/version.py,sha256=
-netra_sdk-0.1.
-netra_sdk-0.1.
-netra_sdk-0.1.
-netra_sdk-0.1.
+netra/span_wrapper.py,sha256=IygQX78xQRlL_Z1MfKfUbv0okihx92qNClnRlYFtRNc,8004
+netra/tracer.py,sha256=8stV4UUAiwsViGOgjKnv0MXse16ByWguYFZ4SGZJS_c,6712
+netra/version.py,sha256=gzg6nU6x2Uud0fXG6Kts9v4UFjYEjLGQu5DaW7kU0qc,23
+netra_sdk-0.1.33.dist-info/LICENCE,sha256=8B_UoZ-BAl0AqiHAHUETCgd3I2B9yYJ1WEQtVb_qFMA,11359
+netra_sdk-0.1.33.dist-info/METADATA,sha256=Wa0rozvrmQQyzLtouKrO8V6HzniurPfK8FLm7qB6l3U,28210
+netra_sdk-0.1.33.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+netra_sdk-0.1.33.dist-info/RECORD,,
```

(The deleted old-side entries above were truncated by the diff viewer and are preserved as-is.)

{netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/LICENCE
File without changes

{netra_sdk-0.1.30.dist-info → netra_sdk-0.1.33.dist-info}/WHEEL
File without changes