sentry-sdk 2.37.1__py2.py3-none-any.whl → 2.38.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sentry-sdk might be problematic.
- sentry_sdk/__init__.py +4 -2
- sentry_sdk/_types.py +1 -1
- sentry_sdk/ai/utils.py +11 -1
- sentry_sdk/consts.py +2 -1
- sentry_sdk/envelope.py +1 -1
- sentry_sdk/integrations/__init__.py +1 -1
- sentry_sdk/integrations/anthropic.py +47 -12
- sentry_sdk/integrations/asyncio.py +2 -0
- sentry_sdk/integrations/dedupe.py +3 -1
- sentry_sdk/integrations/gql.py +22 -5
- sentry_sdk/integrations/huggingface_hub.py +277 -81
- sentry_sdk/integrations/langchain.py +5 -3
- sentry_sdk/integrations/openai_agents/patches/agent_run.py +4 -4
- sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +2 -2
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +50 -6
- sentry_sdk/integrations/openai_agents/utils.py +0 -10
- sentry_sdk/integrations/threading.py +1 -1
- sentry_sdk/profiler/continuous_profiler.py +13 -3
- sentry_sdk/tracing.py +1 -2
- sentry_sdk/tracing_utils.py +18 -22
- sentry_sdk/utils.py +6 -0
- {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/METADATA +1 -1
- {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/RECORD +27 -27
- {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/WHEEL +0 -0
- {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/top_level.txt +0 -0
sentry_sdk/__init__.py
CHANGED
@@ -1,10 +1,10 @@
+from sentry_sdk import profiler
 from sentry_sdk.scope import Scope
 from sentry_sdk.transport import Transport, HttpTransport
 from sentry_sdk.client import Client

 from sentry_sdk.api import *  # noqa
-
-from sentry_sdk.consts import VERSION  # noqa
+from sentry_sdk.consts import VERSION

 __all__ = [  # noqa
     "Hub",
@@ -12,6 +12,7 @@ __all__ = [  # noqa
     "Client",
     "Transport",
     "HttpTransport",
+    "VERSION",
     "integrations",
     # From sentry_sdk.api
     "init",
@@ -47,6 +48,7 @@ __all__ = [  # noqa
     "trace",
     "monitor",
     "logger",
+    "profiler",
     "start_session",
     "end_session",
     "set_transaction_name",
sentry_sdk/_types.py
CHANGED
sentry_sdk/ai/utils.py
CHANGED
@@ -3,9 +3,10 @@ import json
 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
-    from typing import Any
+    from typing import Any, Callable
     from sentry_sdk.tracing import Span

+import sentry_sdk
 from sentry_sdk.utils import logger


@@ -37,3 +38,12 @@ def set_data_normalized(span, key, value, unpack=True):
         span.set_data(key, normalized)
     else:
         span.set_data(key, json.dumps(normalized))
+
+
+def get_start_span_function():
+    # type: () -> Callable[..., Any]
+    current_span = sentry_sdk.get_current_span()
+    transaction_exists = (
+        current_span is not None and current_span.containing_transaction is not None
+    )
+    return sentry_sdk.start_span if transaction_exists else sentry_sdk.start_transaction
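
get_start_span_function now lives in sentry_sdk.ai.utils (it was moved from the OpenAI Agents integration, see further below) and also loosens the old check from containing_transaction == current_span to containing_transaction is not None. A minimal usage sketch — the op/name values here are illustrative, not taken from the diff:

    import sentry_sdk
    from sentry_sdk.ai.utils import get_start_span_function

    def traced_llm_call(model, run):
        # Starts a child span if a transaction is already active, otherwise a
        # new transaction, so the AI call is traced either way.
        with get_start_span_function()(op="gen_ai.chat", name=f"chat {model}") as span:
            span.set_data("gen_ai.request.model", model)
            return run()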
sentry_sdk/consts.py
CHANGED
@@ -795,6 +795,7 @@ class OP:
     GEN_AI_CREATE_AGENT = "gen_ai.create_agent"
     GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
     GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
+    GEN_AI_GENERATE_TEXT = "gen_ai.generate_text"
     GEN_AI_HANDOFF = "gen_ai.handoff"
     GEN_AI_PIPELINE = "gen_ai.pipeline"
     GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
@@ -1330,4 +1331,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options


-VERSION = "2.37.1"
+VERSION = "2.38.0"
sentry_sdk/envelope.py
CHANGED
sentry_sdk/integrations/anthropic.py
CHANGED
@@ -1,10 +1,9 @@
 from functools import wraps
-import json
 from typing import TYPE_CHECKING

 import sentry_sdk
 from sentry_sdk.ai.monitoring import record_token_usage
-from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
@@ -117,8 +116,29 @@ def _set_input_data(span, kwargs, integration):
         and should_send_default_pii()
         and integration.include_prompts
     ):
+        normalized_messages = []
+        for message in messages:
+            if (
+                message.get("role") == "user"
+                and "content" in message
+                and isinstance(message["content"], (list, tuple))
+            ):
+                for item in message["content"]:
+                    if item.get("type") == "tool_result":
+                        normalized_messages.append(
+                            {
+                                "role": "tool",
+                                "content": {
+                                    "tool_use_id": item.get("tool_use_id"),
+                                    "output": item.get("content"),
+                                },
+                            }
+                        )
+            else:
+                normalized_messages.append(message)
+
         set_data_normalized(
-            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False
         )

     set_data_normalized(
@@ -159,12 +179,29 @@ def _set_output_data(
     Set output data for the span based on the AI response."""
     span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
     if should_send_default_pii() and integration.include_prompts:
+        output_messages = {
+            "response": [],
+            "tool": [],
+        }  # type: (dict[str, list[Any]])
+
+        for output in content_blocks:
+            if output["type"] == "text":
+                output_messages["response"].append(output["text"])
+            elif output["type"] == "tool_use":
+                output_messages["tool"].append(output)
+
+        if len(output_messages["tool"]) > 0:
+            set_data_normalized(
+                span,
+                SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                output_messages["tool"],
+                unpack=False,
+            )
+
+        if len(output_messages["response"]) > 0:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
+            )

     record_token_usage(
         span,
@@ -172,8 +209,6 @@ def _set_output_data(
         output_tokens=output_tokens,
     )

-    # TODO: GEN_AI_RESPONSE_TOOL_CALLS ?
-
     if finish_span:
         span.__exit__(None, None, None)

@@ -194,7 +229,7 @@ def _sentry_patched_create_common(f, *args, **kwargs):

     model = kwargs.get("model", "")

-    span = sentry_sdk.start_span(
+    span = get_start_span_function()(
         op=OP.GEN_AI_CHAT,
         name=f"chat {model}".strip(),
         origin=AnthropicIntegration.origin,
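
The normalization above rewrites Anthropic tool_result blocks into separate "role": "tool" entries before the request messages are attached to the span. The same transformation in isolation, with hypothetical message data and logic mirroring the hunk:

    def normalize_messages(messages):
        # User messages whose content is a list of tool_result blocks become
        # "tool" role messages; everything else passes through unchanged.
        normalized = []
        for message in messages:
            if message.get("role") == "user" and isinstance(
                message.get("content"), (list, tuple)
            ):
                for item in message["content"]:
                    if item.get("type") == "tool_result":
                        normalized.append(
                            {
                                "role": "tool",
                                "content": {
                                    "tool_use_id": item.get("tool_use_id"),
                                    "output": item.get("content"),
                                },
                            }
                        )
            else:
                normalized.append(message)
        return normalized

    example = [
        {"role": "user", "content": "What is the weather?"},
        {
            "role": "user",
            "content": [{"type": "tool_result", "tool_use_id": "tu_1", "content": "72F"}],
        },
    ]
    # The second entry becomes:
    # {"role": "tool", "content": {"tool_use_id": "tu_1", "output": "72F"}}
    print(normalize_messages(example))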
sentry_sdk/integrations/dedupe.py
CHANGED
@@ -1,5 +1,5 @@
 import sentry_sdk
-from sentry_sdk.utils import ContextVar
+from sentry_sdk.utils import ContextVar, logger
 from sentry_sdk.integrations import Integration
 from sentry_sdk.scope import add_global_event_processor

@@ -37,7 +37,9 @@ class DedupeIntegration(Integration):

             exc = exc_info[1]
             if integration._last_seen.get(None) is exc:
+                logger.info("DedupeIntegration dropped duplicated error event %s", exc)
                 return None
+
             integration._last_seen.set(exc)
             return event

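
The only behavioral change here is the INFO log when a duplicate is dropped. A quick way to observe it — a sketch that assumes a configured SDK with a placeholder DSN and debug logging routed to a handler:

    import sentry_sdk

    sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0", debug=True)

    try:
        1 / 0
    except ZeroDivisionError as exc:
        sentry_sdk.capture_exception(exc)
        # The second capture of the same exception object is dropped by
        # DedupeIntegration, now with a log line:
        # "DedupeIntegration dropped duplicated error event ..."
        sentry_sdk.capture_exception(exc)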
sentry_sdk/integrations/gql.py
CHANGED
@@ -18,6 +18,13 @@ try:
     )
     from gql.transport import Transport, AsyncTransport  # type: ignore[import-not-found]
     from gql.transport.exceptions import TransportQueryError  # type: ignore[import-not-found]
+
+    try:
+        # gql 4.0+
+        from gql import GraphQLRequest
+    except ImportError:
+        GraphQLRequest = None
+
 except ImportError:
     raise DidNotEnable("gql is not installed")

@@ -92,13 +99,13 @@ def _patch_execute():
     real_execute = gql.Client.execute

     @ensure_integration_enabled(GQLIntegration, real_execute)
-    def sentry_patched_execute(self, document, *args, **kwargs):
+    def sentry_patched_execute(self, document_or_request, *args, **kwargs):
         # type: (gql.Client, DocumentNode, Any, Any) -> Any
         scope = sentry_sdk.get_isolation_scope()
-        scope.add_event_processor(_make_gql_event_processor(self, document))
+        scope.add_event_processor(_make_gql_event_processor(self, document_or_request))

         try:
-            return real_execute(self, document, *args, **kwargs)
+            return real_execute(self, document_or_request, *args, **kwargs)
         except TransportQueryError as e:
             event, hint = event_from_exception(
                 e,
@@ -112,8 +119,8 @@ def _patch_execute():
     gql.Client.execute = sentry_patched_execute


-def _make_gql_event_processor(client, document):
-    # type: (gql.Client, DocumentNode) -> EventProcessor
+def _make_gql_event_processor(client, document_or_request):
+    # type: (gql.Client, Union[DocumentNode, gql.GraphQLRequest]) -> EventProcessor
     def processor(event, hint):
         # type: (Event, dict[str, Any]) -> Event
         try:
@@ -130,6 +137,16 @@ def _make_gql_event_processor(client, document):
             )

         if should_send_default_pii():
+            if GraphQLRequest is not None and isinstance(
+                document_or_request, GraphQLRequest
+            ):
+                # In v4.0.0, gql moved to using GraphQLRequest instead of
+                # DocumentNode in execute
+                # https://github.com/graphql-python/gql/pull/556
+                document = document_or_request.document
+            else:
+                document = document_or_request
+
             request["data"] = _data_from_document(document)
             contexts = event.setdefault("contexts", {})
             response = contexts.setdefault("response", {})
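
The integration now accepts either a DocumentNode (gql 3.x) or a GraphQLRequest (gql 4.x) and unwraps the document before PII extraction. The same compat pattern in isolation — a sketch using a stand-in class rather than the real gql types:

    class GraphQLRequest:  # stand-in for gql 4.x's GraphQLRequest
        def __init__(self, document):
            self.document = document

    def extract_document(document_or_request):
        # gql 4.0 passes GraphQLRequest to Client.execute; earlier versions
        # pass the DocumentNode directly. Normalize to the document.
        if isinstance(document_or_request, GraphQLRequest):
            return document_or_request.document
        return document_or_request

    assert extract_document("doc") == "doc"
    assert extract_document(GraphQLRequest("doc")) == "doc"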
sentry_sdk/integrations/huggingface_hub.py
CHANGED
@@ -1,24 +1,24 @@
+import inspect
 from functools import wraps

+import sentry_sdk
 from sentry_sdk.ai.monitoring import record_token_usage
 from sentry_sdk.ai.utils import set_data_normalized
-from sentry_sdk.consts import SPANDATA
-
-from typing import Any, Iterable, Callable
-
-import sentry_sdk
-from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
 )

+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Iterable
+
 try:
     import huggingface_hub.inference._client
-
-    from huggingface_hub import ChatCompletionStreamOutput, TextGenerationOutput
 except ImportError:
     raise DidNotEnable("Huggingface not installed")

@@ -34,9 +34,18 @@ class HuggingfaceHubIntegration(Integration):
     @staticmethod
     def setup_once():
         # type: () -> None
+
+        # Other tasks that can be called: https://huggingface.co/docs/huggingface_hub/guides/inference#supported-providers-and-tasks
         huggingface_hub.inference._client.InferenceClient.text_generation = (
-            huggingface_hub.inference._client.InferenceClient.text_generation
+            _wrap_huggingface_task(
+                huggingface_hub.inference._client.InferenceClient.text_generation,
+                OP.GEN_AI_GENERATE_TEXT,
+            )
+        )
+        huggingface_hub.inference._client.InferenceClient.chat_completion = (
+            _wrap_huggingface_task(
+                huggingface_hub.inference._client.InferenceClient.chat_completion,
+                OP.GEN_AI_CHAT,
             )
         )

@@ -51,131 +60,318 @@ def _capture_exception(exc):
     sentry_sdk.capture_event(event, hint=hint)


-def
-    # type: (Callable[..., Any]) -> Callable[..., Any]
+def _wrap_huggingface_task(f, op):
+    # type: (Callable[..., Any], str) -> Callable[..., Any]
     @wraps(f)
-    def
+    def new_huggingface_task(*args, **kwargs):
         # type: (*Any, **Any) -> Any
         integration = sentry_sdk.get_client().get_integration(HuggingfaceHubIntegration)
         if integration is None:
             return f(*args, **kwargs)

+        prompt = None
         if "prompt" in kwargs:
             prompt = kwargs["prompt"]
+        elif "messages" in kwargs:
+            prompt = kwargs["messages"]
         elif len(args) >= 2:
-            # invalid call, let it return error
+            if isinstance(args[1], str) or isinstance(args[1], list):
+                prompt = args[1]
+
+        if prompt is None:
+            # invalid call, dont instrument, let it return error
             return f(*args, **kwargs)

+        client = args[0]
+        model = client.model or kwargs.get("model") or ""
+        operation_name = op.split(".")[-1]

         span = sentry_sdk.start_span(
-            op=
-            name="
+            op=op,
+            name=f"{operation_name} {model}",
             origin=HuggingfaceHubIntegration.origin,
         )
         span.__enter__()
+
+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, operation_name)
+
+        if model:
+            span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
+
+        # Input attributes
+        if should_send_default_pii() and integration.include_prompts:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompt, unpack=False
+            )
+
+        attribute_mapping = {
+            "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+            "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+            "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+            "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+            "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+            "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+            "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
+            "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
+        }
+
+        for attribute, span_attribute in attribute_mapping.items():
+            value = kwargs.get(attribute, None)
+            if value is not None:
+                if isinstance(value, (int, float, bool, str)):
+                    span.set_data(span_attribute, value)
+                else:
+                    set_data_normalized(span, span_attribute, value, unpack=False)
+
+        # LLM Execution
         try:
             res = f(*args, **kwargs)
         except Exception as e:
+            # Error Handling
+            span.set_status("error")
             _capture_exception(e)
             span.__exit__(None, None, None)
             raise e from None

+        # Output attributes
+        finish_reason = None
+        response_model = None
+        response_text_buffer: list[str] = []
+        tokens_used = 0
+        tool_calls = None
+        usage = None
+
         with capture_internal_exceptions():
-            if
+            if isinstance(res, str) and res is not None:
+                response_text_buffer.append(res)

+            if hasattr(res, "generated_text") and res.generated_text is not None:
+                response_text_buffer.append(res.generated_text)

-            if
+            if hasattr(res, "model") and res.model is not None:
+                response_model = res.model
+
+            if hasattr(res, "details") and hasattr(res.details, "finish_reason"):
+                finish_reason = res.details.finish_reason
+
+            if (
+                hasattr(res, "details")
+                and hasattr(res.details, "generated_tokens")
+                and res.details.generated_tokens is not None
+            ):
+                tokens_used = res.details.generated_tokens
+
+            if hasattr(res, "usage") and res.usage is not None:
+                usage = res.usage
+
+            if hasattr(res, "choices") and res.choices is not None:
+                for choice in res.choices:
+                    if hasattr(choice, "finish_reason"):
+                        finish_reason = choice.finish_reason
+                    if hasattr(choice, "message") and hasattr(
+                        choice.message, "tool_calls"
+                    ):
+                        tool_calls = choice.message.tool_calls
+                    if (
+                        hasattr(choice, "message")
+                        and hasattr(choice.message, "content")
+                        and choice.message.content is not None
+                    ):
+                        response_text_buffer.append(choice.message.content)

-            if
+            if response_model is not None:
+                span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
+
+            if finish_reason is not None:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+                    finish_reason,
+                )
+
+            if should_send_default_pii() and integration.include_prompts:
+                if tool_calls is not None and len(tool_calls) > 0:
                     set_data_normalized(
                         span,
-                        SPANDATA.
-
-            if res.details is not None and res.details.generated_tokens > 0:
-                record_token_usage(
-                    span,
-                    total_tokens=res.details.generated_tokens,
+                        SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                        tool_calls,
+                        unpack=False,
                     )
-        span.__exit__(None, None, None)
-        return res

+                if len(response_text_buffer) > 0:
+                    text_response = "".join(response_text_buffer)
+                    if text_response:
+                        set_data_normalized(
+                            span,
+                            SPANDATA.GEN_AI_RESPONSE_TEXT,
+                            text_response,
+                        )
+
+            if usage is not None:
+                record_token_usage(
+                    span,
+                    input_tokens=usage.prompt_tokens,
+                    output_tokens=usage.completion_tokens,
+                    total_tokens=usage.total_tokens,
+                )
+            elif tokens_used > 0:
+                record_token_usage(
+                    span,
+                    total_tokens=tokens_used,
+                )
+
+        # If the response is not a generator (meaning a streaming response)
+        # we are done and can return the response
+        if not inspect.isgenerator(res):
             span.__exit__(None, None, None)
             return res

         if kwargs.get("details", False):
-            #
+            # text-generation stream output
             def new_details_iterator():
-                # type: () -> Iterable[
+                # type: () -> Iterable[Any]
+                finish_reason = None
+                response_text_buffer: list[str] = []
+                tokens_used = 0
+
                 with capture_internal_exceptions():
+                    for chunk in res:
+                        if (
+                            hasattr(chunk, "token")
+                            and hasattr(chunk.token, "text")
+                            and chunk.token.text is not None
+                        ):
+                            response_text_buffer.append(chunk.token.text)
+
+                        if hasattr(chunk, "details") and hasattr(
+                            chunk.details, "finish_reason"
+                        ):
+                            finish_reason = chunk.details.finish_reason
+
+                        if (
+                            hasattr(chunk, "details")
+                            and hasattr(chunk.details, "generated_tokens")
+                            and chunk.details.generated_tokens is not None
                         ):
-                            tokens_used =
-                            and integration.include_prompts
-                        ):
+                            tokens_used = chunk.details.generated_tokens
+
+                        yield chunk
+
+                    if finish_reason is not None:
                         set_data_normalized(
-                            span,
+                            span,
+                            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+                            finish_reason,
                         )
+
+                    if should_send_default_pii() and integration.include_prompts:
+                        if len(response_text_buffer) > 0:
+                            text_response = "".join(response_text_buffer)
+                            if text_response:
+                                set_data_normalized(
+                                    span,
+                                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                                    text_response,
+                                )
+
                     if tokens_used > 0:
                         record_token_usage(
                             span,
                             total_tokens=tokens_used,
                         )
+
                 span.__exit__(None, None, None)

             return new_details_iterator()
-        else:
-            # res is Iterable[str]

+        else:
+            # chat-completion stream output
             def new_iterator():
                 # type: () -> Iterable[str]
-
+                finish_reason = None
+                response_model = None
+                response_text_buffer: list[str] = []
+                tool_calls = None
+                usage = None
+
                 with capture_internal_exceptions():
-                    for
-                        if
+                    for chunk in res:
+                        if hasattr(chunk, "model") and chunk.model is not None:
+                            response_model = chunk.model
+
+                        if hasattr(chunk, "usage") and chunk.usage is not None:
+                            usage = chunk.usage
+
+                        if isinstance(chunk, str):
+                            if chunk is not None:
+                                response_text_buffer.append(chunk)
+
+                        if hasattr(chunk, "choices") and chunk.choices is not None:
+                            for choice in chunk.choices:
+                                if (
+                                    hasattr(choice, "delta")
+                                    and hasattr(choice.delta, "content")
+                                    and choice.delta.content is not None
+                                ):
+                                    response_text_buffer.append(
+                                        choice.delta.content
+                                    )
+
+                                if (
+                                    hasattr(choice, "finish_reason")
+                                    and choice.finish_reason is not None
+                                ):
+                                    finish_reason = choice.finish_reason
+
+                                if (
+                                    hasattr(choice, "delta")
+                                    and hasattr(choice.delta, "tool_calls")
+                                    and choice.delta.tool_calls is not None
+                                ):
+                                    tool_calls = choice.delta.tool_calls
+
+                        yield chunk
+
+                    if response_model is not None:
+                        span.set_data(
+                            SPANDATA.GEN_AI_RESPONSE_MODEL, response_model
+                        )
+
+                    if finish_reason is not None:
                         set_data_normalized(
-                            span,
+                            span,
+                            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+                            finish_reason,
                         )
+
+                    if should_send_default_pii() and integration.include_prompts:
+                        if tool_calls is not None and len(tool_calls) > 0:
+                            set_data_normalized(
+                                span,
+                                SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                                tool_calls,
+                                unpack=False,
+                            )
+
+                        if len(response_text_buffer) > 0:
+                            text_response = "".join(response_text_buffer)
+                            if text_response:
+                                set_data_normalized(
+                                    span,
+                                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                                    text_response,
+                                )
+
+                    if usage is not None:
+                        record_token_usage(
+                            span,
+                            input_tokens=usage.prompt_tokens,
+                            output_tokens=usage.completion_tokens,
+                            total_tokens=usage.total_tokens,
+                        )
+
                 span.__exit__(None, None, None)

             return new_iterator()

-    return
+    return new_huggingface_task
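
Both stream branches follow the same pattern: wrap the original generator, accumulate text and usage while re-yielding chunks unchanged, and close the span only after the stream is exhausted. The pattern reduced to its core — a generic sketch, not the integration's exact code:

    def wrap_stream(chunks, on_done):
        # Re-yield chunks unchanged while buffering text, then report once
        # the consumer has drained the stream.
        buffer = []
        for chunk in chunks:
            buffer.append(chunk)
            yield chunk
        on_done("".join(buffer))

    collected = []
    for piece in wrap_stream(iter(["Hel", "lo"]), collected.append):
        pass
    assert collected == ["Hello"]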
sentry_sdk/integrations/langchain.py
CHANGED
@@ -4,7 +4,7 @@ from functools import wraps

 import sentry_sdk
 from sentry_sdk.ai.monitoring import set_ai_pipeline_name
-from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
@@ -716,8 +716,9 @@ def _wrap_agent_executor_invoke(f):
         return f(self, *args, **kwargs)

     agent_name, tools = _get_request_data(self, args, kwargs)
+    start_span_function = get_start_span_function()

-    with sentry_sdk.start_span(
+    with start_span_function(
         op=OP.GEN_AI_INVOKE_AGENT,
         name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
         origin=LangchainIntegration.origin,
@@ -767,8 +768,9 @@ def _wrap_agent_executor_stream(f):
         return f(self, *args, **kwargs)

     agent_name, tools = _get_request_data(self, args, kwargs)
+    start_span_function = get_start_span_function()

-    span = sentry_sdk.start_span(
+    span = start_span_function(
         op=OP.GEN_AI_INVOKE_AGENT,
         name=f"invoke_agent {agent_name}".strip(),
         origin=LangchainIntegration.origin,
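
Note the asymmetry: _wrap_agent_executor_invoke can use a with block, while _wrap_agent_executor_stream keeps a span handle because the span must stay open until the stream is drained. The generic shape of the second pattern — a sketch, not the integration's code:

    def stream_with_span(start_span, chunks):
        # start_span is e.g. sentry_sdk.start_span or the function returned
        # by get_start_span_function().
        span = start_span(op="gen_ai.invoke_agent", name="invoke_agent")
        span.__enter__()
        try:
            for chunk in chunks:
                yield chunk
        finally:
            # Close only after the consumer finishes iterating.
            span.__exit__(None, None, None)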
sentry_sdk/integrations/openai_agents/patches/agent_run.py
CHANGED
@@ -26,12 +26,12 @@ def _patch_agent_run():
     original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs
     original_execute_final_output = agents._run_impl.RunImpl.execute_final_output

-    def _start_invoke_agent_span(context_wrapper, agent):
-        # type: (agents.RunContextWrapper, agents.Agent) -> None
+    def _start_invoke_agent_span(context_wrapper, agent, kwargs):
+        # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> None
         """Start an agent invocation span"""
         # Store the agent on the context wrapper so we can access it later
         context_wrapper._sentry_current_agent = agent
-        invoke_agent_span(context_wrapper, agent)
+        invoke_agent_span(context_wrapper, agent, kwargs)

     def _end_invoke_agent_span(context_wrapper, agent, output=None):
         # type: (agents.RunContextWrapper, agents.Agent, Optional[Any]) -> None
@@ -72,7 +72,7 @@ def _patch_agent_run():
         if current_agent and current_agent != agent:
             _end_invoke_agent_span(context_wrapper, current_agent)

-        _start_invoke_agent_span(context_wrapper, agent)
+        _start_invoke_agent_span(context_wrapper, agent, kwargs)

         # Call original method with all the correct parameters
         result = await original_run_single_turn(*args, **kwargs)
sentry_sdk/integrations/openai_agents/spans/agent_workflow.py
CHANGED
@@ -1,7 +1,7 @@
 import sentry_sdk
+from sentry_sdk.ai.utils import get_start_span_function

 from ..consts import SPAN_ORIGIN
-from ..utils import _get_start_span_function

 from typing import TYPE_CHECKING

@@ -13,7 +13,7 @@ def agent_workflow_span(agent):
     # type: (agents.Agent) -> sentry_sdk.tracing.Span

     # Create a transaction or a span if an transaction is already active
-    span = _get_start_span_function()(
+    span = get_start_span_function()(
         name=f"{agent.name} workflow",
         origin=SPAN_ORIGIN,
     )
sentry_sdk/integrations/openai_agents/spans/invoke_agent.py
CHANGED
@@ -1,5 +1,8 @@
 import sentry_sdk
+from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized
 from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import safe_serialize

 from ..consts import SPAN_ORIGIN
 from ..utils import _set_agent_data
@@ -11,9 +14,10 @@ if TYPE_CHECKING:
     from typing import Any


-def invoke_agent_span(context, agent):
-    # type: (agents.RunContextWrapper, agents.Agent) -> sentry_sdk.tracing.Span
+def invoke_agent_span(context, agent, kwargs):
+    # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> sentry_sdk.tracing.Span
+    start_span_function = get_start_span_function()
+    span = start_span_function(
         op=OP.GEN_AI_INVOKE_AGENT,
         name=f"invoke_agent {agent.name}",
         origin=SPAN_ORIGIN,
@@ -22,6 +26,40 @@ def invoke_agent_span(context, agent):

     span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

+    if should_send_default_pii():
+        messages = []
+        if agent.instructions:
+            message = (
+                agent.instructions
+                if isinstance(agent.instructions, str)
+                else safe_serialize(agent.instructions)
+            )
+            messages.append(
+                {
+                    "content": [{"text": message, "type": "text"}],
+                    "role": "system",
+                }
+            )
+
+        original_input = kwargs.get("original_input")
+        if original_input is not None:
+            message = (
+                original_input
+                if isinstance(original_input, str)
+                else safe_serialize(original_input)
+            )
+            messages.append(
+                {
+                    "content": [{"text": message, "type": "text"}],
+                    "role": "user",
+                }
+            )
+
+        if len(messages) > 0:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+            )
+
     _set_agent_data(span, agent)

     return span
@@ -29,6 +67,12 @@ def invoke_agent_span(context, agent):

 def update_invoke_agent_span(context, agent, output):
     # type: (agents.RunContextWrapper, agents.Agent, Any) -> None
+    span = sentry_sdk.get_current_span()
+
+    if span:
+        if should_send_default_pii():
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False
+            )
+
+        span.__exit__(None, None, None)
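
Both branches build entries in the same gen_ai.request.messages shape used across the AI integrations. For reference, the structure produced when an agent has string instructions and a string original input — the values here are illustrative:

    messages = [
        {
            "content": [{"text": "You are a helpful assistant.", "type": "text"}],
            "role": "system",
        },
        {
            "content": [{"text": "Summarize this document.", "type": "text"}],
            "role": "user",
        },
    ]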
sentry_sdk/integrations/openai_agents/utils.py
CHANGED
@@ -9,7 +9,6 @@ from typing import TYPE_CHECKING

 if TYPE_CHECKING:
     from typing import Any
-    from typing import Callable
     from agents import Usage

 try:
@@ -29,15 +28,6 @@ def _capture_exception(exc):
     sentry_sdk.capture_event(event, hint=hint)


-def _get_start_span_function():
-    # type: () -> Callable[..., Any]
-    current_span = sentry_sdk.get_current_span()
-    transaction_exists = (
-        current_span is not None and current_span.containing_transaction == current_span
-    )
-    return sentry_sdk.start_span if transaction_exists else sentry_sdk.start_transaction
-
-
 def _set_agent_data(span, agent):
     # type: (sentry_sdk.tracing.Span, agents.Agent) -> None
     span.set_data(
sentry_sdk/integrations/threading.py
CHANGED
@@ -52,7 +52,7 @@ class ThreadingIntegration(Integration):

         try:
             from django import VERSION as django_version  # noqa: N811
-            import channels  # type: ignore[import-not-found]
+            import channels  # type: ignore[import-untyped]

             channels_version = channels.__version__
         except ImportError:
sentry_sdk/profiler/continuous_profiler.py
CHANGED
@@ -75,9 +75,11 @@ def setup_continuous_profiler(options, sdk_info, capture_func):
    # type: (Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> bool
    global _scheduler

-    if _scheduler is not None:
+    already_initialized = _scheduler is not None
+
+    if already_initialized:
         logger.debug("[Profiling] Continuous Profiler is already setup")
-        return False
+        teardown_continuous_profiler()

    if is_gevent():
        # If gevent has patched the threading modules then we cannot rely on
@@ -117,11 +119,19 @@ def setup_continuous_profiler(options, sdk_info, capture_func):
        )
    )

-    atexit.register(teardown_continuous_profiler)
+    if not already_initialized:
+        atexit.register(teardown_continuous_profiler)

    return True


+def is_profile_session_sampled():
+    # type: () -> bool
+    if _scheduler is None:
+        return False
+    return _scheduler.sampled
+
+
 def try_autostart_continuous_profiler():
    # type: () -> None
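
Re-running setup_continuous_profiler (for example via a second sentry_sdk.init()) now tears down and re-creates the scheduler instead of returning early, and the atexit hook is registered only on first setup so it cannot accumulate. The new helper makes the sampling decision queryable; a hedged sketch of how calling code might use it, where expensive_annotation() is hypothetical:

    from sentry_sdk.profiler.continuous_profiler import is_profile_session_sampled

    if is_profile_session_sampled():
        # Only do extra work when the current profile session is actually
        # being sampled; returns False when no scheduler is set up.
        expensive_annotation()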
sentry_sdk/tracing.py
CHANGED
@@ -1,4 +1,3 @@
-from decimal import Decimal
 import uuid
 import warnings
 from datetime import datetime, timedelta, timezone
@@ -1251,7 +1250,7 @@ class Transaction(Span):
             return

         # Now we roll the dice.
-        self.sampled = self._sample_rand < Decimal(str(self.sample_rate))
+        self.sampled = self._sample_rand < self.sample_rate

         if self.sampled:
             logger.debug(
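
With Decimal removed, the sampling decision is a plain float comparison: the trace's deterministic sample_rand must fall below the configured sample rate. A worked example:

    sample_rate = 0.25
    sample_rand = 0.137531          # derived from the trace ID, in [0, 1)
    sampled = sample_rand < sample_rate
    assert sampled                  # ~25% of sample_rand values fall below 0.25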
sentry_sdk/tracing_utils.py
CHANGED
@@ -6,7 +6,6 @@ import re
 import sys
 from collections.abc import Mapping
 from datetime import timedelta
-from decimal import ROUND_DOWN, Decimal, DefaultContext, localcontext
 from random import Random
 from urllib.parse import quote, unquote
 import uuid
@@ -502,7 +501,7 @@ class PropagationContext:
             return

         sample_rand = try_convert(
-            Decimal, self.dynamic_sampling_context.get("sample_rand")
+            float, self.dynamic_sampling_context.get("sample_rand")
         )
         if sample_rand is not None and 0 <= sample_rand < 1:
             # sample_rand is present and valid, so don't overwrite it
@@ -650,7 +649,7 @@ class Baggage:
         options = client.options or {}

         sentry_items["trace_id"] = transaction.trace_id
-        sentry_items["sample_rand"] = str(transaction._sample_rand)
+        sentry_items["sample_rand"] = f"{transaction._sample_rand:.6f}"  # noqa: E231

         if options.get("environment"):
             sentry_items["environment"] = options["environment"]
@@ -724,15 +723,15 @@ class Baggage:
     )

     def _sample_rand(self):
-        # type: () -> Optional[Decimal]
+        # type: () -> Optional[float]
         """Convenience method to get the sample_rand value from the sentry_items.

-        We validate the value and parse it as a Decimal before returning it. The value is considered
-        valid if it is a Decimal in the range [0, 1).
+        We validate the value and parse it as a float before returning it. The value is considered
+        valid if it is a float in the range [0, 1).
         """
-        sample_rand = try_convert(Decimal, self.sentry_items.get("sample_rand"))
+        sample_rand = try_convert(float, self.sentry_items.get("sample_rand"))

-        if sample_rand is not None and 0 <= sample_rand < 1:
+        if sample_rand is not None and 0.0 <= sample_rand < 1.0:
             return sample_rand

         return None
@@ -898,7 +897,7 @@ def _generate_sample_rand(
     *,
     interval=(0.0, 1.0),  # type: tuple[float, float]
 ):
-    # type: (...) -> Decimal
+    # type: (...) -> float
     """Generate a sample_rand value from a trace ID.

     The generated value will be pseudorandomly chosen from the provided
@@ -913,19 +912,16 @@ def _generate_sample_rand(
         raise ValueError("Invalid interval: lower must be less than upper")

     rng = Random(trace_id)
-    sample_rand = upper
-    while sample_rand >= upper:
-        sample_rand = rng.uniform(lower, upper)
-
-    # Round down to exactly six decimal-digit precision.
-    # Setting the context is needed to avoid an InvalidOperation exception
-    # in case the user has changed the default precision or set traps.
-    with localcontext(DefaultContext) as ctx:
-        ctx.prec = 6
-        return Decimal(sample_rand).quantize(
-            Decimal("0.000001"),
-            rounding=ROUND_DOWN,
-        )
+    lower_scaled = int(lower * 1_000_000)
+    upper_scaled = int(upper * 1_000_000)
+    try:
+        sample_rand_scaled = rng.randrange(lower_scaled, upper_scaled)
+    except ValueError:
+        # In some corner cases it might happen that the range is too small
+        # In that case, just take the lower bound
+        sample_rand_scaled = lower_scaled
+
+    return sample_rand_scaled / 1_000_000
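
The rewritten generator keeps six-decimal precision by working on an integer scale instead of Decimal quantization. A self-contained version matching the hunk — same logic, standalone:

    from random import Random

    def generate_sample_rand(trace_id, interval=(0.0, 1.0)):
        # Deterministic per trace_id; scaled to integers so the result always
        # has at most six decimal digits, like the old Decimal version.
        lower, upper = interval
        rng = Random(trace_id)
        lower_scaled = int(lower * 1_000_000)
        upper_scaled = int(upper * 1_000_000)
        try:
            sample_rand_scaled = rng.randrange(lower_scaled, upper_scaled)
        except ValueError:
            # Empty range (lower_scaled == upper_scaled): use the lower bound
            sample_rand_scaled = lower_scaled
        return sample_rand_scaled / 1_000_000

    value = generate_sample_rand("771a43a4192642f0b136d5159a501700")
    assert 0.0 <= value < 1.0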
sentry_sdk/utils.py
CHANGED
@@ -1934,6 +1934,12 @@ def try_convert(convert_func, value):
     given function. Return None if the conversion fails, i.e. if the function
     raises an exception.
     """
+    try:
+        if isinstance(value, convert_func):  # type: ignore
+            return value
+    except TypeError:
+        pass
+
     try:
         return convert_func(value)
     except Exception:
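
The new early return means values that are already of the target type are passed through without a lossy round-trip, and non-type callables (where isinstance raises TypeError) simply fall through to the conversion path. The behavior in short:

    from sentry_sdk.utils import try_convert

    assert try_convert(float, "0.123456") == 0.123456  # converted from str
    assert try_convert(float, 0.5) == 0.5              # already a float: returned as-is
    assert try_convert(float, "not a number") is None  # conversion failure -> None
    assert try_convert(lambda x: x + 1, 1) == 2        # isinstance raises TypeError,
                                                       # so the callable is applied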
{sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/RECORD
CHANGED
@@ -1,17 +1,17 @@
-sentry_sdk/__init__.py,sha256
+sentry_sdk/__init__.py,sha256=-jRAO-EG4LBj5L20sK01fdMlbtvGf_idy54Eb46EiKQ,1364
 sentry_sdk/_compat.py,sha256=Pxcg6cUYPiOoXIFfLI_H3ATb7SfrcXOeZdzpeWv3umI,3116
 sentry_sdk/_init_implementation.py,sha256=WL54d8nggjRunBm3XlG-sWSx4yS5lpYYggd7YBWpuVk,2559
 sentry_sdk/_log_batcher.py,sha256=bBpspIlf1ejxlbudo17bZOSir226LGAdjDe_3kHkOro,5085
 sentry_sdk/_lru_cache.py,sha256=phZMBm9EKU1m67OOApnKCffnlWAlVz9bYjig7CglQuk,1229
 sentry_sdk/_queue.py,sha256=UUzbmliDYmdVYiDA32NMYkX369ElWMFNSj5kodqVQZE,11250
-sentry_sdk/_types.py,sha256=
+sentry_sdk/_types.py,sha256=Gw9Pn0mIHZP23B8C2iM1g07NzxnAkpgRAGR5MrKA2Es,10487
 sentry_sdk/_werkzeug.py,sha256=m3GPf-jHd8v3eVOfBHaKw5f0uHoLkXrSO1EcY-8EisY,3734
 sentry_sdk/api.py,sha256=OkwQ2tA5YASJ77wLOteUdv_woPF4wL_JTOAMxe9z8k4,15282
 sentry_sdk/attachments.py,sha256=0Dylhm065O6hNFjB40fWCd5Hg4qWSXndmi1TPWglZkI,3109
 sentry_sdk/client.py,sha256=oQcolwFdLvuX4huUaCcpgABy3M5Yb4IhzymlzyrqfkE,38860
-sentry_sdk/consts.py,sha256=
+sentry_sdk/consts.py,sha256=z-QZyKPzeM4-ZYB4wOmNZ_3CnS7QcTjXZ49nuMAr9H4,49865
 sentry_sdk/debug.py,sha256=ddBehQlAuQC1sg1XO-N4N3diZ0x0iT5RWJwFdrtcsjw,1019
-sentry_sdk/envelope.py,sha256=
+sentry_sdk/envelope.py,sha256=nCUvqVWIVWV-RoVvMgrTNUDfo7h_Z5jU8g90T30wdXE,10360
 sentry_sdk/feature_flags.py,sha256=99JRig6TBkrkBzVCKqYcmVgjsuA_Hk-ul7jFHGhJplc,2233
 sentry_sdk/hub.py,sha256=2QLvEtIYSYV04r8h7VBmQjookILaiBZxZBGTtQKNAWg,25675
 sentry_sdk/logger.py,sha256=HnmkMmOf1hwvxIcPW2qOvIOSnFZ9yRNDBae_eriGsoY,2471
@@ -24,29 +24,29 @@ sentry_sdk/serializer.py,sha256=xUw3xjSsGF0cWRHL9ofe0nmWEtZvzPOHSQ6IHvo6UAc,1323
 sentry_sdk/session.py,sha256=TqDVmRKKHUDSmZb4jQR-s8wDt7Fwb6QaG21hawUGWEs,5571
 sentry_sdk/sessions.py,sha256=UZ2jfrqhYvZzTxCDGc1MLD6P_aHLJnTFetSUROIaPaA,9154
 sentry_sdk/spotlight.py,sha256=93kdd8KxdLfcPaxFnFuqHgYAAL4FCfpK1hiiPoD7Ac4,8678
-sentry_sdk/tracing.py,sha256=
-sentry_sdk/tracing_utils.py,sha256=
+sentry_sdk/tracing.py,sha256=ecnuM0Y8ueX110M4iXfOV8ay4nWiHuvCiNQMPrNzaBM,51530
+sentry_sdk/tracing_utils.py,sha256=EM2e3UHkOUNoWhs0gaFjaJZDAyHKCw2KB0fOwWlZF0w,38862
 sentry_sdk/transport.py,sha256=A0uux7XnniDJuExLudLyyFDYnS5C6r7zozGbkveUM7E,32469
 sentry_sdk/types.py,sha256=NLbnRzww2K3_oGz2GzcC8TdX5L2DXYso1-H1uCv2Hwc,1222
-sentry_sdk/utils.py,sha256=
+sentry_sdk/utils.py,sha256=KubsR-No80YTJ1FYwNQxavYU4hOQyBixevnPsXxNCBc,61705
 sentry_sdk/worker.py,sha256=VSMaigRMbInVyupSFpBC42bft2oIViea-0C_d9ThnIo,4464
 sentry_sdk/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sentry_sdk/ai/monitoring.py,sha256=bS_KneWCAL9ehml5XiyficoPVx4DUUG6acbH3cjP3I8,5057
-sentry_sdk/ai/utils.py,sha256=
+sentry_sdk/ai/utils.py,sha256=CsPNw-N2utLhUfXbT8cCBPlDIyTBNzJQ5aIF18gPdhk,1705
 sentry_sdk/crons/__init__.py,sha256=3Zt6g1-pZZ12uRKKsC8QLm3XgJ4K1VYxgVpNNUygOZY,221
 sentry_sdk/crons/api.py,sha256=mk-UB8Im2LU2rJFdE-TV302EaKnf8kAjwEL0bIV0Hzc,1767
 sentry_sdk/crons/consts.py,sha256=dXqJk5meBSu5rjlGpqAOlkpACnuUi7svQnAFoy1ZNUU,87
 sentry_sdk/crons/decorator.py,sha256=UrjeIqBCbvsuKrfjGkKJbbLBvjw2TQvDWcTO7WwAmrI,3913
-sentry_sdk/integrations/__init__.py,sha256=
+sentry_sdk/integrations/__init__.py,sha256=tWC8ujNg8r8Sc4bk8L5NMeAJQgbPYtvWdnbWwBlVVbQ,10342
 sentry_sdk/integrations/_asgi_common.py,sha256=Ypg7IctB3iPPY60ebVlzChzgT8GeGpZ0YH8VvJNDlEY,3187
 sentry_sdk/integrations/_wsgi_common.py,sha256=A1-X7l1pZCcrbUhRHkmdKiK_EemEZjn7xToJIvlEuFM,7558
 sentry_sdk/integrations/aiohttp.py,sha256=_rfDKx1arvVQwcC20vh7HG80p8XtgzqKB3iBuPYZy8A,12895
-sentry_sdk/integrations/anthropic.py,sha256=
+sentry_sdk/integrations/anthropic.py,sha256=7MqIgZLOD3fOw14F9BxHFr9VpLpy44QtZgaPyYVKFi4,13169
 sentry_sdk/integrations/argv.py,sha256=GIY7TBFETF8Z0fDzqTXEJldt5XXCDdFNZxpGxP7EPaU,911
 sentry_sdk/integrations/ariadne.py,sha256=C-zKlOrU7jvTWmQHZx0M0tAZNkPPo7Z5-5jXDD92LiU,5834
 sentry_sdk/integrations/arq.py,sha256=yDPdWJa3ZgnGLwFzavIylIafEVN0qqSSgL4kUHxQF70,7881
 sentry_sdk/integrations/asgi.py,sha256=zjoOOA5bHlTptRsP3ZU4X5UsluyHFqebsUt3lRfiGtE,12738
-sentry_sdk/integrations/asyncio.py,sha256=
+sentry_sdk/integrations/asyncio.py,sha256=DEoXAwk8oVl_1Sbmm2TthpruaLO7p4WZBTh9K-mch_g,4136
 sentry_sdk/integrations/asyncpg.py,sha256=fbBTi5bEERK3c9o43LBLtS5wPaSVq_qIl3Y50NPmr5Y,6521
 sentry_sdk/integrations/atexit.py,sha256=sY46N2hEvtGuT1DBQhirUXHbjgXjXAm7R_sgiectVKw,1652
 sentry_sdk/integrations/aws_lambda.py,sha256=WveHWnB_nBsnfLTbaUxih79Ra3Qjv4Jjh-7m2v-gSJs,17954
@@ -57,7 +57,7 @@ sentry_sdk/integrations/chalice.py,sha256=A4K_9FmNUu131El0ctkTmjtyYd184I4hQTlidZ
 sentry_sdk/integrations/clickhouse_driver.py,sha256=2qpRznwSNuRSzrCA1R5bmpgiehDmzbG7yZe6hN-61Wg,6098
 sentry_sdk/integrations/cloud_resource_context.py,sha256=_gFldMeVHs5pxP5sm8uP7ZKmm6s_5hw3UsnXek9Iw8A,7780
 sentry_sdk/integrations/cohere.py,sha256=iuDI1IVPE39rbsc3e9_qJS2bCjNg7F53apueCdhzr8Q,9322
-sentry_sdk/integrations/dedupe.py,sha256=
+sentry_sdk/integrations/dedupe.py,sha256=nZF_Ml-5axq_Nh3AZT80Z0bNNgPbxvYn79eVeBKrIGA,1515
 sentry_sdk/integrations/dramatiq.py,sha256=I09vKWnfiuhdRFCjYYjmE9LOBQvDTPS-KFqf3iHFSsM,5583
 sentry_sdk/integrations/excepthook.py,sha256=tfwpSQuo1b_OmJbNKPPRh90EUjD_OSE4DqqgYY9PVQI,2408
 sentry_sdk/integrations/executing.py,sha256=5lxBAxO5FypY-zTV03AHncGmolmaHd327-3Vrjzskcc,1994
@@ -66,12 +66,12 @@ sentry_sdk/integrations/fastapi.py,sha256=kicdigHM3MG-GlpLUN6wwH8jOVu4dTuoQD6RBF
 sentry_sdk/integrations/flask.py,sha256=t7q73JoJT46RWDtrNImtIloGyDg7CnsNFKpS4gOuBIw,8740
 sentry_sdk/integrations/gcp.py,sha256=u1rSi3nK2ISUQqkRnmKFG23Ks-SefshTf5PV0Dwp3_4,8274
 sentry_sdk/integrations/gnu_backtrace.py,sha256=FL7WkRfHT6idYCSLIrtFQ2G5ZTGoYudCKvBcjR5hqsI,2812
-sentry_sdk/integrations/gql.py,sha256=
+sentry_sdk/integrations/gql.py,sha256=lN5LJNZwUHs_1BQcIrR0adIkgb9YiOh6UtoUG8vCO_Y,4801
 sentry_sdk/integrations/graphene.py,sha256=I6ZJ8Apd9dO9XPVvZY7I46-v1eXOW1C1rAkWwasF3gU,5042
 sentry_sdk/integrations/httpx.py,sha256=WwUulqzBLoGGqWUUdQg_MThwQUKzBXnA-m3g_1GOpCE,5866
 sentry_sdk/integrations/huey.py,sha256=wlyxjeWqqJp1X5S3neD5FiZjXcyznm1dl8_u1wIo76U,5443
-sentry_sdk/integrations/huggingface_hub.py,sha256=
-sentry_sdk/integrations/langchain.py,sha256
+sentry_sdk/integrations/huggingface_hub.py,sha256=33Z2Z0SEEQMcpoAY0y8EFfQdMaB1DacSVJ22utdHHKU,14940
+sentry_sdk/integrations/langchain.py,sha256=vO7alD-_3gLuqW3LW2gQUYgatjB9CrsI-7IpJmOy8Tc,29393
 sentry_sdk/integrations/langgraph.py,sha256=YyDDc14gFCNVuqVmKwX8GRQ17T17WOx2SqqD4IHROPs,11015
 sentry_sdk/integrations/launchdarkly.py,sha256=bvtExuj68xPXZFsQeWTDR-ZBqP087tPuVzP1bNAOZHc,1935
 sentry_sdk/integrations/litestar.py,sha256=jao0f8v5JQagkBg15dUJTdWGPxpS3LmOV301-lwGkGc,11815
@@ -97,7 +97,7 @@ sentry_sdk/integrations/statsig.py,sha256=-e57hxHfHo1S13YQKObV65q_UvREyxbR56fnf7
 sentry_sdk/integrations/stdlib.py,sha256=vgB9weDGh455vBwmUSgcQRgzViKstu3O0syOthCn_H0,8831
 sentry_sdk/integrations/strawberry.py,sha256=u7Lk4u3sNEycdSmY1nQBzYKmqI-mO8BWKAAJkCSuTRA,14126
 sentry_sdk/integrations/sys_exit.py,sha256=AwShgGBWPdiY25aOWDLRAs2RBUKm5T3CrL-Q-zAk0l4,2493
-sentry_sdk/integrations/threading.py,sha256=
+sentry_sdk/integrations/threading.py,sha256=yzJ2gK9cWg_-gZfAidzktVokGM9fxAijS4nMO_Ev7r0,5394
 sentry_sdk/integrations/tornado.py,sha256=Qcft8FZxdVICnaa1AhsDB262sInEQZPf-pvgI-Agjmc,7206
 sentry_sdk/integrations/trytond.py,sha256=BaLCNqQeRWDbHHDEelS5tmj-p_CrbmtGEHIn6JfzEFE,1651
 sentry_sdk/integrations/typer.py,sha256=FQrFgpR9t6yQWF-oWCI9KJLFioEnA2c_1BEtYV-mPAs,1815
@@ -124,18 +124,18 @@ sentry_sdk/integrations/grpc/aio/client.py,sha256=csOwlJb7fg9fBnzeNHxr-qpZEmU97I
 sentry_sdk/integrations/grpc/aio/server.py,sha256=SCkdikPZRdWyrlnZewsSGpPk4v6AsdSApVAbO-lf_Lk,4019
 sentry_sdk/integrations/openai_agents/__init__.py,sha256=-ydqG0sFIrvJlT9JHO58EZpCAzyy9J59Av8dxn0fHuw,1424
 sentry_sdk/integrations/openai_agents/consts.py,sha256=PTb3vlqkuMPktu21ALK72o5WMIX4-cewTEiTRdHKFdQ,38
-sentry_sdk/integrations/openai_agents/utils.py,sha256=
+sentry_sdk/integrations/openai_agents/utils.py,sha256=mb8pGXZ0t_-mgbIJMh6HfVoGKp6kGqIMB7U5URQtSQA,5208
 sentry_sdk/integrations/openai_agents/patches/__init__.py,sha256=I7C9JZ70Mf8PV3wPdFsxTqvcYl4TYUgSZYfNU2Spb7Y,231
-sentry_sdk/integrations/openai_agents/patches/agent_run.py,sha256=
+sentry_sdk/integrations/openai_agents/patches/agent_run.py,sha256=GPBV-j8YnHOrJAhdhu_tphe14z7G0-riFVmjFNDgy0s,5773
 sentry_sdk/integrations/openai_agents/patches/models.py,sha256=DtwqCmSsYFlhRZquKM2jiTOnnAg97eyCTtJYZkWqdww,1405
 sentry_sdk/integrations/openai_agents/patches/runner.py,sha256=Fr5tflgadu3fnEThSZauAhrT7BbvemuZelDVGZjleqA,1483
 sentry_sdk/integrations/openai_agents/patches/tools.py,sha256=uAx1GgsiDJBP7jpYW8r_kOImdgzXlwYqK-uhkyP3icI,3255
 sentry_sdk/integrations/openai_agents/spans/__init__.py,sha256=RlVi781zGsvCJBciDO_EbBbwkakwbP9DoFQBbo4VAEE,353
-sentry_sdk/integrations/openai_agents/spans/agent_workflow.py,sha256=
+sentry_sdk/integrations/openai_agents/spans/agent_workflow.py,sha256=fdRSThD31TcoMXFg-2vmqK2YcSws8Yhd0oC6fxOnysM,469
 sentry_sdk/integrations/openai_agents/spans/ai_client.py,sha256=0HG5pT8a06Zgc5JUmRx8p_6bPoQFQLjDrMY_QSQd0_E,1206
 sentry_sdk/integrations/openai_agents/spans/execute_tool.py,sha256=w3QWWS4wbpteFTz4JjMCXdDpR6JVKcUVREQ-lvJOQTY,1420
 sentry_sdk/integrations/openai_agents/spans/handoff.py,sha256=MBhzy7MpvPGwQTPT5TFcOnmSPiSH_uadQ5wvksueIik,525
-sentry_sdk/integrations/openai_agents/spans/invoke_agent.py,sha256=
+sentry_sdk/integrations/openai_agents/spans/invoke_agent.py,sha256=yyg-wyVZaYMYbaORva_BotNxB9oyK5Dsn8KfQ3Y7OZI,2323
 sentry_sdk/integrations/opentelemetry/__init__.py,sha256=emNL5aAq_NhK0PZmfX_g4GIdvBS6nHqGrjrIgrdC5m8,229
 sentry_sdk/integrations/opentelemetry/consts.py,sha256=fYL6FIAEfnGZGBhFn5X7aRyHxihSPqAKKqMLhf5Gniw,143
 sentry_sdk/integrations/opentelemetry/integration.py,sha256=CWp6hFFMqoR7wcuwTRbRO-1iVch4A6oOB3RuHWeX9GQ,1791
@@ -157,12 +157,12 @@ sentry_sdk/integrations/spark/__init__.py,sha256=oOewMErnZk2rzNvIlZO6URxQexu9bUJ
 sentry_sdk/integrations/spark/spark_driver.py,sha256=mqGQMngDAZWM78lWK5S0FPpmjd1Q65Ta5T4bOH6mNXs,9465
 sentry_sdk/integrations/spark/spark_worker.py,sha256=FGT4yRU2X_iQCC46aasMmvJfYOKmBip8KbDF_wnhvEY,3706
 sentry_sdk/profiler/__init__.py,sha256=3PI3bHk9RSkkOXZKN84DDedk_7M65EiqqaIGo-DYs0E,1291
-sentry_sdk/profiler/continuous_profiler.py,sha256=
+sentry_sdk/profiler/continuous_profiler.py,sha256=7Qb75TaKLNYxMA97wO-qEpDVqxPQWOLUi2rnUm6_Ci0,23066
 sentry_sdk/profiler/transaction_profiler.py,sha256=e3MsUqs-YIp6-nmzpmBYGoWWIF7RyuSGu24Dj-8GXAU,27970
 sentry_sdk/profiler/utils.py,sha256=G5s4tYai9ATJqcHrQ3bOIxlK6jIaHzELrDtU5k3N4HI,6556
-sentry_sdk-2.37.1.dist-info/licenses/LICENSE,sha256
-sentry_sdk-2.37.1.dist-info/METADATA,sha256
-sentry_sdk-2.37.1.dist-info/WHEEL,sha256
-sentry_sdk-2.37.1.dist-info/entry_points.txt,sha256
-sentry_sdk-2.37.1.dist-info/top_level.txt,sha256
-sentry_sdk-2.37.1.dist-info/RECORD,,
+sentry_sdk-2.38.0.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
+sentry_sdk-2.38.0.dist-info/METADATA,sha256=cxysc3uOmG-4teAeFmBvG40-oF7DfJDO01sPlHYG8aI,10358
+sentry_sdk-2.38.0.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+sentry_sdk-2.38.0.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
+sentry_sdk-2.38.0.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
+sentry_sdk-2.38.0.dist-info/RECORD,,
{sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/WHEEL
File without changes
{sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/entry_points.txt
File without changes
{sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/licenses/LICENSE
File without changes
{sentry_sdk-2.37.1.dist-info → sentry_sdk-2.38.0.dist-info}/top_level.txt
File without changes