sentry-sdk 3.0.0a2__py2.py3-none-any.whl → 3.0.0a4__py2.py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of sentry-sdk might be problematic.
- sentry_sdk/__init__.py +4 -0
- sentry_sdk/_compat.py +5 -12
- sentry_sdk/_init_implementation.py +7 -7
- sentry_sdk/_log_batcher.py +17 -29
- sentry_sdk/_lru_cache.py +7 -9
- sentry_sdk/_queue.py +2 -4
- sentry_sdk/_types.py +9 -16
- sentry_sdk/_werkzeug.py +5 -7
- sentry_sdk/ai/monitoring.py +45 -33
- sentry_sdk/ai/utils.py +8 -5
- sentry_sdk/api.py +91 -87
- sentry_sdk/attachments.py +10 -12
- sentry_sdk/client.py +119 -159
- sentry_sdk/consts.py +432 -223
- sentry_sdk/crons/api.py +16 -17
- sentry_sdk/crons/decorator.py +25 -27
- sentry_sdk/debug.py +4 -6
- sentry_sdk/envelope.py +46 -112
- sentry_sdk/feature_flags.py +9 -15
- sentry_sdk/integrations/__init__.py +24 -19
- sentry_sdk/integrations/_asgi_common.py +16 -18
- sentry_sdk/integrations/_wsgi_common.py +22 -33
- sentry_sdk/integrations/aiohttp.py +33 -31
- sentry_sdk/integrations/anthropic.py +43 -38
- sentry_sdk/integrations/argv.py +3 -4
- sentry_sdk/integrations/ariadne.py +16 -18
- sentry_sdk/integrations/arq.py +20 -29
- sentry_sdk/integrations/asgi.py +63 -37
- sentry_sdk/integrations/asyncio.py +15 -17
- sentry_sdk/integrations/asyncpg.py +1 -1
- sentry_sdk/integrations/atexit.py +6 -10
- sentry_sdk/integrations/aws_lambda.py +26 -36
- sentry_sdk/integrations/beam.py +10 -18
- sentry_sdk/integrations/boto3.py +20 -18
- sentry_sdk/integrations/bottle.py +25 -34
- sentry_sdk/integrations/celery/__init__.py +40 -59
- sentry_sdk/integrations/celery/beat.py +22 -26
- sentry_sdk/integrations/celery/utils.py +15 -17
- sentry_sdk/integrations/chalice.py +8 -10
- sentry_sdk/integrations/clickhouse_driver.py +22 -32
- sentry_sdk/integrations/cloud_resource_context.py +9 -16
- sentry_sdk/integrations/cohere.py +19 -25
- sentry_sdk/integrations/dedupe.py +5 -8
- sentry_sdk/integrations/django/__init__.py +69 -74
- sentry_sdk/integrations/django/asgi.py +25 -33
- sentry_sdk/integrations/django/caching.py +24 -20
- sentry_sdk/integrations/django/middleware.py +18 -21
- sentry_sdk/integrations/django/signals_handlers.py +12 -11
- sentry_sdk/integrations/django/templates.py +21 -18
- sentry_sdk/integrations/django/transactions.py +16 -11
- sentry_sdk/integrations/django/views.py +8 -12
- sentry_sdk/integrations/dramatiq.py +21 -21
- sentry_sdk/integrations/excepthook.py +10 -10
- sentry_sdk/integrations/executing.py +3 -4
- sentry_sdk/integrations/falcon.py +27 -42
- sentry_sdk/integrations/fastapi.py +13 -16
- sentry_sdk/integrations/flask.py +31 -38
- sentry_sdk/integrations/gcp.py +13 -16
- sentry_sdk/integrations/gnu_backtrace.py +7 -20
- sentry_sdk/integrations/gql.py +16 -17
- sentry_sdk/integrations/graphene.py +14 -13
- sentry_sdk/integrations/grpc/__init__.py +3 -2
- sentry_sdk/integrations/grpc/aio/client.py +2 -2
- sentry_sdk/integrations/grpc/aio/server.py +15 -14
- sentry_sdk/integrations/grpc/client.py +21 -11
- sentry_sdk/integrations/grpc/consts.py +2 -0
- sentry_sdk/integrations/grpc/server.py +12 -8
- sentry_sdk/integrations/httpx.py +11 -14
- sentry_sdk/integrations/huey.py +14 -21
- sentry_sdk/integrations/huggingface_hub.py +17 -17
- sentry_sdk/integrations/langchain.py +204 -114
- sentry_sdk/integrations/launchdarkly.py +13 -10
- sentry_sdk/integrations/litestar.py +40 -38
- sentry_sdk/integrations/logging.py +29 -36
- sentry_sdk/integrations/loguru.py +16 -20
- sentry_sdk/integrations/modules.py +3 -4
- sentry_sdk/integrations/openai.py +421 -204
- sentry_sdk/integrations/openai_agents/__init__.py +49 -0
- sentry_sdk/integrations/openai_agents/consts.py +1 -0
- sentry_sdk/integrations/openai_agents/patches/__init__.py +4 -0
- sentry_sdk/integrations/openai_agents/patches/agent_run.py +152 -0
- sentry_sdk/integrations/openai_agents/patches/models.py +52 -0
- sentry_sdk/integrations/openai_agents/patches/runner.py +42 -0
- sentry_sdk/integrations/openai_agents/patches/tools.py +84 -0
- sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +20 -0
- sentry_sdk/integrations/openai_agents/spans/ai_client.py +46 -0
- sentry_sdk/integrations/openai_agents/spans/execute_tool.py +47 -0
- sentry_sdk/integrations/openai_agents/spans/handoff.py +24 -0
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +41 -0
- sentry_sdk/integrations/openai_agents/utils.py +153 -0
- sentry_sdk/integrations/openfeature.py +12 -8
- sentry_sdk/integrations/pure_eval.py +6 -10
- sentry_sdk/integrations/pymongo.py +14 -18
- sentry_sdk/integrations/pyramid.py +31 -36
- sentry_sdk/integrations/quart.py +23 -28
- sentry_sdk/integrations/ray.py +73 -64
- sentry_sdk/integrations/redis/__init__.py +7 -4
- sentry_sdk/integrations/redis/_async_common.py +18 -12
- sentry_sdk/integrations/redis/_sync_common.py +16 -15
- sentry_sdk/integrations/redis/modules/caches.py +17 -8
- sentry_sdk/integrations/redis/modules/queries.py +9 -8
- sentry_sdk/integrations/redis/rb.py +3 -2
- sentry_sdk/integrations/redis/redis.py +4 -4
- sentry_sdk/integrations/redis/redis_cluster.py +10 -8
- sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +3 -2
- sentry_sdk/integrations/redis/utils.py +21 -22
- sentry_sdk/integrations/rq.py +13 -16
- sentry_sdk/integrations/rust_tracing.py +10 -7
- sentry_sdk/integrations/sanic.py +34 -46
- sentry_sdk/integrations/serverless.py +22 -27
- sentry_sdk/integrations/socket.py +29 -17
- sentry_sdk/integrations/spark/__init__.py +1 -0
- sentry_sdk/integrations/spark/spark_driver.py +45 -83
- sentry_sdk/integrations/spark/spark_worker.py +7 -11
- sentry_sdk/integrations/sqlalchemy.py +22 -19
- sentry_sdk/integrations/starlette.py +89 -93
- sentry_sdk/integrations/starlite.py +31 -37
- sentry_sdk/integrations/statsig.py +5 -4
- sentry_sdk/integrations/stdlib.py +32 -28
- sentry_sdk/integrations/strawberry.py +63 -50
- sentry_sdk/integrations/sys_exit.py +7 -11
- sentry_sdk/integrations/threading.py +13 -15
- sentry_sdk/integrations/tornado.py +28 -32
- sentry_sdk/integrations/trytond.py +4 -3
- sentry_sdk/integrations/typer.py +8 -6
- sentry_sdk/integrations/unleash.py +5 -4
- sentry_sdk/integrations/wsgi.py +47 -46
- sentry_sdk/logger.py +13 -9
- sentry_sdk/monitor.py +16 -28
- sentry_sdk/opentelemetry/consts.py +11 -4
- sentry_sdk/opentelemetry/contextvars_context.py +17 -15
- sentry_sdk/opentelemetry/propagator.py +38 -21
- sentry_sdk/opentelemetry/sampler.py +51 -34
- sentry_sdk/opentelemetry/scope.py +46 -37
- sentry_sdk/opentelemetry/span_processor.py +43 -59
- sentry_sdk/opentelemetry/tracing.py +32 -12
- sentry_sdk/opentelemetry/utils.py +180 -196
- sentry_sdk/profiler/continuous_profiler.py +108 -97
- sentry_sdk/profiler/transaction_profiler.py +70 -97
- sentry_sdk/profiler/utils.py +11 -15
- sentry_sdk/scope.py +251 -264
- sentry_sdk/scrubber.py +22 -26
- sentry_sdk/serializer.py +48 -65
- sentry_sdk/session.py +44 -61
- sentry_sdk/sessions.py +35 -49
- sentry_sdk/spotlight.py +15 -21
- sentry_sdk/tracing.py +118 -184
- sentry_sdk/tracing_utils.py +103 -123
- sentry_sdk/transport.py +131 -157
- sentry_sdk/utils.py +278 -309
- sentry_sdk/worker.py +16 -28
- {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/METADATA +1 -1
- sentry_sdk-3.0.0a4.dist-info/RECORD +168 -0
- sentry_sdk-3.0.0a2.dist-info/RECORD +0 -154
- {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/WHEEL +0 -0
- {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/openai.py

```diff
--- sentry_sdk/integrations/openai.py (3.0.0a2)
+++ sentry_sdk/integrations/openai.py (3.0.0a4)
@@ -1,3 +1,4 @@
+from __future__ import annotations
 from functools import wraps
 
 import sentry_sdk
@@ -10,6 +11,7 @@ from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
+    safe_serialize,
 )
 
 from typing import TYPE_CHECKING
@@ -19,6 +21,11 @@ if TYPE_CHECKING:
     from sentry_sdk.tracing import Span
 
 try:
+    try:
+        from openai import NOT_GIVEN
+    except ImportError:
+        NOT_GIVEN = None
+
     from openai.resources.chat.completions import Completions, AsyncCompletions
     from openai.resources import Embeddings, AsyncEmbeddings
 
@@ -27,13 +34,24 @@ try:
 except ImportError:
     raise DidNotEnable("OpenAI not installed")
 
+RESPONSES_API_ENABLED = True
+try:
+    # responses API support was introduced in v1.66.0
+    from openai.resources.responses import Responses, AsyncResponses
+    from openai.types.responses.response_completed_event import ResponseCompletedEvent
+except ImportError:
+    RESPONSES_API_ENABLED = False
+
 
 class OpenAIIntegration(Integration):
     identifier = "openai"
     origin = f"auto.ai.{identifier}"
 
-    def __init__(self, include_prompts=True, tiktoken_encoding_name=None):
-        # type: (OpenAIIntegration, bool, Optional[str]) -> None
+    def __init__(
+        self: OpenAIIntegration,
+        include_prompts: bool = True,
+        tiktoken_encoding_name: Optional[str] = None,
+    ) -> None:
         self.include_prompts = include_prompts
 
         self.tiktoken_encoding = None
```
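The nested `NOT_GIVEN` import above degrades gracefully on openai releases that predate the sentinel, and `RESPONSES_API_ENABLED` gates everything that touches the Responses API (added in openai v1.66.0). A minimal sketch of why the `None` fallback keeps the `value is not NOT_GIVEN and value is not None` checks in `_set_input_data` (further down) safe on old clients; this is illustrative code, not part of the SDK:

```python
# Illustrative sketch, not SDK code. Newer openai clients pass the NOT_GIVEN
# sentinel for omitted keyword arguments; on clients that lack it, falling
# back to None makes the sentinel check collapse into a plain None check.
try:
    from openai import NOT_GIVEN
except ImportError:
    NOT_GIVEN = None

def present_values(kwargs):
    # Keep only arguments the caller actually supplied.
    return {
        key: value
        for key, value in kwargs.items()
        if value is not NOT_GIVEN and value is not None
    }

print(present_values({"model": "gpt-4o-mini", "temperature": None}))
# -> {'model': 'gpt-4o-mini'}
```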
```diff
@@ -43,25 +61,32 @@ class OpenAIIntegration(Integration):
             self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)
 
     @staticmethod
-    def setup_once():
-        # type: () -> None
+    def setup_once() -> None:
         Completions.create = _wrap_chat_completion_create(Completions.create)
-        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
-
         AsyncCompletions.create = _wrap_async_chat_completion_create(
             AsyncCompletions.create
         )
+
+        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
         AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)
 
-    def count_tokens(self, s):
-        # type: (OpenAIIntegration, str) -> int
+        if RESPONSES_API_ENABLED:
+            Responses.create = _wrap_responses_create(Responses.create)
+            AsyncResponses.create = _wrap_async_responses_create(AsyncResponses.create)
+
+    def count_tokens(self: OpenAIIntegration, s: str) -> int:
         if self.tiktoken_encoding is not None:
             return len(self.tiktoken_encoding.encode_ordinary(s))
         return 0
 
 
-def _capture_exception(exc):
-    # type: (Any) -> None
+def _capture_exception(exc: Any) -> None:
+    # Close an eventually open span
+    # We need to do this by hand because we are not using the start_span context manager
+    current_span = sentry_sdk.get_current_span()
+    if current_span is not None:
+        current_span.__exit__(None, None, None)
+
     event, hint = event_from_exception(
         exc,
         client_options=sentry_sdk.get_client().options,
```
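`setup_once()` monkey-patches the `create` methods in place, so instrumentation kicks in as soon as the integration is registered. A minimal setup sketch, assuming a placeholder DSN; `send_default_pii=True` is what lets the `should_send_default_pii()` checks in this file record prompts and responses:

```python
import sentry_sdk
from sentry_sdk.integrations.openai import OpenAIIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    send_default_pii=True,  # required for prompt/response text to be attached
    integrations=[
        OpenAIIntegration(
            include_prompts=True,                  # set False to opt out of prompt capture
            tiktoken_encoding_name="cl100k_base",  # optional: enables local token counting
        )
    ],
)
```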
```diff
@@ -70,185 +95,327 @@ def _capture_exception(exc):
     sentry_sdk.capture_event(event, hint=hint)
 
 
-def _calculate_chat_completion_usage(
-    messages, response, span, streaming_message_responses, count_tokens
-):
-    # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
-    completion_tokens = 0  # type: Optional[int]
-    prompt_tokens = 0  # type: Optional[int]
-    total_tokens = 0  # type: Optional[int]
+def _get_usage(usage: Any, names: List[str]) -> int:
+    for name in names:
+        if hasattr(usage, name) and isinstance(getattr(usage, name), int):
+            return getattr(usage, name)
+    return 0
+
+
+def _calculate_token_usage(
+    messages: Optional[Iterable[ChatCompletionMessageParam]],
+    response: Any,
+    span: Span,
+    streaming_message_responses: Optional[List[str]],
+    count_tokens: Callable[..., Any],
+) -> None:
+    input_tokens: Optional[int] = 0
+    input_tokens_cached: Optional[int] = 0
+    output_tokens: Optional[int] = 0
+    output_tokens_reasoning: Optional[int] = 0
+    total_tokens: Optional[int] = 0
+
     if hasattr(response, "usage"):
-        if hasattr(response.usage, "completion_tokens") and isinstance(
-            response.usage.completion_tokens, int
-        ):
-            completion_tokens = response.usage.completion_tokens
-        if hasattr(response.usage, "prompt_tokens") and isinstance(
-            response.usage.prompt_tokens, int
-        ):
-            prompt_tokens = response.usage.prompt_tokens
-        if hasattr(response.usage, "total_tokens") and isinstance(
-            response.usage.total_tokens, int
-        ):
-            total_tokens = response.usage.total_tokens
-
-    if prompt_tokens == 0:
-        for message in messages:
-            if "content" in message:
-                prompt_tokens += count_tokens(message["content"])
-
-    if completion_tokens == 0:
+        input_tokens = _get_usage(response.usage, ["input_tokens", "prompt_tokens"])
+        if hasattr(response.usage, "input_tokens_details"):
+            input_tokens_cached = _get_usage(
+                response.usage.input_tokens_details, ["cached_tokens"]
+            )
+
+        output_tokens = _get_usage(
+            response.usage, ["output_tokens", "completion_tokens"]
+        )
+        if hasattr(response.usage, "output_tokens_details"):
+            output_tokens_reasoning = _get_usage(
+                response.usage.output_tokens_details, ["reasoning_tokens"]
+            )
+
+        total_tokens = _get_usage(response.usage, ["total_tokens"])
+
+    # Manually count tokens
+    if input_tokens == 0:
+        for message in messages or []:
+            if isinstance(message, dict) and "content" in message:
+                input_tokens += count_tokens(message["content"])
+            elif isinstance(message, str):
+                input_tokens += count_tokens(message)
+
+    if output_tokens == 0:
         if streaming_message_responses is not None:
             for message in streaming_message_responses:
-                completion_tokens += count_tokens(message)
+                output_tokens += count_tokens(message)
         elif hasattr(response, "choices"):
             for choice in response.choices:
                 if hasattr(choice, "message"):
-                    completion_tokens += count_tokens(choice.message)
-
-    if prompt_tokens == 0:
-        prompt_tokens = None
-    if completion_tokens == 0:
-        completion_tokens = None
-    if total_tokens == 0:
-        total_tokens = None
-
-    record_token_usage(span, prompt_tokens, completion_tokens, total_tokens)
+                    output_tokens += count_tokens(choice.message)
+
+    # Do not set token data if it is 0
+    input_tokens = input_tokens or None
+    input_tokens_cached = input_tokens_cached or None
+    output_tokens = output_tokens or None
+    output_tokens_reasoning = output_tokens_reasoning or None
+    total_tokens = total_tokens or None
+
+    record_token_usage(
+        span,
+        input_tokens=input_tokens,
+        input_tokens_cached=input_tokens_cached,
+        output_tokens=output_tokens,
+        output_tokens_reasoning=output_tokens_reasoning,
+        total_tokens=total_tokens,
+    )
 
-def _new_chat_completion_common(f, *args, **kwargs):
-    # type: (Any, *Any, **Any) -> Any
-    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
-    if integration is None:
-        return f(*args, **kwargs)
 
-    if "messages" not in kwargs:
-        # invalid call (in all versions of openai), let it return error
-        return f(*args, **kwargs)
+def _set_input_data(
+    span: Span, kwargs: Any, operation: str, integration: OpenAIIntegration
+) -> None:
+    # Input messages (the prompt or data sent to the model)
+    messages = kwargs.get("messages")
+    if messages is None:
+        messages = kwargs.get("input")
+
+    if isinstance(messages, str):
+        messages = [messages]
+
+    if (
+        messages is not None
+        and len(messages) > 0
+        and should_send_default_pii()
+        and integration.include_prompts
+    ):
+        set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
+
+    # Input attributes: Common
+    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
+    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)
+
+    # Input attributes: Optional
+    kwargs_keys_to_attributes = {
+        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
+        "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
+        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+        "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+        "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+    }
+    for key, attribute in kwargs_keys_to_attributes.items():
+        value = kwargs.get(key)
+
+        if value is not NOT_GIVEN and value is not None:
+            set_data_normalized(span, attribute, value)
+
+    # Input attributes: Tools
+    tools = kwargs.get("tools")
+    if tools is not NOT_GIVEN and tools is not None and len(tools) > 0:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
+        )
 
-    try:
-        iter(kwargs["messages"])
-    except TypeError:
-        # invalid call (in all versions), messages must be iterable
-        return f(*args, **kwargs)
 
-    kwargs["messages"] = list(kwargs["messages"])
-    messages = kwargs["messages"]
-    model = kwargs.get("model")
-    streaming = kwargs.get("stream")
+def _set_output_data(
+    span: Span,
+    response: Any,
+    kwargs: Any,
+    integration: OpenAIIntegration,
+    finish_span: bool = True,
+) -> None:
+    if hasattr(response, "model"):
+        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model)
 
-    span = sentry_sdk.start_span(
-        op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE,
-        name="Chat Completion",
-        origin=OpenAIIntegration.origin,
-        only_if_parent=True,
-    )
-    span.__enter__()
+    # Input messages (the prompt or data sent to the model)
+    # used for the token usage calculation
+    messages = kwargs.get("messages")
+    if messages is None:
+        messages = kwargs.get("input")
 
-    res = yield f, args, kwargs
+    if messages is not None and isinstance(messages, str):
+        messages = [messages]
 
-    with capture_internal_exceptions():
+    if hasattr(response, "choices"):
         if should_send_default_pii() and integration.include_prompts:
-            set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, messages)
-
-        set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
-        set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)
+            response_text = [choice.message.dict() for choice in response.choices]
+            if len(response_text) > 0:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    safe_serialize(response_text),
+                )
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
+            span.__exit__(None, None, None)
 
-        if hasattr(res, "choices"):
-            if should_send_default_pii() and integration.include_prompts:
+    elif hasattr(response, "output"):
+        if should_send_default_pii() and integration.include_prompts:
+            response_text = [item.to_dict() for item in response.output]
+            if len(response_text) > 0:
                 set_data_normalized(
                     span,
-                    SPANDATA.AI_RESPONSES,
-                    list(map(lambda x: x.message, res.choices)),
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    safe_serialize(response_text),
                 )
-            _calculate_chat_completion_usage(
-                messages, res, span, None, integration.count_tokens
-            )
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
             span.__exit__(None, None, None)
-        elif hasattr(res, "_iterator"):
-            data_buf: list[list[str]] = []  # one for each choice
-
-            old_iterator = res._iterator
-
-            def new_iterator():
-                # type: () -> Iterator[ChatCompletionChunk]
-                with capture_internal_exceptions():
-                    for x in old_iterator:
-                        if hasattr(x, "choices"):
-                            choice_index = 0
-                            for choice in x.choices:
-                                if hasattr(choice, "delta") and hasattr(
-                                    choice.delta, "content"
-                                ):
-                                    content = choice.delta.content
-                                    if len(data_buf) <= choice_index:
-                                        data_buf.append([])
-                                    data_buf[choice_index].append(content or "")
-                                choice_index += 1
-                        yield x
-                    if len(data_buf) > 0:
-                        all_responses = list(
-                            map(lambda chunk: "".join(chunk), data_buf)
+
+    elif hasattr(response, "_iterator"):
+        data_buf: list[list[str]] = []  # one for each choice
+
+        old_iterator = response._iterator
+
+        def new_iterator() -> Iterator[ChatCompletionChunk]:
+            with capture_internal_exceptions():
+                count_tokens_manually = True
+                for x in old_iterator:
+                    # OpenAI chat completion API
+                    if hasattr(x, "choices"):
+                        choice_index = 0
+                        for choice in x.choices:
+                            if hasattr(choice, "delta") and hasattr(
+                                choice.delta, "content"
+                            ):
+                                content = choice.delta.content
+                                if len(data_buf) <= choice_index:
+                                    data_buf.append([])
+                                data_buf[choice_index].append(content or "")
+                            choice_index += 1
+
+                    # OpenAI responses API
+                    elif hasattr(x, "delta"):
+                        if len(data_buf) == 0:
+                            data_buf.append([])
+                        data_buf[0].append(x.delta or "")
+
+                    # OpenAI responses API end of streaming response
+                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
+                        _calculate_token_usage(
+                            messages,
+                            x.response,
+                            span,
+                            None,
+                            integration.count_tokens,
                         )
-                        if should_send_default_pii() and integration.include_prompts:
-                            set_data_normalized(
-                                span, SPANDATA.AI_RESPONSES, all_responses
-                            )
-                        _calculate_chat_completion_usage(
+                        count_tokens_manually = False
+
+                    yield x
+
+                if len(data_buf) > 0:
+                    all_responses = ["".join(chunk) for chunk in data_buf]
+                    if should_send_default_pii() and integration.include_prompts:
+                        set_data_normalized(
+                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
+                        )
+                    if count_tokens_manually:
+                        _calculate_token_usage(
                             messages,
-                            res,
+                            response,
                             span,
                             all_responses,
                             integration.count_tokens,
                         )
+
+                if finish_span:
                     span.__exit__(None, None, None)
 
-            async def new_iterator_async():
-                # type: () -> AsyncIterator[ChatCompletionChunk]
-                with capture_internal_exceptions():
-                    async for x in old_iterator:
-                        if hasattr(x, "choices"):
-                            choice_index = 0
-                            for choice in x.choices:
-                                if hasattr(choice, "delta") and hasattr(
-                                    choice.delta, "content"
-                                ):
-                                    content = choice.delta.content
-                                    if len(data_buf) <= choice_index:
-                                        data_buf.append([])
-                                    data_buf[choice_index].append(content or "")
-                                choice_index += 1
-                        yield x
-                    if len(data_buf) > 0:
-                        all_responses = list(
-                            map(lambda chunk: "".join(chunk), data_buf)
+        async def new_iterator_async() -> AsyncIterator[ChatCompletionChunk]:
+            with capture_internal_exceptions():
+                count_tokens_manually = True
+                async for x in old_iterator:
+                    # OpenAI chat completion API
+                    if hasattr(x, "choices"):
+                        choice_index = 0
+                        for choice in x.choices:
+                            if hasattr(choice, "delta") and hasattr(
+                                choice.delta, "content"
+                            ):
+                                content = choice.delta.content
+                                if len(data_buf) <= choice_index:
+                                    data_buf.append([])
+                                data_buf[choice_index].append(content or "")
+                            choice_index += 1
+
+                    # OpenAI responses API
+                    elif hasattr(x, "delta"):
+                        if len(data_buf) == 0:
+                            data_buf.append([])
+                        data_buf[0].append(x.delta or "")
+
+                    # OpenAI responses API end of streaming response
+                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
+                        _calculate_token_usage(
+                            messages,
+                            x.response,
+                            span,
+                            None,
+                            integration.count_tokens,
+                        )
+                        count_tokens_manually = False
+
+                    yield x
+
+                if len(data_buf) > 0:
+                    all_responses = ["".join(chunk) for chunk in data_buf]
+                    if should_send_default_pii() and integration.include_prompts:
+                        set_data_normalized(
+                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
                         )
-                        if should_send_default_pii() and integration.include_prompts:
-                            set_data_normalized(
-                                span, SPANDATA.AI_RESPONSES, all_responses
-                            )
-                        _calculate_chat_completion_usage(
+                    if count_tokens_manually:
+                        _calculate_token_usage(
                             messages,
-                            res,
+                            response,
                             span,
                             all_responses,
                             integration.count_tokens,
                         )
+                if finish_span:
                     span.__exit__(None, None, None)
 
-            if str(type(res._iterator)) == "<class 'async_generator'>":
-                res._iterator = new_iterator_async()
-            else:
-                res._iterator = new_iterator()
-
+        if str(type(response._iterator)) == "<class 'async_generator'>":
+            response._iterator = new_iterator_async()
         else:
-            set_data_normalized(span, "unknown_response", True)
+            response._iterator = new_iterator()
+    else:
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
             span.__exit__(None, None, None)
-    return res
 
 
-def _wrap_chat_completion_create(f):
-    # type: (Callable[..., Any]) -> Callable[..., Any]
-    def _execute_sync(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _new_chat_completion_common(f: Any, *args: Any, **kwargs: Any) -> Any:
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    if "messages" not in kwargs:
+        # invalid call (in all versions of openai), let it return error
+        return f(*args, **kwargs)
+
+    try:
+        iter(kwargs["messages"])
+    except TypeError:
+        # invalid call (in all versions), messages must be iterable
+        return f(*args, **kwargs)
+
+    model = kwargs.get("model")
+    operation = "chat"
+
+    span = sentry_sdk.start_span(
+        op=consts.OP.GEN_AI_CHAT,
+        name=f"{operation} {model}",
+        origin=OpenAIIntegration.origin,
+    )
+    span.__enter__()
+
+    _set_input_data(span, kwargs, operation, integration)
+
+    response = yield f, args, kwargs
+
+    _set_output_data(span, response, kwargs, integration, finish_span=True)
+
+    return response
+
+
+def _wrap_chat_completion_create(f: Callable[..., Any]) -> Callable[..., Any]:
+    def _execute_sync(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_chat_completion_common(f, *args, **kwargs)
 
         try:
```
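The rewritten accounting funnels both usage schemas through `_get_usage`, which returns the first listed attribute that exists and is an `int`: the Responses API reports `input_tokens`/`output_tokens`, while Chat Completions reports `prompt_tokens`/`completion_tokens`. A worked sketch with stand-in objects (not real openai types):

```python
# Worked example of the _get_usage lookup above, using simple namespaces in
# place of real openai usage objects.
from types import SimpleNamespace

def _get_usage(usage, names):
    for name in names:
        if hasattr(usage, name) and isinstance(getattr(usage, name), int):
            return getattr(usage, name)
    return 0

chat_usage = SimpleNamespace(prompt_tokens=12, completion_tokens=34, total_tokens=46)
responses_usage = SimpleNamespace(input_tokens=12, output_tokens=34, total_tokens=46)

# Both print 12: "input_tokens" wins when present, "prompt_tokens" is the fallback.
print(_get_usage(responses_usage, ["input_tokens", "prompt_tokens"]))
print(_get_usage(chat_usage, ["input_tokens", "prompt_tokens"]))
```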
```diff
@@ -268,8 +435,7 @@ def _wrap_chat_completion_create(f):
         return e.value
 
     @wraps(f)
-    def _sentry_patched_create_sync(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    def _sentry_patched_create_sync(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None or "messages" not in kwargs:
             # no "messages" means invalid call (in all versions of openai), let it return error
@@ -280,10 +446,8 @@ def _wrap_chat_completion_create(f):
     return _sentry_patched_create_sync
 
 
-def _wrap_async_chat_completion_create(f):
-    # type: (Callable[..., Any]) -> Callable[..., Any]
-    async def _execute_async(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _wrap_async_chat_completion_create(f: Callable[..., Any]) -> Callable[..., Any]:
+    async def _execute_async(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_chat_completion_common(f, *args, **kwargs)
 
         try:
@@ -303,8 +467,7 @@ def _wrap_async_chat_completion_create(f):
         return e.value
 
     @wraps(f)
-    async def _sentry_patched_create_async(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    async def _sentry_patched_create_async(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None or "messages" not in kwargs:
             # no "messages" means invalid call (in all versions of openai), let it return error
@@ -315,58 +478,30 @@ def _wrap_async_chat_completion_create(f):
     return _sentry_patched_create_async
 
 
-def _new_embeddings_create_common(f, *args, **kwargs):
-    # type: (Any, *Any, **Any) -> Any
+def _new_embeddings_create_common(f: Any, *args: Any, **kwargs: Any) -> Any:
     integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
     if integration is None:
         return f(*args, **kwargs)
 
+    model = kwargs.get("model")
+    operation = "embeddings"
+
     with sentry_sdk.start_span(
-        op=consts.OP.OPENAI_EMBEDDINGS_CREATE,
-        name="Embeddings Creation",
+        op=consts.OP.GEN_AI_EMBEDDINGS,
+        name=f"{operation} {model}",
         origin=OpenAIIntegration.origin,
-        only_if_parent=True,
     ) as span:
-        if "input" in kwargs and (
-            should_send_default_pii() and integration.include_prompts
-        ):
-            if isinstance(kwargs["input"], str):
-                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
-            elif (
-                isinstance(kwargs["input"], list)
-                and len(kwargs["input"]) > 0
-                and isinstance(kwargs["input"][0], str)
-            ):
-                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
-            if "model" in kwargs:
-                set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
+        _set_input_data(span, kwargs, operation, integration)
 
         response = yield f, args, kwargs
 
-        prompt_tokens = 0
-        total_tokens = 0
-        if hasattr(response, "usage"):
-            if hasattr(response.usage, "prompt_tokens") and isinstance(
-                response.usage.prompt_tokens, int
-            ):
-                prompt_tokens = response.usage.prompt_tokens
-            if hasattr(response.usage, "total_tokens") and isinstance(
-                response.usage.total_tokens, int
-            ):
-                total_tokens = response.usage.total_tokens
-
-        if prompt_tokens == 0:
-            prompt_tokens = integration.count_tokens(kwargs["input"] or "")
-
-        record_token_usage(span, prompt_tokens, None, total_tokens or prompt_tokens)
+        _set_output_data(span, response, kwargs, integration, finish_span=False)
 
         return response
 
 
-def _wrap_embeddings_create(f):
-    # type: (Any) -> Any
-    def _execute_sync(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _wrap_embeddings_create(f: Any) -> Any:
+    def _execute_sync(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_embeddings_create_common(f, *args, **kwargs)
 
         try:
```
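All of the `_wrap_*` helpers drive the shared `_new_*_common` generators through the same protocol: the generator yields `(f, args, kwargs)` once so the wrapper can run the real call (sync or async), and `gen.send(result)` resumes it so the output handling lives in one place. A simplified, runnable sketch of that protocol with no Sentry objects involved:

```python
# Minimal sketch of the generator-driven wrapping used by _execute_sync and
# _execute_async above (simplified; the "before"/"after" prints stand in for
# span setup via _set_input_data and teardown via _set_output_data).
def _common(f, *args, **kwargs):
    print("before call")            # span is opened here in the real code
    result = yield f, args, kwargs  # hand the real call back to the wrapper
    print("after call")             # span data is attached here
    return result

def execute_sync(f, *args, **kwargs):
    gen = _common(f, *args, **kwargs)
    f, args, kwargs = next(gen)         # run the "before" half
    try:
        gen.send(f(*args, **kwargs))    # run the real call, then the "after" half
    except StopIteration as e:
        return e.value                  # the generator's return value

print(execute_sync(lambda x: x * 2, 21))  # before call / after call / 42
```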
```diff
@@ -386,8 +521,7 @@ def _wrap_embeddings_create(f):
         return e.value
 
     @wraps(f)
-    def _sentry_patched_create_sync(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    def _sentry_patched_create_sync(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None:
             return f(*args, **kwargs)
@@ -397,10 +531,8 @@ def _wrap_embeddings_create(f):
     return _sentry_patched_create_sync
 
 
-def _wrap_async_embeddings_create(f):
-    # type: (Any) -> Any
-    async def _execute_async(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _wrap_async_embeddings_create(f: Any) -> Any:
+    async def _execute_async(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_embeddings_create_common(f, *args, **kwargs)
 
         try:
@@ -420,8 +552,7 @@ def _wrap_async_embeddings_create(f):
         return e.value
 
     @wraps(f)
-    async def _sentry_patched_create_async(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    async def _sentry_patched_create_async(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None:
             return await f(*args, **kwargs)
@@ -429,3 +560,89 @@ def _wrap_async_embeddings_create(f):
         return await _execute_async(f, *args, **kwargs)
 
     return _sentry_patched_create_async
+
+
+def _new_responses_create_common(f: Any, *args: Any, **kwargs: Any) -> Any:
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    model = kwargs.get("model")
+    operation = "responses"
+
+    span = sentry_sdk.start_span(
+        op=consts.OP.GEN_AI_RESPONSES,
+        name=f"{operation} {model}",
+        origin=OpenAIIntegration.origin,
+    )
+    span.__enter__()
+
+    _set_input_data(span, kwargs, operation, integration)
+
+    response = yield f, args, kwargs
+
+    _set_output_data(span, response, kwargs, integration, finish_span=True)
+
+    return response
+
+
+def _wrap_responses_create(f: Any) -> Any:
+    def _execute_sync(f: Any, *args: Any, **kwargs: Any) -> Any:
+        gen = _new_responses_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return e.value
+
+        try:
+            try:
+                result = f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    def _sentry_patched_create_sync(*args: Any, **kwargs: Any) -> Any:
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
+        return _execute_sync(f, *args, **kwargs)
+
+    return _sentry_patched_create_sync
+
+
+def _wrap_async_responses_create(f: Any) -> Any:
+    async def _execute_async(f: Any, *args: Any, **kwargs: Any) -> Any:
+        gen = _new_responses_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return await e.value
+
+        try:
+            try:
+                result = await f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    async def _sentry_patched_responses_async(*args: Any, **kwargs: Any) -> Any:
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return await f(*args, **kwargs)
+
+        return await _execute_async(f, *args, **kwargs)
+
+    return _sentry_patched_responses_async
```