sentry-sdk 2.33.1__py2.py3-none-any.whl → 2.34.0__py2.py3-none-any.whl
This diff compares the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of sentry-sdk has been flagged as possibly problematic.
- sentry_sdk/__init__.py +1 -0
- sentry_sdk/ai/monitoring.py +3 -3
- sentry_sdk/api.py +7 -0
- sentry_sdk/client.py +4 -1
- sentry_sdk/consts.py +28 -5
- sentry_sdk/integrations/celery/__init__.py +1 -0
- sentry_sdk/integrations/django/__init__.py +12 -1
- sentry_sdk/integrations/gnu_backtrace.py +3 -14
- sentry_sdk/integrations/openai.py +346 -146
- sentry_sdk/integrations/openai_agents/utils.py +1 -49
- sentry_sdk/integrations/threading.py +1 -1
- sentry_sdk/serializer.py +9 -1
- sentry_sdk/utils.py +46 -0
- {sentry_sdk-2.33.1.dist-info → sentry_sdk-2.34.0.dist-info}/METADATA +1 -1
- {sentry_sdk-2.33.1.dist-info → sentry_sdk-2.34.0.dist-info}/RECORD +19 -19
- {sentry_sdk-2.33.1.dist-info → sentry_sdk-2.34.0.dist-info}/WHEEL +0 -0
- {sentry_sdk-2.33.1.dist-info → sentry_sdk-2.34.0.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-2.33.1.dist-info → sentry_sdk-2.34.0.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-2.33.1.dist-info → sentry_sdk-2.34.0.dist-info}/top_level.txt +0 -0
sentry_sdk/__init__.py
CHANGED
sentry_sdk/ai/monitoring.py
CHANGED
@@ -40,7 +40,7 @@ def ai_track(description, **span_kwargs):
                 for k, v in kwargs.pop("sentry_data", {}).items():
                     span.set_data(k, v)
                 if curr_pipeline:
-                    span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
+                    span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
                 return f(*args, **kwargs)
             else:
                 _ai_pipeline_name.set(description)
@@ -69,7 +69,7 @@ def ai_track(description, **span_kwargs):
                 for k, v in kwargs.pop("sentry_data", {}).items():
                     span.set_data(k, v)
                 if curr_pipeline:
-                    span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
+                    span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
                 return await f(*args, **kwargs)
             else:
                 _ai_pipeline_name.set(description)
@@ -108,7 +108,7 @@ def record_token_usage(
     # TODO: move pipeline name elsewhere
     ai_pipeline_name = get_ai_pipeline_name()
     if ai_pipeline_name:
-        span.set_data(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
+        span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, ai_pipeline_name)

     if input_tokens is not None:
         span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
sentry_sdk/api.py
CHANGED
@@ -84,6 +84,7 @@ __all__ = [
     "monitor",
     "start_session",
     "end_session",
+    "set_transaction_name",
 ]


@@ -466,3 +467,9 @@ def start_session(
 def end_session():
     # type: () -> None
     return get_isolation_scope().end_session()
+
+
+@scopemethod
+def set_transaction_name(name, source=None):
+    # type: (str, Optional[str]) -> None
+    return get_current_scope().set_transaction_name(name, source)
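The newly exported helper renames the transaction on the current scope. A minimal usage sketch (DSN and names are placeholders; "custom" is one of the documented transaction-source values):

    import sentry_sdk

    sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")

    with sentry_sdk.start_transaction(name="initial-name"):
        # Rename the active transaction without reaching into scope internals.
        sentry_sdk.set_transaction_name("checkout", source="custom")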
sentry_sdk/client.py
CHANGED
@@ -47,7 +47,6 @@ from sentry_sdk.profiler.transaction_profiler import (
 )
 from sentry_sdk.scrubber import EventScrubber
 from sentry_sdk.monitor import Monitor
-from sentry_sdk.spotlight import setup_spotlight

 if TYPE_CHECKING:
     from typing import Any
@@ -429,6 +428,10 @@ class _Client(BaseClient):
         )

         if self.options.get("spotlight"):
+            # This is intentionally here to prevent setting up spotlight
+            # stuff we don't need unless spotlight is explicitly enabled
+            from sentry_sdk.spotlight import setup_spotlight
+
             self.spotlight = setup_spotlight(self.options)
         if not self.options["dsn"]:
             sample_all = lambda *_args, **_kwargs: 1.0
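The Spotlight machinery is now imported only when the option is actually set, shaving import cost for everyone else. Enabling it is unchanged (sketch; True can also be replaced with a sidecar URL string):

    import sentry_sdk

    # Passing spotlight=True triggers the now-lazy import of
    # sentry_sdk.spotlight.setup_spotlight.
    sentry_sdk.init(spotlight=True)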
sentry_sdk/consts.py
CHANGED
@@ -3,7 +3,10 @@ from enum import Enum
 from typing import TYPE_CHECKING

 # up top to prevent circular import due to integration import
-DEFAULT_MAX_VALUE_LENGTH = 1024
+# This is more or less an arbitrary large-ish value for now, so that we allow
+# pretty long strings (like LLM prompts), but still have *some* upper limit
+# until we verify that removing the trimming completely is safe.
+DEFAULT_MAX_VALUE_LENGTH = 100_000

 DEFAULT_MAX_STACK_FRAMES = 100
 DEFAULT_ADD_FULL_STACK = False
@@ -169,6 +172,7 @@ class SPANDATA:
     AI_PIPELINE_NAME = "ai.pipeline.name"
     """
     Name of the AI pipeline or chain being executed.
+    DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
     Example: "qa-pipeline"
     """

@@ -229,6 +233,7 @@ class SPANDATA:
     AI_STREAMING = "ai.streaming"
     """
     Whether or not the AI model call's response was streamed back asynchronously
+    DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
     Example: true
     """

@@ -372,6 +377,24 @@ class SPANDATA:
     Example: "chat"
     """

+    GEN_AI_PIPELINE_NAME = "gen_ai.pipeline.name"
+    """
+    Name of the AI pipeline or chain being executed.
+    Example: "qa-pipeline"
+    """
+
+    GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
+    """
+    Exact model identifier used to generate the response
+    Example: gpt-4o-mini-2024-07-18
+    """
+
+    GEN_AI_RESPONSE_STREAMING = "gen_ai.response.streaming"
+    """
+    Whether or not the AI model call's response was streamed back asynchronously
+    Example: true
+    """
+
     GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
     """
     The model's response text messages.
@@ -411,7 +434,7 @@ class SPANDATA:
     GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
     """
     The model identifier being used for the request.
-    Example: "gpt-4-turbo
+    Example: "gpt-4-turbo"
     """

     GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
@@ -649,9 +672,11 @@ class OP:
     FUNCTION_AWS = "function.aws"
     FUNCTION_GCP = "function.gcp"
     GEN_AI_CHAT = "gen_ai.chat"
+    GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
     GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
     GEN_AI_HANDOFF = "gen_ai.handoff"
     GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
+    GEN_AI_RESPONSES = "gen_ai.responses"
     GRAPHQL_EXECUTE = "graphql.execute"
     GRAPHQL_MUTATION = "graphql.mutation"
     GRAPHQL_PARSE = "graphql.parse"
@@ -674,8 +699,6 @@ class OP:
     MIDDLEWARE_STARLITE = "middleware.starlite"
     MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
     MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
-    OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai"
-    OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai"
     HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
         "ai.chat_completions.create.huggingface_hub"
     )
@@ -1181,4 +1204,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options


-VERSION = "2.33.1"
+VERSION = "2.34.0"
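The much larger trimming default exists to keep long strings such as LLM prompts intact; applications that care about event size can still lower it via the existing max_value_length option (illustrative value):

    import sentry_sdk

    # Override the new 100_000-character default per app.
    sentry_sdk.init(max_value_length=2_000)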
sentry_sdk/integrations/django/__init__.py
CHANGED

@@ -7,7 +7,7 @@ from importlib import import_module
 import sentry_sdk
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.scope import add_global_event_processor, should_send_default_pii
-from sentry_sdk.serializer import add_global_repr_processor
+from sentry_sdk.serializer import add_global_repr_processor, add_repr_sequence_type
 from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource
 from sentry_sdk.tracing_utils import add_query_source, record_sql_queries
 from sentry_sdk.utils import (
@@ -269,6 +269,7 @@ class DjangoIntegration(Integration):
         patch_views()
         patch_templates()
         patch_signals()
+        add_template_context_repr_sequence()

         if patch_caching is not None:
             patch_caching()
@@ -745,3 +746,13 @@ def _set_db_data(span, cursor_or_db):
     server_socket_address = connection_params.get("unix_socket")
     if server_socket_address is not None:
         span.set_data(SPANDATA.SERVER_SOCKET_ADDRESS, server_socket_address)
+
+
+def add_template_context_repr_sequence():
+    # type: () -> None
+    try:
+        from django.template.context import BaseContext
+
+        add_repr_sequence_type(BaseContext)
+    except Exception:
+        pass
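For context, Django template contexts are stacks of dicts rather than plain sequences, so the serializer previously fell back to repr() for them. Registering BaseContext as a sequence type lets events render the context's layers instead. A small sketch of the iteration behavior being relied on (assumes Django is installed):

    from django.template.context import Context

    ctx = Context({"user": "alice"})

    # BaseContext subclasses iterate over their stacked dicts, which is what
    # allows the serializer to walk them as a sequence once registered.
    for layer in ctx:
        print(layer)  # e.g. the builtins layer, then {'user': 'alice'}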
sentry_sdk/integrations/gnu_backtrace.py
CHANGED

@@ -12,23 +12,12 @@ if TYPE_CHECKING:
     from sentry_sdk._types import Event


-
-TYPE_RE = r"[a-zA-Z0-9._:<>,-]+"
-HEXVAL_RE = r"[A-Fa-f0-9]+"
-
+FUNCTION_RE = r"[^@]+?)\s+@\s+0x[0-9a-fA-F]+"

 FRAME_RE = r"""
-^(?P<index>\d+)\.\s
-(?P<package>{MODULE_RE})\(
-(?P<retval>{TYPE_RE}\ )?
-((?P<function>{TYPE_RE})
-(?P<args>\(.*\))?
-)?
-((?P<constoffset>\ const)?\+0x(?P<offset>{HEXVAL_RE}))?
-\)\s
-\[0x(?P<retaddr>{HEXVAL_RE})\]$
+^(?P<index>\d+)\.\s+(?P<function>{FUNCTION_RE}\s+in\s+(?P<package>.+)$
 """.format(
-    MODULE_RE=MODULE_RE, TYPE_RE=TYPE_RE, HEXVAL_RE=HEXVAL_RE
+    FUNCTION_RE=FUNCTION_RE,
 )

 FRAME_RE = re.compile(FRAME_RE, re.MULTILINE | re.VERBOSE)
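A quick sanity check of the new, much simpler pattern against the frame format it now targets (the sample line is illustrative; the pattern is assembled exactly as in the diff — note the function group's closing parenthesis deliberately lives inside FUNCTION_RE):

    import re

    FUNCTION_RE = r"[^@]+?)\s+@\s+0x[0-9a-fA-F]+"
    FRAME_RE = re.compile(
        r"^(?P<index>\d+)\.\s+(?P<function>{FUNCTION_RE}\s+in\s+(?P<package>.+)$".format(
            FUNCTION_RE=FUNCTION_RE
        ),
        re.MULTILINE | re.VERBOSE,
    )

    line = "5. DB::Exception::Exception(...) @ 0x0000a1b2c3 in /usr/bin/clickhouse"
    m = FRAME_RE.match(line)
    print(m.group("index"), m.group("function"), m.group("package"), sep=" | ")
    # -> 5 | DB::Exception::Exception(...) | /usr/bin/clickhouse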
sentry_sdk/integrations/openai.py
CHANGED

@@ -10,6 +10,7 @@ from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
+    safe_serialize,
 )

 from typing import TYPE_CHECKING
@@ -27,6 +28,14 @@ try:
 except ImportError:
     raise DidNotEnable("OpenAI not installed")

+RESPONSES_API_ENABLED = True
+try:
+    # responses API support was introduced in v1.66.0
+    from openai.resources.responses import Responses, AsyncResponses
+    from openai.types.responses.response_completed_event import ResponseCompletedEvent
+except ImportError:
+    RESPONSES_API_ENABLED = False
+

 class OpenAIIntegration(Integration):
     identifier = "openai"
@@ -46,13 +55,17 @@ class OpenAIIntegration(Integration):
     def setup_once():
         # type: () -> None
         Completions.create = _wrap_chat_completion_create(Completions.create)
-        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
-
         AsyncCompletions.create = _wrap_async_chat_completion_create(
             AsyncCompletions.create
         )
+
+        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
         AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)

+        if RESPONSES_API_ENABLED:
+            Responses.create = _wrap_responses_create(Responses.create)
+            AsyncResponses.create = _wrap_async_responses_create(AsyncResponses.create)
+
     def count_tokens(self, s):
         # type: (OpenAIIntegration, str) -> int
         if self.tiktoken_encoding is not None:
@@ -62,6 +75,12 @@ class OpenAIIntegration(Integration):

 def _capture_exception(exc):
     # type: (Any) -> None
+    # Close an eventually open span
+    # We need to do this by hand because we are not using the start_span context manager
+    current_span = sentry_sdk.get_current_span()
+    if current_span is not None:
+        current_span.__exit__(None, None, None)
+
     event, hint = event_from_exception(
         exc,
         client_options=sentry_sdk.get_client().options,
@@ -81,7 +100,7 @@ def _get_usage(usage, names):
 def _calculate_token_usage(
     messages, response, span, streaming_message_responses, count_tokens
 ):
-    # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
+    # type: (Optional[Iterable[ChatCompletionMessageParam]], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
     input_tokens = 0  # type: Optional[int]
     input_tokens_cached = 0  # type: Optional[int]
     output_tokens = 0  # type: Optional[int]
@@ -106,13 +125,13 @@ def _calculate_token_usage(
     total_tokens = _get_usage(response.usage, ["total_tokens"])

     # Manually count tokens
-    # TODO: when implementing responses API, check for responses API
     if input_tokens == 0:
-        for message in messages:
-            if "content" in message:
+        for message in messages or []:
+            if isinstance(message, dict) and "content" in message:
                 input_tokens += count_tokens(message["content"])
+            elif isinstance(message, str):
+                input_tokens += count_tokens(message)

-    # TODO: when implementing responses API, check for responses API
     if output_tokens == 0:
         if streaming_message_responses is not None:
             for message in streaming_message_responses:
@@ -139,138 +158,254 @@ def _calculate_token_usage(
     )


[removed implementation largely elided by the diff viewer; "…" marks elided runs]
-def _new_chat_completion_common(f, *args, **kwargs):
-    # type: (Any, Any, Any) -> Any
-    …
-    if "messages" not in kwargs:
-        # invalid call (in all versions of openai), let it return error
-        return f(*args, **kwargs)
-    …
-        return f(*args, **kwargs)
-    …
-    span = sentry_sdk.start_span(
-        op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE,
-        name="Chat Completion",
-        origin=OpenAIIntegration.origin,
-    )
-    span.__enter__()
-    …
-                SPANDATA.AI_RESPONSES,
-    …
-            set_data_normalized(
-                span, SPANDATA.AI_RESPONSES, all_responses
-            )
-    …
-        else:
-            res._iterator = new_iterator()
-    …
+def _set_input_data(span, kwargs, operation, integration):
+    # type: (Span, dict[str, Any], str, OpenAIIntegration) -> None
+    # Input messages (the prompt or data sent to the model)
+    messages = kwargs.get("messages")
+    if messages is None:
+        messages = kwargs.get("input")
+
+    if isinstance(messages, str):
+        messages = [messages]
+
+    if (
+        messages is not None
+        and len(messages) > 0
+        and should_send_default_pii()
+        and integration.include_prompts
+    ):
+        set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
+
+    # Input attributes: Common
+    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
+    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)
+
+    # Input attributes: Optional
+    kwargs_keys_to_attributes = {
+        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
+        "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
+        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+        "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+        "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+    }
+    for key, attribute in kwargs_keys_to_attributes.items():
+        value = kwargs.get(key)
+        if value is not None:
+            set_data_normalized(span, attribute, value)
+
+    # Input attributes: Tools
+    tools = kwargs.get("tools")
+    if tools is not None and len(tools) > 0:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
+        )
+
+
+def _set_output_data(span, response, kwargs, integration, finish_span=True):
+    # type: (Span, Any, dict[str, Any], OpenAIIntegration, bool) -> None
+    if hasattr(response, "model"):
+        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model)
+
+    # Input messages (the prompt or data sent to the model)
+    # used for the token usage calculation
+    messages = kwargs.get("messages")
+    if messages is None:
+        messages = kwargs.get("input")
+
+    if messages is not None and isinstance(messages, str):
+        messages = [messages]
+
+    if hasattr(response, "choices"):
+        if should_send_default_pii() and integration.include_prompts:
+            response_text = [choice.message.dict() for choice in response.choices]
+            if len(response_text) > 0:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    safe_serialize(response_text),
+                )
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
+            span.__exit__(None, None, None)

+    elif hasattr(response, "output"):
+        if should_send_default_pii() and integration.include_prompts:
+            response_text = [item.to_dict() for item in response.output]
+            if len(response_text) > 0:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    safe_serialize(response_text),
+                )
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
+            span.__exit__(None, None, None)
+
+    elif hasattr(response, "_iterator"):
+        data_buf: list[list[str]] = []  # one for each choice
+
+        old_iterator = response._iterator
+
+        def new_iterator():
+            # type: () -> Iterator[ChatCompletionChunk]
+            with capture_internal_exceptions():
+                count_tokens_manually = True
+                for x in old_iterator:
+                    # OpenAI chat completion API
+                    if hasattr(x, "choices"):
+                        choice_index = 0
+                        for choice in x.choices:
+                            if hasattr(choice, "delta") and hasattr(
+                                choice.delta, "content"
+                            ):
+                                content = choice.delta.content
+                                if len(data_buf) <= choice_index:
+                                    data_buf.append([])
+                                data_buf[choice_index].append(content or "")
+                            choice_index += 1
+
+                    # OpenAI responses API
+                    elif hasattr(x, "delta"):
+                        if len(data_buf) == 0:
+                            data_buf.append([])
+                        data_buf[0].append(x.delta or "")
+
+                    # OpenAI responses API end of streaming response
+                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
+                        _calculate_token_usage(
+                            messages,
+                            x.response,
+                            span,
+                            None,
+                            integration.count_tokens,
+                        )
+                        count_tokens_manually = False
+
+                    yield x
+
+                if len(data_buf) > 0:
+                    all_responses = ["".join(chunk) for chunk in data_buf]
+                    if should_send_default_pii() and integration.include_prompts:
+                        set_data_normalized(
+                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
+                        )
+                    if count_tokens_manually:
+                        _calculate_token_usage(
+                            messages,
+                            response,
+                            span,
+                            all_responses,
+                            integration.count_tokens,
+                        )
+
+                if finish_span:
+                    span.__exit__(None, None, None)
+
+        async def new_iterator_async():
+            # type: () -> AsyncIterator[ChatCompletionChunk]
+            with capture_internal_exceptions():
+                count_tokens_manually = True
+                async for x in old_iterator:
+                    # OpenAI chat completion API
+                    if hasattr(x, "choices"):
+                        choice_index = 0
+                        for choice in x.choices:
+                            if hasattr(choice, "delta") and hasattr(
+                                choice.delta, "content"
+                            ):
+                                content = choice.delta.content
+                                if len(data_buf) <= choice_index:
+                                    data_buf.append([])
+                                data_buf[choice_index].append(content or "")
+                            choice_index += 1
+
+                    # OpenAI responses API
+                    elif hasattr(x, "delta"):
+                        if len(data_buf) == 0:
+                            data_buf.append([])
+                        data_buf[0].append(x.delta or "")
+
+                    # OpenAI responses API end of streaming response
+                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
+                        _calculate_token_usage(
+                            messages,
+                            x.response,
+                            span,
+                            None,
+                            integration.count_tokens,
+                        )
+                        count_tokens_manually = False
+
+                    yield x
+
+                if len(data_buf) > 0:
+                    all_responses = ["".join(chunk) for chunk in data_buf]
+                    if should_send_default_pii() and integration.include_prompts:
+                        set_data_normalized(
+                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
+                        )
+                    if count_tokens_manually:
+                        _calculate_token_usage(
+                            messages,
+                            response,
+                            span,
+                            all_responses,
+                            integration.count_tokens,
+                        )
+                if finish_span:
+                    span.__exit__(None, None, None)
+
+        if str(type(response._iterator)) == "<class 'async_generator'>":
+            response._iterator = new_iterator_async()
+        else:
+            response._iterator = new_iterator()
+
+    else:
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
+            span.__exit__(None, None, None)
+
+
+def _new_chat_completion_common(f, *args, **kwargs):
+    # type: (Any, Any, Any) -> Any
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    if "messages" not in kwargs:
+        # invalid call (in all versions of openai), let it return error
+        return f(*args, **kwargs)
+
+    try:
+        iter(kwargs["messages"])
+    except TypeError:
+        # invalid call (in all versions), messages must be iterable
+        return f(*args, **kwargs)
+
+    model = kwargs.get("model")
+    operation = "chat"
+
+    span = sentry_sdk.start_span(
+        op=consts.OP.GEN_AI_CHAT,
+        name=f"{operation} {model}",
+        origin=OpenAIIntegration.origin,
+    )
+    span.__enter__()
+
+    _set_input_data(span, kwargs, operation, integration)
+
+    response = yield f, args, kwargs
+
+    _set_output_data(span, response, kwargs, integration, finish_span=True)
+
+    return response


 def _wrap_chat_completion_create(f):
     # type: (Callable[..., Any]) -> Callable[..., Any]
     def _execute_sync(f, *args, **kwargs):
-        # type: (Any, …
+        # type: (Any, Any, Any) -> Any
         gen = _new_chat_completion_common(f, *args, **kwargs)

         try:
@@ -291,7 +426,7 @@ def _wrap_chat_completion_create(f):

     @wraps(f)
     def _sentry_patched_create_sync(*args, **kwargs):
-        # type: (…
+        # type: (Any, Any) -> Any
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None or "messages" not in kwargs:
             # no "messages" means invalid call (in all versions of openai), let it return error
@@ -305,7 +440,7 @@ def _wrap_chat_completion_create(f):
 def _wrap_async_chat_completion_create(f):
     # type: (Callable[..., Any]) -> Callable[..., Any]
     async def _execute_async(f, *args, **kwargs):
-        # type: (Any, …
+        # type: (Any, Any, Any) -> Any
         gen = _new_chat_completion_common(f, *args, **kwargs)

         try:
@@ -326,7 +461,7 @@ def _wrap_async_chat_completion_create(f):

     @wraps(f)
     async def _sentry_patched_create_async(*args, **kwargs):
-        # type: (…
+        # type: (Any, Any) -> Any
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None or "messages" not in kwargs:
             # no "messages" means invalid call (in all versions of openai), let it return error
@@ -338,52 +473,24 @@ def _wrap_async_chat_completion_create(f):


 def _new_embeddings_create_common(f, *args, **kwargs):
-    # type: (Any, …
+    # type: (Any, Any, Any) -> Any
     integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
     if integration is None:
         return f(*args, **kwargs)

+    model = kwargs.get("model")
+    operation = "embeddings"
+
     with sentry_sdk.start_span(
-        op=consts.OP.OPENAI_EMBEDDINGS_CREATE,
-        …
+        op=consts.OP.GEN_AI_EMBEDDINGS,
+        name=f"{operation} {model}",
         origin=OpenAIIntegration.origin,
     ) as span:
-        if (
-            should_send_default_pii() and integration.include_prompts
-        ):
-            if isinstance(kwargs["input"], str):
-                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
-            elif (
-                isinstance(kwargs["input"], list)
-                and len(kwargs["input"]) > 0
-                and isinstance(kwargs["input"][0], str)
-            ):
-                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
-            if "model" in kwargs:
-                set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
+        _set_input_data(span, kwargs, operation, integration)

         response = yield f, args, kwargs

-        …
-        total_tokens = 0
-        if hasattr(response, "usage"):
-            if hasattr(response.usage, "prompt_tokens") and isinstance(
-                response.usage.prompt_tokens, int
-            ):
-                input_tokens = response.usage.prompt_tokens
-            if hasattr(response.usage, "total_tokens") and isinstance(
-                response.usage.total_tokens, int
-            ):
-                total_tokens = response.usage.total_tokens
-
-        if input_tokens == 0:
-            input_tokens = integration.count_tokens(kwargs["input"] or "")
-
-        record_token_usage(
-            span,
-            input_tokens=input_tokens,
-            total_tokens=total_tokens or input_tokens,
-        )
+        _set_output_data(span, response, kwargs, integration, finish_span=False)

         return response

@@ -391,7 +498,7 @@ def _new_embeddings_create_common(f, *args, **kwargs):
 def _wrap_embeddings_create(f):
     # type: (Any) -> Any
     def _execute_sync(f, *args, **kwargs):
-        # type: (Any, …
+        # type: (Any, Any, Any) -> Any
         gen = _new_embeddings_create_common(f, *args, **kwargs)

         try:
@@ -412,7 +519,7 @@ def _wrap_embeddings_create(f):

     @wraps(f)
     def _sentry_patched_create_sync(*args, **kwargs):
-        # type: (…
+        # type: (Any, Any) -> Any
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None:
             return f(*args, **kwargs)
@@ -425,7 +532,7 @@ def _wrap_embeddings_create(f):
 def _wrap_async_embeddings_create(f):
     # type: (Any) -> Any
     async def _execute_async(f, *args, **kwargs):
-        # type: (Any, …
+        # type: (Any, Any, Any) -> Any
         gen = _new_embeddings_create_common(f, *args, **kwargs)

         try:
@@ -446,7 +553,7 @@ def _wrap_async_embeddings_create(f):

     @wraps(f)
     async def _sentry_patched_create_async(*args, **kwargs):
-        # type: (…
+        # type: (Any, Any) -> Any
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None:
             return await f(*args, **kwargs)
@@ -454,3 +561,96 @@ def _wrap_async_embeddings_create(f):
         return await _execute_async(f, *args, **kwargs)

     return _sentry_patched_create_async
+
+
+def _new_responses_create_common(f, *args, **kwargs):
+    # type: (Any, Any, Any) -> Any
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    model = kwargs.get("model")
+    operation = "responses"
+
+    span = sentry_sdk.start_span(
+        op=consts.OP.GEN_AI_RESPONSES,
+        name=f"{operation} {model}",
+        origin=OpenAIIntegration.origin,
+    )
+    span.__enter__()
+
+    _set_input_data(span, kwargs, operation, integration)
+
+    response = yield f, args, kwargs
+
+    _set_output_data(span, response, kwargs, integration, finish_span=True)
+
+    return response
+
+
+def _wrap_responses_create(f):
+    # type: (Any) -> Any
+    def _execute_sync(f, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        gen = _new_responses_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return e.value
+
+        try:
+            try:
+                result = f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    def _sentry_patched_create_sync(*args, **kwargs):
+        # type: (Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
+        return _execute_sync(f, *args, **kwargs)
+
+    return _sentry_patched_create_sync
+
+
+def _wrap_async_responses_create(f):
+    # type: (Any) -> Any
+    async def _execute_async(f, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        gen = _new_responses_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return await e.value
+
+        try:
+            try:
+                result = await f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    async def _sentry_patched_responses_async(*args, **kwargs):
+        # type: (Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return await f(*args, **kwargs)
+
+        return await _execute_async(f, *args, **kwargs)
+
+    return _sentry_patched_responses_async
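With this release and openai>=1.66.0 installed, Responses API calls are traced automatically under the new gen_ai.responses op; a minimal usage sketch (DSN, API key, and model are placeholders):

    import sentry_sdk
    from openai import OpenAI

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
        traces_sample_rate=1.0,
        send_default_pii=True,  # required for prompts/responses to be attached
    )

    client = OpenAI(api_key="sk-...")
    with sentry_sdk.start_transaction(name="llm-call"):
        result = client.responses.create(model="gpt-4o-mini", input="Say hello")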
sentry_sdk/integrations/openai_agents/utils.py
CHANGED

@@ -1,16 +1,14 @@
-import json
 import sentry_sdk
 from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations import DidNotEnable
 from sentry_sdk.scope import should_send_default_pii
-from sentry_sdk.utils import event_from_exception
+from sentry_sdk.utils import event_from_exception, safe_serialize

 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
     from typing import Any
     from typing import Callable
-    from typing import Union
     from agents import Usage

 try:
@@ -162,49 +160,3 @@ def _set_output_data(span, result):
     span.set_data(
         SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(output_messages["response"])
     )
-
-
-def safe_serialize(data):
-    # type: (Any) -> str
-    """Safely serialize to a readable string."""
-
-    def serialize_item(item):
-        # type: (Any) -> Union[str, dict[Any, Any], list[Any], tuple[Any, ...]]
-        if callable(item):
-            try:
-                module = getattr(item, "__module__", None)
-                qualname = getattr(item, "__qualname__", None)
-                name = getattr(item, "__name__", "anonymous")
-
-                if module and qualname:
-                    full_path = f"{module}.{qualname}"
-                elif module and name:
-                    full_path = f"{module}.{name}"
-                else:
-                    full_path = name
-
-                return f"<function {full_path}>"
-            except Exception:
-                return f"<callable {type(item).__name__}>"
-        elif isinstance(item, dict):
-            return {k: serialize_item(v) for k, v in item.items()}
-        elif isinstance(item, (list, tuple)):
-            return [serialize_item(x) for x in item]
-        elif hasattr(item, "__dict__"):
-            try:
-                attrs = {
-                    k: serialize_item(v)
-                    for k, v in vars(item).items()
-                    if not k.startswith("_")
-                }
-                return f"<{type(item).__name__} {attrs}>"
-            except Exception:
-                return repr(item)
-        else:
-            return item
-
-    try:
-        serialized = serialize_item(data)
-        return json.dumps(serialized, default=str)
-    except Exception:
-        return str(data)
sentry_sdk/integrations/threading.py
CHANGED

@@ -120,7 +120,7 @@ def _wrap_run(isolation_scope_to_use, current_scope_to_use, old_run_func):
         # type: () -> Any
         try:
             self = current_thread()
-            return old_run_func(self, *a, **kw)
+            return old_run_func(self, *a[1:], **kw)
         except Exception:
             reraise(*_capture_exception())

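The one-line threading fix drops a duplicated `self`: when the wrapper is installed on a Thread subclass, the descriptor protocol already passes the instance as the first positional argument, so forwarding an explicit `self` plus the full *a handed `run()` two selves. A self-contained sketch of the failure mode (hypothetical names, not the SDK's actual wrapper):

    import threading

    class Worker(threading.Thread):
        def run(self):
            print("running in", threading.current_thread().name)

    old_run_func = Worker.run  # plain function; expects self explicitly

    def wrapped_run(*a, **kw):
        self = threading.current_thread()
        # With *a instead of *a[1:], a[0] (the bound instance) would be
        # passed again after the explicit self -> TypeError.
        return old_run_func(self, *a[1:], **kw)

    Worker.run = wrapped_run
    w = Worker()
    w.start()
    w.join()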
sentry_sdk/serializer.py
CHANGED
@@ -63,6 +63,14 @@ def add_global_repr_processor(processor):
     global_repr_processors.append(processor)


+sequence_types = [Sequence, Set]  # type: List[type]
+
+
+def add_repr_sequence_type(ty):
+    # type: (type) -> None
+    sequence_types.append(ty)
+
+
 class Memo:
     __slots__ = ("_ids", "_objs")

@@ -332,7 +340,7 @@ def serialize(event, **kwargs):
             return rv_dict

         elif not isinstance(obj, serializable_str_types) and isinstance(
-            obj, (Sequence, Set)
+            obj, tuple(sequence_types)
         ):
             rv_list = []

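The Django change above uses this hook to register BaseContext; any iterable, sized type can be registered the same way so events render it as a list rather than a repr() string. A hypothetical example (RingBuffer is an assumed user-defined class):

    from sentry_sdk.serializer import add_repr_sequence_type

    class RingBuffer:
        def __init__(self, items):
            self._items = list(items)

        def __iter__(self):
            return iter(self._items)

        def __len__(self):
            return len(self._items)

    add_repr_sequence_type(RingBuffer)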
sentry_sdk/utils.py
CHANGED
@@ -1938,3 +1938,49 @@ def try_convert(convert_func, value):
         return convert_func(value)
     except Exception:
         return None
+
+
+def safe_serialize(data):
+    # type: (Any) -> str
+    """Safely serialize to a readable string."""
+
+    def serialize_item(item):
+        # type: (Any) -> Union[str, dict[Any, Any], list[Any], tuple[Any, ...]]
+        if callable(item):
+            try:
+                module = getattr(item, "__module__", None)
+                qualname = getattr(item, "__qualname__", None)
+                name = getattr(item, "__name__", "anonymous")
+
+                if module and qualname:
+                    full_path = f"{module}.{qualname}"
+                elif module and name:
+                    full_path = f"{module}.{name}"
+                else:
+                    full_path = name
+
+                return f"<function {full_path}>"
+            except Exception:
+                return f"<callable {type(item).__name__}>"
+        elif isinstance(item, dict):
+            return {k: serialize_item(v) for k, v in item.items()}
+        elif isinstance(item, (list, tuple)):
+            return [serialize_item(x) for x in item]
+        elif hasattr(item, "__dict__"):
+            try:
+                attrs = {
+                    k: serialize_item(v)
+                    for k, v in vars(item).items()
+                    if not k.startswith("_")
+                }
+                return f"<{type(item).__name__} {attrs}>"
+            except Exception:
+                return repr(item)
+        else:
+            return item
+
+    try:
+        serialized = serialize_item(data)
+        return json.dumps(serialized, default=str)
+    except Exception:
+        return str(data)
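safe_serialize, moved here from the openai_agents integration so the OpenAI integration can share it, is importable directly; an illustrative call (the exact output text is an assumption about the runtime):

    from sentry_sdk.utils import safe_serialize

    print(safe_serialize({"handler": print, "args": (1, {"x": [2, 3]})}))
    # Expected shape: {"handler": "<function builtins.print>", "args": [1, {"x": [2, 3]}]}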
{sentry_sdk-2.33.1.dist-info → sentry_sdk-2.34.0.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-sentry_sdk/__init__.py,sha256=…
+sentry_sdk/__init__.py,sha256=a9ZsEg5C8RSuLekRk1dbS_9-4ej5E2ebvktY5YPnT-k,1283
 sentry_sdk/_compat.py,sha256=Pxcg6cUYPiOoXIFfLI_H3ATb7SfrcXOeZdzpeWv3umI,3116
 sentry_sdk/_init_implementation.py,sha256=WL54d8nggjRunBm3XlG-sWSx4yS5lpYYggd7YBWpuVk,2559
 sentry_sdk/_log_batcher.py,sha256=bBpspIlf1ejxlbudo17bZOSir226LGAdjDe_3kHkOro,5085
@@ -6,10 +6,10 @@ sentry_sdk/_lru_cache.py,sha256=phZMBm9EKU1m67OOApnKCffnlWAlVz9bYjig7CglQuk,1229
 sentry_sdk/_queue.py,sha256=UUzbmliDYmdVYiDA32NMYkX369ElWMFNSj5kodqVQZE,11250
 sentry_sdk/_types.py,sha256=TMdmMSxc0dYErvRA5ikEnNxH_Iwb2Wiw3ZUMNlp0HCA,10482
 sentry_sdk/_werkzeug.py,sha256=m3GPf-jHd8v3eVOfBHaKw5f0uHoLkXrSO1EcY-8EisY,3734
-sentry_sdk/api.py,sha256=…
+sentry_sdk/api.py,sha256=K4cNSmsJXI1HFyeCdHMans-IgQuDxviyhO4H2rrMkWY,12387
 sentry_sdk/attachments.py,sha256=0Dylhm065O6hNFjB40fWCd5Hg4qWSXndmi1TPWglZkI,3109
-sentry_sdk/client.py,sha256=…
-sentry_sdk/consts.py,sha256=…
+sentry_sdk/client.py,sha256=gHznIT7uGb6-h5gZFtN2qmjUEZNOuqIJQXwB1V-lSPU,38839
+sentry_sdk/consts.py,sha256=ikw6F3xZxjhylV9Dy8r2uj78guKAhK2KekpxZj6YZCo,45825
 sentry_sdk/debug.py,sha256=ddBehQlAuQC1sg1XO-N4N3diZ0x0iT5RWJwFdrtcsjw,1019
 sentry_sdk/envelope.py,sha256=Mgcib0uLm_5tSVzOrznRLdK9B3CjQ6TEgM1ZIZIfjWo,10355
 sentry_sdk/feature_flags.py,sha256=99JRig6TBkrkBzVCKqYcmVgjsuA_Hk-ul7jFHGhJplc,2233
@@ -20,7 +20,7 @@ sentry_sdk/monitor.py,sha256=52CG1m2e8okFDVoTpbqfm9zeeaLa0ciC_r9x2RiXuDg,3639
 sentry_sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sentry_sdk/scope.py,sha256=fl6Hm7BD-1HlzghOHkWY_zQY3FkakrNrqdjebfJ0LbY,63942
 sentry_sdk/scrubber.py,sha256=rENmQ35buugDl269bRZuIAtgr27B9SzisJYUF-691pc,6064
-sentry_sdk/serializer.py,sha256=…
+sentry_sdk/serializer.py,sha256=xUw3xjSsGF0cWRHL9ofe0nmWEtZvzPOHSQ6IHvo6UAc,13239
 sentry_sdk/session.py,sha256=TqDVmRKKHUDSmZb4jQR-s8wDt7Fwb6QaG21hawUGWEs,5571
 sentry_sdk/sessions.py,sha256=UZ2jfrqhYvZzTxCDGc1MLD6P_aHLJnTFetSUROIaPaA,9154
 sentry_sdk/spotlight.py,sha256=93kdd8KxdLfcPaxFnFuqHgYAAL4FCfpK1hiiPoD7Ac4,8678
@@ -28,10 +28,10 @@ sentry_sdk/tracing.py,sha256=dEyLZn0JSj5WMjVJEQUxRud5NewBRau9dkuDrrzJ_Xw,48114
 sentry_sdk/tracing_utils.py,sha256=J_eY_0XuyydslEmcFZcrv8dt2ItpW7uWwe6CoXxoK5Q,28820
 sentry_sdk/transport.py,sha256=A0uux7XnniDJuExLudLyyFDYnS5C6r7zozGbkveUM7E,32469
 sentry_sdk/types.py,sha256=NLbnRzww2K3_oGz2GzcC8TdX5L2DXYso1-H1uCv2Hwc,1222
-sentry_sdk/utils.py,sha256=…
+sentry_sdk/utils.py,sha256=Uv_85CVVn_grmr1GjqGkogAbZPW1mr-iEcYcvlYp6EE,61036
 sentry_sdk/worker.py,sha256=VSMaigRMbInVyupSFpBC42bft2oIViea-0C_d9ThnIo,4464
 sentry_sdk/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sentry_sdk/ai/monitoring.py,sha256=…
+sentry_sdk/ai/monitoring.py,sha256=OqQHsi832ZTL6mf38hO_qehaqMqVAb2E6HOyyaXSOtY,4948
 sentry_sdk/ai/utils.py,sha256=QCwhHoptrdXyYroJqzCKxqi0cmrlD9IDDWUcBk6yWZc,950
 sentry_sdk/crons/__init__.py,sha256=3Zt6g1-pZZ12uRKKsC8QLm3XgJ4K1VYxgVpNNUygOZY,221
 sentry_sdk/crons/api.py,sha256=s3x6SG-jqIdWS-Kj0sAxJv0nz2A3stdGE1UCtQyRUy4,1559
@@ -65,7 +65,7 @@ sentry_sdk/integrations/falcon.py,sha256=uhjqFPKa8bWRQr0za4pVXGYaPr-LRdICw2rUO-l
 sentry_sdk/integrations/fastapi.py,sha256=KJsG73Xrm5AmAb2yiiINyfvlU9tIaVbPWA4urj6uEOU,4718
 sentry_sdk/integrations/flask.py,sha256=t7q73JoJT46RWDtrNImtIloGyDg7CnsNFKpS4gOuBIw,8740
 sentry_sdk/integrations/gcp.py,sha256=u1rSi3nK2ISUQqkRnmKFG23Ks-SefshTf5PV0Dwp3_4,8274
-sentry_sdk/integrations/gnu_backtrace.py,sha256=…
+sentry_sdk/integrations/gnu_backtrace.py,sha256=EdMQB6ZFBZhZHtkmEyKdQdJzNmzFRIP1hjg1ve2_qOQ,2658
 sentry_sdk/integrations/gql.py,sha256=ppC7fjpyQ6jWST-batRt5HtebxE_9IeHbmZ-CJ1TfUU,4179
 sentry_sdk/integrations/graphene.py,sha256=I6ZJ8Apd9dO9XPVvZY7I46-v1eXOW1C1rAkWwasF3gU,5042
 sentry_sdk/integrations/httpx.py,sha256=WwUulqzBLoGGqWUUdQg_MThwQUKzBXnA-m3g_1GOpCE,5866
@@ -77,7 +77,7 @@ sentry_sdk/integrations/litestar.py,sha256=ui52AfgyyAO4aQ9XSkqJZNcPduX0BccCYUkQA
 sentry_sdk/integrations/logging.py,sha256=-0o9HTFo5RpHkCpxfZvpiBj5VWpH4aIJmH-HNQzj3Ec,13643
 sentry_sdk/integrations/loguru.py,sha256=mEWYWsNHQLlWknU4M8RBgOf2-5B5cBr5aGd-ZH1Emq4,6193
 sentry_sdk/integrations/modules.py,sha256=vzLx3Erg77Vl4mnUvAgTg_3teAuWy7zylFpAidBI9I0,820
-sentry_sdk/integrations/openai.py,sha256=…
+sentry_sdk/integrations/openai.py,sha256=1IyriExZ4BVCteq9Ml8Q0swRR4BkAboqfumoSFm74TA,22788
 sentry_sdk/integrations/openfeature.py,sha256=NXRKnhg0knMKOx_TO_2Z4zSsh4Glgk3tStu-lI99XsE,1235
 sentry_sdk/integrations/pure_eval.py,sha256=OvT76XvllQ_J6ABu3jVNU6KD2QAxnXMtTZ7hqhXNhpY,4581
 sentry_sdk/integrations/pymongo.py,sha256=cPpMGEbXHlV6HTHgmIDL1F-x3w7ZMROXVb4eUhLs3bw,6380
@@ -96,16 +96,16 @@ sentry_sdk/integrations/statsig.py,sha256=-e57hxHfHo1S13YQKObV65q_UvREyxbR56fnf7
 sentry_sdk/integrations/stdlib.py,sha256=vgB9weDGh455vBwmUSgcQRgzViKstu3O0syOthCn_H0,8831
 sentry_sdk/integrations/strawberry.py,sha256=u7Lk4u3sNEycdSmY1nQBzYKmqI-mO8BWKAAJkCSuTRA,14126
 sentry_sdk/integrations/sys_exit.py,sha256=AwShgGBWPdiY25aOWDLRAs2RBUKm5T3CrL-Q-zAk0l4,2493
-sentry_sdk/integrations/threading.py,sha256=…
+sentry_sdk/integrations/threading.py,sha256=tV7pQB8LwR8dIju-I81rgjps4sRxSofj_2YFBL2JXWM,5396
 sentry_sdk/integrations/tornado.py,sha256=Qcft8FZxdVICnaa1AhsDB262sInEQZPf-pvgI-Agjmc,7206
 sentry_sdk/integrations/trytond.py,sha256=BaLCNqQeRWDbHHDEelS5tmj-p_CrbmtGEHIn6JfzEFE,1651
 sentry_sdk/integrations/typer.py,sha256=FQrFgpR9t6yQWF-oWCI9KJLFioEnA2c_1BEtYV-mPAs,1815
 sentry_sdk/integrations/unleash.py,sha256=6JshqyuAY_kbu9Nr20tMOhtgx-ryqPHCrq_EQIzeqm4,1058
 sentry_sdk/integrations/wsgi.py,sha256=aW_EnDCcex41NGdrxKFZsfJxJhndsMCv0d2a5LBb7wU,10747
-sentry_sdk/integrations/celery/__init__.py,sha256=…
+sentry_sdk/integrations/celery/__init__.py,sha256=FNmrLL0Cs95kv6yUCpJGu9X8Cpw20mMYGmnkBC4IL4Y,18699
 sentry_sdk/integrations/celery/beat.py,sha256=WHEdKetrDJgtZGNp1VUMa6BG1q-MhsLZMefUmVrPu3w,8953
 sentry_sdk/integrations/celery/utils.py,sha256=CMWQOpg9yniEkm3WlXe7YakJfVnLwaY0-jyeo2GX-ZI,1208
-sentry_sdk/integrations/django/__init__.py,sha256=…
+sentry_sdk/integrations/django/__init__.py,sha256=KqAgBKkuyJGw0lqNZBj0otqZGy_YHqPsisgPZLCN8mQ,25247
 sentry_sdk/integrations/django/asgi.py,sha256=RdDiCjlWAJ2pKm84-0li3jpp2Zl_GmLNprYdkLDTXgY,8333
 sentry_sdk/integrations/django/caching.py,sha256=UvYaiI7xrN08Se59vMgJWrSO2BuowOyx3jmXmZoxQJo,6427
 sentry_sdk/integrations/django/middleware.py,sha256=UVKq134w_TyOVPV7WwBW0QjHY-ziDipcZBIDQmjqceE,6009
@@ -122,7 +122,7 @@ sentry_sdk/integrations/grpc/aio/client.py,sha256=csOwlJb7fg9fBnzeNHxr-qpZEmU97I
 sentry_sdk/integrations/grpc/aio/server.py,sha256=SCkdikPZRdWyrlnZewsSGpPk4v6AsdSApVAbO-lf_Lk,4019
 sentry_sdk/integrations/openai_agents/__init__.py,sha256=-ydqG0sFIrvJlT9JHO58EZpCAzyy9J59Av8dxn0fHuw,1424
 sentry_sdk/integrations/openai_agents/consts.py,sha256=PTb3vlqkuMPktu21ALK72o5WMIX4-cewTEiTRdHKFdQ,38
-sentry_sdk/integrations/openai_agents/utils.py,sha256=…
+sentry_sdk/integrations/openai_agents/utils.py,sha256=ZtsID9kIF7pUYRqzJcGrtnhJZ838DxO2G7yhPdTHRUc,5499
 sentry_sdk/integrations/openai_agents/patches/__init__.py,sha256=I7C9JZ70Mf8PV3wPdFsxTqvcYl4TYUgSZYfNU2Spb7Y,231
 sentry_sdk/integrations/openai_agents/patches/agent_run.py,sha256=jDYY2jVTcoJLiH-0KOKMryv7IAoDKjWXsMwnxJU8KHM,5736
 sentry_sdk/integrations/openai_agents/patches/models.py,sha256=DtwqCmSsYFlhRZquKM2jiTOnnAg97eyCTtJYZkWqdww,1405
@@ -158,9 +158,9 @@ sentry_sdk/profiler/__init__.py,sha256=3PI3bHk9RSkkOXZKN84DDedk_7M65EiqqaIGo-DYs
 sentry_sdk/profiler/continuous_profiler.py,sha256=s0DHkj3RZYRg9HnQQC0G44ku6DaFqRy30fZTMtTYvIs,22828
 sentry_sdk/profiler/transaction_profiler.py,sha256=4Gj6FHLnK1di3GmnI1cCc_DbNcBVMdBjZZFvPvm7C7k,27877
 sentry_sdk/profiler/utils.py,sha256=G5s4tYai9ATJqcHrQ3bOIxlK6jIaHzELrDtU5k3N4HI,6556
-sentry_sdk-2.33.1.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
-sentry_sdk-2.33.1.dist-info/METADATA,sha256=…
-sentry_sdk-2.33.1.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
-sentry_sdk-2.33.1.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
-sentry_sdk-2.33.1.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
-sentry_sdk-2.33.1.dist-info/RECORD,,
+sentry_sdk-2.34.0.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
+sentry_sdk-2.34.0.dist-info/METADATA,sha256=lx1pIBA63c_gfGyea7bRqZdtENj63J4xeHDUQbE8ie4,10278
+sentry_sdk-2.34.0.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+sentry_sdk-2.34.0.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
+sentry_sdk-2.34.0.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
+sentry_sdk-2.34.0.dist-info/RECORD,,