sentry-sdk 3.0.0a1__py2.py3-none-any.whl → 3.0.0a3__py2.py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of sentry-sdk might be problematic.
- sentry_sdk/__init__.py +2 -0
- sentry_sdk/_compat.py +5 -12
- sentry_sdk/_init_implementation.py +7 -7
- sentry_sdk/_log_batcher.py +17 -29
- sentry_sdk/_lru_cache.py +7 -9
- sentry_sdk/_queue.py +2 -4
- sentry_sdk/_types.py +11 -18
- sentry_sdk/_werkzeug.py +5 -7
- sentry_sdk/ai/monitoring.py +44 -31
- sentry_sdk/ai/utils.py +3 -4
- sentry_sdk/api.py +75 -87
- sentry_sdk/attachments.py +10 -12
- sentry_sdk/client.py +137 -155
- sentry_sdk/consts.py +430 -174
- sentry_sdk/crons/api.py +16 -17
- sentry_sdk/crons/decorator.py +25 -27
- sentry_sdk/debug.py +4 -6
- sentry_sdk/envelope.py +46 -112
- sentry_sdk/feature_flags.py +9 -15
- sentry_sdk/integrations/__init__.py +24 -19
- sentry_sdk/integrations/_asgi_common.py +15 -18
- sentry_sdk/integrations/_wsgi_common.py +22 -33
- sentry_sdk/integrations/aiohttp.py +32 -30
- sentry_sdk/integrations/anthropic.py +42 -37
- sentry_sdk/integrations/argv.py +3 -4
- sentry_sdk/integrations/ariadne.py +16 -18
- sentry_sdk/integrations/arq.py +21 -29
- sentry_sdk/integrations/asgi.py +63 -37
- sentry_sdk/integrations/asyncio.py +14 -16
- sentry_sdk/integrations/atexit.py +6 -10
- sentry_sdk/integrations/aws_lambda.py +26 -36
- sentry_sdk/integrations/beam.py +10 -18
- sentry_sdk/integrations/boto3.py +18 -16
- sentry_sdk/integrations/bottle.py +25 -34
- sentry_sdk/integrations/celery/__init__.py +41 -61
- sentry_sdk/integrations/celery/beat.py +23 -27
- sentry_sdk/integrations/celery/utils.py +15 -17
- sentry_sdk/integrations/chalice.py +8 -10
- sentry_sdk/integrations/clickhouse_driver.py +21 -31
- sentry_sdk/integrations/cloud_resource_context.py +9 -16
- sentry_sdk/integrations/cohere.py +27 -33
- sentry_sdk/integrations/dedupe.py +5 -8
- sentry_sdk/integrations/django/__init__.py +57 -72
- sentry_sdk/integrations/django/asgi.py +26 -34
- sentry_sdk/integrations/django/caching.py +23 -19
- sentry_sdk/integrations/django/middleware.py +17 -20
- sentry_sdk/integrations/django/signals_handlers.py +11 -10
- sentry_sdk/integrations/django/templates.py +19 -16
- sentry_sdk/integrations/django/transactions.py +16 -11
- sentry_sdk/integrations/django/views.py +6 -10
- sentry_sdk/integrations/dramatiq.py +21 -21
- sentry_sdk/integrations/excepthook.py +10 -10
- sentry_sdk/integrations/executing.py +3 -4
- sentry_sdk/integrations/falcon.py +27 -42
- sentry_sdk/integrations/fastapi.py +13 -16
- sentry_sdk/integrations/flask.py +31 -38
- sentry_sdk/integrations/gcp.py +13 -16
- sentry_sdk/integrations/gnu_backtrace.py +4 -6
- sentry_sdk/integrations/gql.py +16 -17
- sentry_sdk/integrations/graphene.py +13 -12
- sentry_sdk/integrations/grpc/__init__.py +19 -1
- sentry_sdk/integrations/grpc/aio/server.py +15 -14
- sentry_sdk/integrations/grpc/client.py +19 -9
- sentry_sdk/integrations/grpc/consts.py +2 -0
- sentry_sdk/integrations/grpc/server.py +12 -8
- sentry_sdk/integrations/httpx.py +9 -12
- sentry_sdk/integrations/huey.py +13 -20
- sentry_sdk/integrations/huggingface_hub.py +18 -18
- sentry_sdk/integrations/langchain.py +203 -113
- sentry_sdk/integrations/launchdarkly.py +13 -10
- sentry_sdk/integrations/litestar.py +37 -35
- sentry_sdk/integrations/logging.py +52 -65
- sentry_sdk/integrations/loguru.py +127 -57
- sentry_sdk/integrations/modules.py +3 -4
- sentry_sdk/integrations/openai.py +100 -88
- sentry_sdk/integrations/openai_agents/__init__.py +49 -0
- sentry_sdk/integrations/openai_agents/consts.py +1 -0
- sentry_sdk/integrations/openai_agents/patches/__init__.py +4 -0
- sentry_sdk/integrations/openai_agents/patches/agent_run.py +152 -0
- sentry_sdk/integrations/openai_agents/patches/models.py +52 -0
- sentry_sdk/integrations/openai_agents/patches/runner.py +42 -0
- sentry_sdk/integrations/openai_agents/patches/tools.py +84 -0
- sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +20 -0
- sentry_sdk/integrations/openai_agents/spans/ai_client.py +46 -0
- sentry_sdk/integrations/openai_agents/spans/execute_tool.py +47 -0
- sentry_sdk/integrations/openai_agents/spans/handoff.py +24 -0
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +41 -0
- sentry_sdk/integrations/openai_agents/utils.py +201 -0
- sentry_sdk/integrations/openfeature.py +11 -6
- sentry_sdk/integrations/pure_eval.py +6 -10
- sentry_sdk/integrations/pymongo.py +13 -17
- sentry_sdk/integrations/pyramid.py +31 -36
- sentry_sdk/integrations/quart.py +23 -28
- sentry_sdk/integrations/ray.py +73 -64
- sentry_sdk/integrations/redis/__init__.py +7 -4
- sentry_sdk/integrations/redis/_async_common.py +25 -12
- sentry_sdk/integrations/redis/_sync_common.py +19 -13
- sentry_sdk/integrations/redis/modules/caches.py +17 -8
- sentry_sdk/integrations/redis/modules/queries.py +9 -8
- sentry_sdk/integrations/redis/rb.py +3 -2
- sentry_sdk/integrations/redis/redis.py +4 -4
- sentry_sdk/integrations/redis/redis_cluster.py +21 -13
- sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +3 -2
- sentry_sdk/integrations/redis/utils.py +23 -24
- sentry_sdk/integrations/rq.py +13 -16
- sentry_sdk/integrations/rust_tracing.py +9 -6
- sentry_sdk/integrations/sanic.py +34 -46
- sentry_sdk/integrations/serverless.py +22 -27
- sentry_sdk/integrations/socket.py +27 -15
- sentry_sdk/integrations/spark/__init__.py +1 -0
- sentry_sdk/integrations/spark/spark_driver.py +45 -83
- sentry_sdk/integrations/spark/spark_worker.py +7 -11
- sentry_sdk/integrations/sqlalchemy.py +22 -19
- sentry_sdk/integrations/starlette.py +86 -90
- sentry_sdk/integrations/starlite.py +28 -34
- sentry_sdk/integrations/statsig.py +5 -4
- sentry_sdk/integrations/stdlib.py +28 -24
- sentry_sdk/integrations/strawberry.py +62 -49
- sentry_sdk/integrations/sys_exit.py +7 -11
- sentry_sdk/integrations/threading.py +12 -14
- sentry_sdk/integrations/tornado.py +28 -32
- sentry_sdk/integrations/trytond.py +4 -3
- sentry_sdk/integrations/typer.py +8 -6
- sentry_sdk/integrations/unleash.py +5 -4
- sentry_sdk/integrations/wsgi.py +47 -46
- sentry_sdk/logger.py +41 -10
- sentry_sdk/monitor.py +16 -28
- sentry_sdk/opentelemetry/consts.py +11 -4
- sentry_sdk/opentelemetry/contextvars_context.py +26 -16
- sentry_sdk/opentelemetry/propagator.py +38 -21
- sentry_sdk/opentelemetry/sampler.py +51 -34
- sentry_sdk/opentelemetry/scope.py +36 -37
- sentry_sdk/opentelemetry/span_processor.py +48 -58
- sentry_sdk/opentelemetry/tracing.py +58 -14
- sentry_sdk/opentelemetry/utils.py +186 -194
- sentry_sdk/profiler/continuous_profiler.py +108 -97
- sentry_sdk/profiler/transaction_profiler.py +70 -97
- sentry_sdk/profiler/utils.py +11 -15
- sentry_sdk/scope.py +251 -273
- sentry_sdk/scrubber.py +22 -26
- sentry_sdk/serializer.py +40 -54
- sentry_sdk/session.py +44 -61
- sentry_sdk/sessions.py +35 -49
- sentry_sdk/spotlight.py +15 -21
- sentry_sdk/tracing.py +121 -187
- sentry_sdk/tracing_utils.py +104 -122
- sentry_sdk/transport.py +131 -157
- sentry_sdk/utils.py +232 -309
- sentry_sdk/worker.py +16 -28
- {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/METADATA +3 -3
- sentry_sdk-3.0.0a3.dist-info/RECORD +168 -0
- {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/WHEEL +1 -1
- sentry_sdk-3.0.0a1.dist-info/RECORD +0 -154
- {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/modules.py

@@ -1,3 +1,4 @@
+from __future__ import annotations
 import sentry_sdk
 from sentry_sdk.integrations import Integration
 from sentry_sdk.scope import add_global_event_processor

@@ -14,11 +15,9 @@ class ModulesIntegration(Integration):
     identifier = "modules"

     @staticmethod
-    def setup_once():
-        # type: () -> None
+    def setup_once() -> None:
         @add_global_event_processor
-        def processor(event, hint):
-            # type: (Event, Any) -> Event
+        def processor(event: Event, hint: Any) -> Event:
             if event.get("type") == "transaction":
                 return event

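Both modules.py hunks show the release's recurring migration: Python 2 style # type: comments become inline annotations, and from __future__ import annotations (PEP 563) is added so annotations are stored as strings instead of being evaluated at import time. That is also what permits forward references such as self: OpenAIIntegration later in this diff. A minimal sketch of why the __future__ import matters (the Greeter class is hypothetical, not part of the SDK):

from __future__ import annotations


class Greeter:
    def clone(self: Greeter) -> Greeter:
        # With postponed evaluation, the annotation "Greeter" is kept as a
        # string and never evaluated at definition time, so referring to the
        # class inside its own body is fine.
        return Greeter()


print(Greeter().clone())  # e.g. <__main__.Greeter object at 0x...>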
sentry_sdk/integrations/openai.py

@@ -1,3 +1,4 @@
+from __future__ import annotations
 from functools import wraps

 import sentry_sdk
@@ -32,8 +33,11 @@ class OpenAIIntegration(Integration):
     identifier = "openai"
     origin = f"auto.ai.{identifier}"

-    def __init__(self, include_prompts=True, tiktoken_encoding_name=None):
-        # type: (OpenAIIntegration, bool, Optional[str]) -> None
+    def __init__(
+        self: OpenAIIntegration,
+        include_prompts: bool = True,
+        tiktoken_encoding_name: Optional[str] = None,
+    ) -> None:
         self.include_prompts = include_prompts

         self.tiktoken_encoding = None
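Given the annotated signature above, configuring the integration might look like the following sketch (the DSN is a placeholder; "cl100k_base" is just one of tiktoken's standard encoding names):

import sentry_sdk
from sentry_sdk.integrations.openai import OpenAIIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[
        OpenAIIntegration(
            include_prompts=False,  # keep prompts/responses off the spans
            tiktoken_encoding_name="cl100k_base",  # enables the count_tokens() fallback
        )
    ],
)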
@@ -43,8 +47,7 @@ class OpenAIIntegration(Integration):
             self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)

     @staticmethod
-    def setup_once():
-        # type: () -> None
+    def setup_once() -> None:
         Completions.create = _wrap_chat_completion_create(Completions.create)
         Embeddings.create = _wrap_embeddings_create(Embeddings.create)

@@ -53,15 +56,13 @@ class OpenAIIntegration(Integration):
         )
         AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)

-    def count_tokens(self, s):
-        # type: (OpenAIIntegration, str) -> int
+    def count_tokens(self: OpenAIIntegration, s: str) -> int:
         if self.tiktoken_encoding is not None:
             return len(self.tiktoken_encoding.encode_ordinary(s))
         return 0


-def _capture_exception(exc):
-    # type: (Any) -> None
+def _capture_exception(exc: Any) -> None:
     event, hint = event_from_exception(
         exc,
         client_options=sentry_sdk.get_client().options,
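count_tokens returns 0 unless an encoding was configured at init time; otherwise it defers to tiktoken's encode_ordinary. A quick standalone check of what that computes (assumes the tiktoken package is installed; the exact count depends on the encoding):

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")
# encode_ordinary tokenizes without special-token handling, matching the
# integration's count_tokens() fallback.
print(len(enc.encode_ordinary("Hello, world")))  # a small integer, e.g. 3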
@@ -70,52 +71,78 @@ def _capture_exception(exc):
     sentry_sdk.capture_event(event, hint=hint)


-def _calculate_token_usage(
-    messages, response, span, streaming_message_responses, count_tokens
-):
-    # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
-    completion_tokens = 0  # type: Optional[int]
-    prompt_tokens = 0  # type: Optional[int]
-    total_tokens = 0  # type: Optional[int]
+def _get_usage(usage: Any, names: List[str]) -> int:
+    for name in names:
+        if hasattr(usage, name) and isinstance(getattr(usage, name), int):
+            return getattr(usage, name)
+    return 0
+
+
+def _calculate_token_usage(
+    messages: Iterable[ChatCompletionMessageParam],
+    response: Any,
+    span: Span,
+    streaming_message_responses: Optional[List[str]],
+    count_tokens: Callable[..., Any],
+) -> None:
+    input_tokens: Optional[int] = 0
+    input_tokens_cached: Optional[int] = 0
+    output_tokens: Optional[int] = 0
+    output_tokens_reasoning: Optional[int] = 0
+    total_tokens: Optional[int] = 0
+
     if hasattr(response, "usage"):
-        if hasattr(response.usage, "completion_tokens") and isinstance(
-            response.usage.completion_tokens, int
-        ):
-            completion_tokens = response.usage.completion_tokens
-        if hasattr(response.usage, "prompt_tokens") and isinstance(
-            response.usage.prompt_tokens, int
-        ):
-            prompt_tokens = response.usage.prompt_tokens
-        if hasattr(response.usage, "total_tokens") and isinstance(
-            response.usage.total_tokens, int
-        ):
-            total_tokens = response.usage.total_tokens
+        input_tokens = _get_usage(response.usage, ["input_tokens", "prompt_tokens"])
+        if hasattr(response.usage, "input_tokens_details"):
+            input_tokens_cached = _get_usage(
+                response.usage.input_tokens_details, ["cached_tokens"]
+            )

-    if prompt_tokens == 0:
+        output_tokens = _get_usage(
+            response.usage, ["output_tokens", "completion_tokens"]
+        )
+        if hasattr(response.usage, "output_tokens_details"):
+            output_tokens_reasoning = _get_usage(
+                response.usage.output_tokens_details, ["reasoning_tokens"]
+            )
+
+        total_tokens = _get_usage(response.usage, ["total_tokens"])
+
+    # Manually count tokens
+    # TODO: when implementing responses API, check for responses API
+    if input_tokens == 0:
         for message in messages:
             if "content" in message:
-                prompt_tokens += count_tokens(message["content"])
+                input_tokens += count_tokens(message["content"])

-    if completion_tokens == 0:
+    # TODO: when implementing responses API, check for responses API
+    if output_tokens == 0:
         if streaming_message_responses is not None:
             for message in streaming_message_responses:
-                completion_tokens += count_tokens(message)
+                output_tokens += count_tokens(message)
         elif hasattr(response, "choices"):
             for choice in response.choices:
                 if hasattr(choice, "message"):
-                    completion_tokens += count_tokens(choice.message)
-
-    if completion_tokens == 0:
-        completion_tokens = None
-    if prompt_tokens == 0:
-        prompt_tokens = None
-    if total_tokens == 0:
-        total_tokens = None
-    record_token_usage(span, prompt_tokens, completion_tokens, total_tokens)
+                    output_tokens += count_tokens(choice.message)
+
+    # Do not set token data if it is 0
+    input_tokens = input_tokens or None
+    input_tokens_cached = input_tokens_cached or None
+    output_tokens = output_tokens or None
+    output_tokens_reasoning = output_tokens_reasoning or None
+    total_tokens = total_tokens or None
+
+    record_token_usage(
+        span,
+        input_tokens=input_tokens,
+        input_tokens_cached=input_tokens_cached,
+        output_tokens=output_tokens,
+        output_tokens_reasoning=output_tokens_reasoning,
+        total_tokens=total_tokens,
+    )


-def _new_chat_completion_common(f, *args, **kwargs):
-    # type: (Any, *Any, **Any) -> Any
+def _new_chat_completion_common(f: Any, *args: Any, **kwargs: Any) -> Any:
     integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
     if integration is None:
         return f(*args, **kwargs)
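The new _get_usage helper returns the first integer attribute it finds, which lets one code path read both Responses-style (input_tokens/output_tokens) and Completions-style (prompt_tokens/completion_tokens) usage objects. A quick illustration with a stand-in usage object (the SimpleNamespace is hypothetical test scaffolding, not SDK code):

from types import SimpleNamespace

# Stand-in for an OpenAI usage object that only has Completions-style fields.
usage = SimpleNamespace(prompt_tokens=12, completion_tokens=34)

print(_get_usage(usage, ["input_tokens", "prompt_tokens"]))       # 12
print(_get_usage(usage, ["output_tokens", "completion_tokens"]))  # 34
print(_get_usage(usage, ["total_tokens"]))                        # 0 (attribute missing)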
@@ -139,7 +166,6 @@ def _new_chat_completion_common(f, *args, **kwargs):
         op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE,
         name="Chat Completion",
         origin=OpenAIIntegration.origin,
-        only_if_parent=True,
     )
     span.__enter__()
@@ -156,20 +182,17 @@ def _new_chat_completion_common(f, *args, **kwargs):
         if should_send_default_pii() and integration.include_prompts:
             set_data_normalized(
                 span,
-
+                SPANDATA.AI_RESPONSES,
                 list(map(lambda x: x.message, res.choices)),
             )
-            _calculate_token_usage(
-                messages, res, span, None, integration.count_tokens
-            )
+        _calculate_token_usage(messages, res, span, None, integration.count_tokens)
         span.__exit__(None, None, None)
     elif hasattr(res, "_iterator"):
         data_buf: list[list[str]] = []  # one for each choice

         old_iterator = res._iterator

-        def new_iterator():
-            # type: () -> Iterator[ChatCompletionChunk]
+        def new_iterator() -> Iterator[ChatCompletionChunk]:
             with capture_internal_exceptions():
                 for x in old_iterator:
                     if hasattr(x, "choices"):
@@ -192,7 +215,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
                         set_data_normalized(
                             span, SPANDATA.AI_RESPONSES, all_responses
                         )
-
+                    _calculate_token_usage(
                         messages,
                         res,
                         span,
@@ -201,8 +224,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
                     )
                     span.__exit__(None, None, None)

-        async def new_iterator_async():
-            # type: () -> AsyncIterator[ChatCompletionChunk]
+        async def new_iterator_async() -> AsyncIterator[ChatCompletionChunk]:
             with capture_internal_exceptions():
                 async for x in old_iterator:
                     if hasattr(x, "choices"):
@@ -225,7 +247,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
                         set_data_normalized(
                             span, SPANDATA.AI_RESPONSES, all_responses
                         )
-
+                    _calculate_token_usage(
                         messages,
                         res,
                         span,
@@ -245,10 +267,8 @@ def _new_chat_completion_common(f, *args, **kwargs):
     return res


-def _wrap_chat_completion_create(f):
-    # type: (Callable[..., Any]) -> Callable[..., Any]
-    def _execute_sync(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _wrap_chat_completion_create(f: Callable[..., Any]) -> Callable[..., Any]:
+    def _execute_sync(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_chat_completion_common(f, *args, **kwargs)

         try:
@@ -268,8 +288,7 @@ def _wrap_chat_completion_create(f):
         return e.value

     @wraps(f)
-    def _sentry_patched_create_sync(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    def _sentry_patched_create_sync(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None or "messages" not in kwargs:
             # no "messages" means invalid call (in all versions of openai), let it return error
@@ -280,10 +299,8 @@ def _wrap_chat_completion_create(f):
     return _sentry_patched_create_sync


-def _wrap_async_chat_completion_create(f):
-    # type: (Callable[..., Any]) -> Callable[..., Any]
-    async def _execute_async(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _wrap_async_chat_completion_create(f: Callable[..., Any]) -> Callable[..., Any]:
+    async def _execute_async(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_chat_completion_common(f, *args, **kwargs)

         try:
@@ -303,8 +320,7 @@ def _wrap_async_chat_completion_create(f):
         return e.value

     @wraps(f)
-    async def _sentry_patched_create_async(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    async def _sentry_patched_create_async(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None or "messages" not in kwargs:
             # no "messages" means invalid call (in all versions of openai), let it return error
@@ -315,8 +331,7 @@ def _wrap_async_chat_completion_create(f):
     return _sentry_patched_create_async


-def _new_embeddings_create_common(f, *args, **kwargs):
-    # type: (Any, *Any, **Any) -> Any
+def _new_embeddings_create_common(f: Any, *args: Any, **kwargs: Any) -> Any:
     integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
     if integration is None:
         return f(*args, **kwargs)
@@ -325,48 +340,49 @@ def _new_embeddings_create_common(f, *args, **kwargs):
         op=consts.OP.OPENAI_EMBEDDINGS_CREATE,
         description="OpenAI Embedding Creation",
         origin=OpenAIIntegration.origin,
-        only_if_parent=True,
     ) as span:
         if "input" in kwargs and (
             should_send_default_pii() and integration.include_prompts
         ):
             if isinstance(kwargs["input"], str):
-                set_data_normalized(span,
+                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
             elif (
                 isinstance(kwargs["input"], list)
                 and len(kwargs["input"]) > 0
                 and isinstance(kwargs["input"][0], str)
             ):
-                set_data_normalized(span,
+                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
         if "model" in kwargs:
-            set_data_normalized(span,
+            set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])

         response = yield f, args, kwargs

-        prompt_tokens = 0
+        input_tokens = 0
         total_tokens = 0
         if hasattr(response, "usage"):
             if hasattr(response.usage, "prompt_tokens") and isinstance(
                 response.usage.prompt_tokens, int
             ):
-                prompt_tokens = response.usage.prompt_tokens
+                input_tokens = response.usage.prompt_tokens
             if hasattr(response.usage, "total_tokens") and isinstance(
                 response.usage.total_tokens, int
             ):
                 total_tokens = response.usage.total_tokens

-        if prompt_tokens == 0:
-            prompt_tokens = integration.count_tokens(kwargs["input"] or "")
+        if input_tokens == 0:
+            input_tokens = integration.count_tokens(kwargs["input"] or "")

-        record_token_usage(
+        record_token_usage(
+            span,
+            input_tokens=input_tokens,
+            total_tokens=total_tokens or input_tokens,
+        )

     return response


-def _wrap_embeddings_create(f):
-    # type: (Any) -> Any
-    def _execute_sync(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _wrap_embeddings_create(f: Any) -> Any:
+    def _execute_sync(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_embeddings_create_common(f, *args, **kwargs)

         try:
@@ -386,8 +402,7 @@ def _wrap_embeddings_create(f):
         return e.value

     @wraps(f)
-    def _sentry_patched_create_sync(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    def _sentry_patched_create_sync(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None:
             return f(*args, **kwargs)
@@ -397,10 +412,8 @@ def _wrap_embeddings_create(f):
     return _sentry_patched_create_sync


-def _wrap_async_embeddings_create(f):
-    # type: (Any) -> Any
-    async def _execute_async(f, *args, **kwargs):
-        # type: (Any, *Any, **Any) -> Any
+def _wrap_async_embeddings_create(f: Any) -> Any:
+    async def _execute_async(f: Any, *args: Any, **kwargs: Any) -> Any:
         gen = _new_embeddings_create_common(f, *args, **kwargs)

         try:
@@ -420,8 +433,7 @@ def _wrap_async_embeddings_create(f):
         return e.value

     @wraps(f)
-    async def _sentry_patched_create_async(*args, **kwargs):
-        # type: (*Any, **Any) -> Any
+    async def _sentry_patched_create_async(*args: Any, **kwargs: Any) -> Any:
         integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
         if integration is None:
             return await f(*args, **kwargs)
sentry_sdk/integrations/openai_agents/__init__.py (new file)

@@ -0,0 +1,49 @@
+from sentry_sdk.integrations import DidNotEnable, Integration
+
+from .patches import (
+    _create_get_model_wrapper,
+    _create_get_all_tools_wrapper,
+    _create_run_wrapper,
+    _patch_agent_run,
+)
+
+try:
+    import agents
+
+except ImportError:
+    raise DidNotEnable("OpenAI Agents not installed")
+
+
+def _patch_runner() -> None:
+    # Create the root span for one full agent run (including eventual handoffs)
+    # Note agents.run.DEFAULT_AGENT_RUNNER.run_sync is a wrapper around
+    # agents.run.DEFAULT_AGENT_RUNNER.run. It does not need to be wrapped separately.
+    # TODO-anton: Also patch streaming runner: agents.Runner.run_streamed
+    agents.run.DEFAULT_AGENT_RUNNER.run = _create_run_wrapper(
+        agents.run.DEFAULT_AGENT_RUNNER.run
+    )
+
+    # Creating the actual spans for each agent run.
+    _patch_agent_run()
+
+
+def _patch_model() -> None:
+    agents.run.AgentRunner._get_model = classmethod(
+        _create_get_model_wrapper(agents.run.AgentRunner._get_model),
+    )
+
+
+def _patch_tools() -> None:
+    agents.run.AgentRunner._get_all_tools = classmethod(
+        _create_get_all_tools_wrapper(agents.run.AgentRunner._get_all_tools),
+    )
+
+
+class OpenAIAgentsIntegration(Integration):
+    identifier = "openai_agents"
+
+    @staticmethod
+    def setup_once() -> None:
+        _patch_tools()
+        _patch_model()
+        _patch_runner()
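As with the other SDK integrations, the new class would be enabled through sentry_sdk.init; a sketch with a placeholder DSN:

import sentry_sdk
from sentry_sdk.integrations.openai_agents import OpenAIAgentsIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[OpenAIAgentsIntegration()],
)
# From here, agents.run.DEFAULT_AGENT_RUNNER.run(...) is wrapped so each
# full agent run gets a root span, with child spans per agent, model call,
# tool execution, and handoff.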
sentry_sdk/integrations/openai_agents/consts.py (new file)

@@ -0,0 +1 @@
+SPAN_ORIGIN = "auto.ai.openai_agents"
sentry_sdk/integrations/openai_agents/patches/agent_run.py (new file)

@@ -0,0 +1,152 @@
+from __future__ import annotations
+
+from functools import wraps
+
+from sentry_sdk.integrations import DidNotEnable
+
+from ..spans import invoke_agent_span, update_invoke_agent_span, handoff_span
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Optional
+
+
+try:
+    import agents
+except ImportError:
+    raise DidNotEnable("OpenAI Agents not installed")
+
+
+def _patch_agent_run() -> None:
+    """
+    Patches AgentRunner methods to create agent invocation spans.
+    This directly patches the execution flow to track when agents start and stop.
+    """
+
+    # Store original methods
+    original_run_single_turn = agents.run.AgentRunner._run_single_turn
+    original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs
+    original_execute_final_output = agents._run_impl.RunImpl.execute_final_output
+
+    def _start_invoke_agent_span(
+        context_wrapper: agents.RunContextWrapper, agent: agents.Agent
+    ) -> None:
+        """Start an agent invocation span"""
+        # Store the agent on the context wrapper so we can access it later
+        context_wrapper._sentry_current_agent = agent
+        invoke_agent_span(context_wrapper, agent)
+
+    def _end_invoke_agent_span(
+        context_wrapper: agents.RunContextWrapper,
+        agent: agents.Agent,
+        output: Optional[Any] = None,
+    ) -> None:
+        """End the agent invocation span"""
+        # Clear the stored agent
+        if hasattr(context_wrapper, "_sentry_current_agent"):
+            delattr(context_wrapper, "_sentry_current_agent")
+
+        update_invoke_agent_span(context_wrapper, agent, output)
+
+    def _has_active_agent_span(context_wrapper: agents.RunContextWrapper) -> bool:
+        """Check if there's an active agent span for this context"""
+        return getattr(context_wrapper, "_sentry_current_agent", None) is not None
+
+    def _get_current_agent(
+        context_wrapper: agents.RunContextWrapper,
+    ) -> Optional[agents.Agent]:
+        """Get the current agent from context wrapper"""
+        return getattr(context_wrapper, "_sentry_current_agent", None)
+
+    @wraps(
+        original_run_single_turn.__func__
+        if hasattr(original_run_single_turn, "__func__")
+        else original_run_single_turn
+    )
+    async def patched_run_single_turn(
+        cls: agents.Runner, *args: Any, **kwargs: Any
+    ) -> Any:
+        """Patched _run_single_turn that creates agent invocation spans"""
+        agent = kwargs.get("agent")
+        context_wrapper = kwargs.get("context_wrapper")
+        should_run_agent_start_hooks = kwargs.get("should_run_agent_start_hooks")
+
+        # Start agent span when agent starts (but only once per agent)
+        if should_run_agent_start_hooks and agent and context_wrapper:
+            # End any existing span for a different agent
+            if _has_active_agent_span(context_wrapper):
+                current_agent = _get_current_agent(context_wrapper)
+                if current_agent and current_agent != agent:
+                    _end_invoke_agent_span(context_wrapper, current_agent)
+
+            _start_invoke_agent_span(context_wrapper, agent)
+
+        # Call original method with all the correct parameters
+        try:
+            result = await original_run_single_turn(*args, **kwargs)
+        finally:
+            if agent and context_wrapper and _has_active_agent_span(context_wrapper):
+                _end_invoke_agent_span(context_wrapper, agent)
+
+        return result
+
+    @wraps(
+        original_execute_handoffs.__func__
+        if hasattr(original_execute_handoffs, "__func__")
+        else original_execute_handoffs
+    )
+    async def patched_execute_handoffs(
+        cls: agents.Runner, *args: Any, **kwargs: Any
+    ) -> Any:
+        """Patched execute_handoffs that creates handoff spans and ends agent span for handoffs"""
+        context_wrapper = kwargs.get("context_wrapper")
+        run_handoffs = kwargs.get("run_handoffs")
+        agent = kwargs.get("agent")
+
+        # Create Sentry handoff span for the first handoff (agents library only processes the first one)
+        if run_handoffs:
+            first_handoff = run_handoffs[0]
+            handoff_agent_name = first_handoff.handoff.agent_name
+            handoff_span(context_wrapper, agent, handoff_agent_name)
+
+        # Call original method with all parameters
+        try:
+            result = await original_execute_handoffs(*args, **kwargs)
+
+        finally:
+            # End span for current agent after handoff processing is complete
+            if agent and context_wrapper and _has_active_agent_span(context_wrapper):
+                _end_invoke_agent_span(context_wrapper, agent)
+
+        return result
+
+    @wraps(
+        original_execute_final_output.__func__
+        if hasattr(original_execute_final_output, "__func__")
+        else original_execute_final_output
+    )
+    async def patched_execute_final_output(
+        cls: agents.Runner, *args: Any, **kwargs: Any
+    ) -> Any:
+        """Patched execute_final_output that ends agent span for final outputs"""
+        agent = kwargs.get("agent")
+        context_wrapper = kwargs.get("context_wrapper")
+        final_output = kwargs.get("final_output")
+
+        # Call original method with all parameters
+        try:
+            result = await original_execute_final_output(*args, **kwargs)
+        finally:
+            # End span for current agent after final output processing is complete
+            if agent and context_wrapper and _has_active_agent_span(context_wrapper):
+                _end_invoke_agent_span(context_wrapper, agent, final_output)
+
+        return result
+
+    # Apply patches
+    agents.run.AgentRunner._run_single_turn = classmethod(patched_run_single_turn)
+    agents._run_impl.RunImpl.execute_handoffs = classmethod(patched_execute_handoffs)
+    agents._run_impl.RunImpl.execute_final_output = classmethod(
+        patched_execute_final_output
+    )
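A detail worth noting in the patch above: the targets are classmethods, so the original is unwrapped via __func__ for @wraps, and the replacement is re-wrapped with classmethod(...) before assignment so the cls binding is preserved. A self-contained sketch of that pattern (the Service class is hypothetical):

from functools import wraps


class Service:
    @classmethod
    def run(cls, x):
        return x * 2


# Attribute access on the class yields a bound classmethod; its underlying
# plain function is available as __func__, which is what @wraps needs.
original = Service.run

@wraps(original.__func__ if hasattr(original, "__func__") else original)
def patched(cls, x):
    print(f"calling {cls.__name__}.run")
    return original(x) + 1  # `original` is already bound to Service

Service.run = classmethod(patched)  # re-wrap so `cls` is passed again

print(Service.run(10))  # prints "calling Service.run", then 21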
sentry_sdk/integrations/openai_agents/patches/models.py (new file)

@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+from functools import wraps
+
+from sentry_sdk.integrations import DidNotEnable
+
+from ..spans import ai_client_span, update_ai_client_span
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable
+
+
+try:
+    import agents
+except ImportError:
+    raise DidNotEnable("OpenAI Agents not installed")
+
+
+def _create_get_model_wrapper(
+    original_get_model: Callable[..., Any],
+) -> Callable[..., Any]:
+    """
+    Wraps the agents.Runner._get_model method to wrap the get_response method of the model to create an AI client span.
+    """
+
+    @wraps(
+        original_get_model.__func__
+        if hasattr(original_get_model, "__func__")
+        else original_get_model
+    )
+    def wrapped_get_model(
+        cls: agents.Runner, agent: agents.Agent, run_config: agents.RunConfig
+    ) -> agents.Model:
+        model = original_get_model(agent, run_config)
+        original_get_response = model.get_response
+
+        @wraps(original_get_response)
+        async def wrapped_get_response(*args: Any, **kwargs: Any) -> Any:
+            with ai_client_span(agent, kwargs) as span:
+                result = await original_get_response(*args, **kwargs)
+
+                update_ai_client_span(span, agent, kwargs, result)
+
+            return result
+
+        model.get_response = wrapped_get_response
+
+        return model
+
+    return wrapped_get_model
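The models.py patch reduces to a general pattern: intercept an object at its factory, replace one async method on that single instance, and hand the object back, leaving the class itself untouched. A generic sketch under hypothetical names (Client and make_client are not part of the SDK or the agents library):

import asyncio
from functools import wraps


class Client:
    async def get_response(self, prompt):
        return f"echo: {prompt}"


def make_client():
    return Client()


def instrumented_make_client():
    client = make_client()
    original_get_response = client.get_response  # bound method of this instance

    @wraps(original_get_response)
    async def wrapped_get_response(*args, **kwargs):
        print("span start")  # stand-in for ai_client_span(...)
        result = await original_get_response(*args, **kwargs)
        print("span end")    # stand-in for update_ai_client_span(...)
        return result

    # Only this instance is patched; other Client objects are unaffected.
    client.get_response = wrapped_get_response
    return client


print(asyncio.run(instrumented_make_client().get_response("hi")))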