sentry-sdk 3.0.0a3__py2.py3-none-any.whl → 3.0.0a5__py2.py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Note: this release of sentry-sdk has been flagged as potentially problematic.
- sentry_sdk/__init__.py +3 -0
- sentry_sdk/ai/monitoring.py +7 -7
- sentry_sdk/ai/utils.py +5 -1
- sentry_sdk/api.py +73 -0
- sentry_sdk/client.py +10 -7
- sentry_sdk/consts.py +148 -8
- sentry_sdk/integrations/aiohttp.py +1 -1
- sentry_sdk/integrations/anthropic.py +1 -1
- sentry_sdk/integrations/arq.py +1 -1
- sentry_sdk/integrations/asyncio.py +1 -1
- sentry_sdk/integrations/asyncpg.py +1 -1
- sentry_sdk/integrations/boto3.py +2 -2
- sentry_sdk/integrations/celery/__init__.py +4 -3
- sentry_sdk/integrations/clickhouse_driver.py +1 -1
- sentry_sdk/integrations/cohere.py +2 -2
- sentry_sdk/integrations/django/__init__.py +12 -2
- sentry_sdk/integrations/django/asgi.py +1 -1
- sentry_sdk/integrations/django/caching.py +1 -1
- sentry_sdk/integrations/django/middleware.py +1 -1
- sentry_sdk/integrations/django/signals_handlers.py +1 -1
- sentry_sdk/integrations/django/templates.py +2 -2
- sentry_sdk/integrations/django/views.py +2 -2
- sentry_sdk/integrations/gnu_backtrace.py +3 -14
- sentry_sdk/integrations/graphene.py +1 -1
- sentry_sdk/integrations/grpc/aio/client.py +2 -2
- sentry_sdk/integrations/grpc/client.py +2 -2
- sentry_sdk/integrations/httpx.py +2 -2
- sentry_sdk/integrations/huey.py +1 -1
- sentry_sdk/integrations/huggingface_hub.py +1 -1
- sentry_sdk/integrations/langchain.py +1 -1
- sentry_sdk/integrations/litestar.py +3 -3
- sentry_sdk/integrations/logging.py +1 -1
- sentry_sdk/integrations/loguru.py +1 -1
- sentry_sdk/integrations/openai.py +339 -134
- sentry_sdk/integrations/openai_agents/utils.py +1 -49
- sentry_sdk/integrations/openfeature.py +4 -5
- sentry_sdk/integrations/pymongo.py +1 -1
- sentry_sdk/integrations/ray.py +1 -1
- sentry_sdk/integrations/redis/_async_common.py +3 -3
- sentry_sdk/integrations/redis/_sync_common.py +3 -3
- sentry_sdk/integrations/rust_tracing.py +1 -1
- sentry_sdk/integrations/socket.py +2 -2
- sentry_sdk/integrations/starlette.py +3 -3
- sentry_sdk/integrations/starlite.py +4 -4
- sentry_sdk/integrations/stdlib.py +4 -4
- sentry_sdk/integrations/strawberry.py +1 -1
- sentry_sdk/integrations/threading.py +1 -1
- sentry_sdk/opentelemetry/scope.py +13 -1
- sentry_sdk/opentelemetry/span_processor.py +1 -0
- sentry_sdk/serializer.py +8 -11
- sentry_sdk/tracing.py +9 -4
- sentry_sdk/tracing_utils.py +3 -3
- sentry_sdk/utils.py +46 -0
- {sentry_sdk-3.0.0a3.dist-info → sentry_sdk-3.0.0a5.dist-info}/METADATA +1 -1
- {sentry_sdk-3.0.0a3.dist-info → sentry_sdk-3.0.0a5.dist-info}/RECORD +59 -59
- {sentry_sdk-3.0.0a3.dist-info → sentry_sdk-3.0.0a5.dist-info}/WHEEL +0 -0
- {sentry_sdk-3.0.0a3.dist-info → sentry_sdk-3.0.0a5.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-3.0.0a3.dist-info → sentry_sdk-3.0.0a5.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-3.0.0a3.dist-info → sentry_sdk-3.0.0a5.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/openai.py

@@ -11,6 +11,7 @@ from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
+    safe_serialize,
 )

 from typing import TYPE_CHECKING
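The `safe_serialize` helper imported here now lives in `sentry_sdk.utils` (note the `sentry_sdk/utils.py +46` entry in the file list); until now the OpenAI Agents integration carried its own private copy, which is deleted further down in this diff. A usage sketch based on that removed implementation:

# Behavior inferred from the safe_serialize implementation removed from
# openai_agents/utils.py below: callables become "<function module.qualname>",
# dicts/lists/tuples are walked recursively, and anything json.dumps cannot
# handle falls back to str().
from sentry_sdk.utils import safe_serialize

print(safe_serialize({"role": "user", "callback": print}))
# -> {"role": "user", "callback": "<function builtins.print>"}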
@@ -20,6 +21,11 @@ if TYPE_CHECKING:
     from sentry_sdk.tracing import Span

 try:
+    try:
+        from openai import NOT_GIVEN
+    except ImportError:
+        NOT_GIVEN = None
+
     from openai.resources.chat.completions import Completions, AsyncCompletions
     from openai.resources import Embeddings, AsyncEmbeddings

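openai represents omitted keyword arguments with a `NOT_GIVEN` sentinel rather than `None`, so the integration imports the sentinel (falling back to `None` on openai versions that predate it) and later checks values against both before recording them as span attributes. A minimal sketch of that check:

try:
    from openai import NOT_GIVEN
except ImportError:  # older openai releases without the sentinel
    NOT_GIVEN = None

def should_record(value):
    # Record a request attribute only when the caller actually supplied it.
    return value is not NOT_GIVEN and value is not None

print(should_record(0.7), should_record(None), should_record(NOT_GIVEN))
# -> True False False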
@@ -28,6 +34,14 @@ try:
 except ImportError:
     raise DidNotEnable("OpenAI not installed")

+RESPONSES_API_ENABLED = True
+try:
+    # responses API support was introduced in v1.66.0
+    from openai.resources.responses import Responses, AsyncResponses
+    from openai.types.responses.response_completed_event import ResponseCompletedEvent
+except ImportError:
+    RESPONSES_API_ENABLED = False
+

 class OpenAIIntegration(Integration):
     identifier = "openai"
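Per the comment in this hunk, the Responses API only exists from openai v1.66.0 onward, so support is feature-detected by import rather than by comparing version strings. The same probe can be run by hand to see what an installed client provides:

import openai

print(openai.__version__)
try:
    from openai.resources.responses import Responses  # noqa: F401
    print("Responses API present; the integration will instrument it")
except ImportError:
    print("Responses API absent; RESPONSES_API_ENABLED stays False")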
@@ -49,13 +63,17 @@ class OpenAIIntegration(Integration):
     @staticmethod
     def setup_once() -> None:
         Completions.create = _wrap_chat_completion_create(Completions.create)
-        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
-
         AsyncCompletions.create = _wrap_async_chat_completion_create(
             AsyncCompletions.create
         )
+
+        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
         AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)

+        if RESPONSES_API_ENABLED:
+            Responses.create = _wrap_responses_create(Responses.create)
+            AsyncResponses.create = _wrap_async_responses_create(AsyncResponses.create)
+
     def count_tokens(self: OpenAIIntegration, s: str) -> int:
         if self.tiktoken_encoding is not None:
             return len(self.tiktoken_encoding.encode_ordinary(s))
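`setup_once` instruments the client by rebinding each `create` method to a wrapped version; the reordering above merely groups the chat, embeddings, and (new) responses patches. A condensed sketch of the pattern, with `wrap` as an illustrative stand-in for the `_wrap_*` helpers:

from functools import wraps

def wrap(original):
    @wraps(original)
    def patched(*args, **kwargs):
        # open a span, record input data, delegate, record output data
        return original(*args, **kwargs)
    return patched

# as in setup_once():
# Completions.create = wrap(Completions.create)
# Responses.create = wrap(Responses.create)  # only if RESPONSES_API_ENABLED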
@@ -63,6 +81,12 @@ class OpenAIIntegration(Integration):


 def _capture_exception(exc: Any) -> None:
+    # Close an eventually open span
+    # We need to do this by hand because we are not using the start_span context manager
+    current_span = sentry_sdk.get_current_span()
+    if current_span is not None:
+        current_span.__exit__(None, None, None)
+
     event, hint = event_from_exception(
         exc,
         client_options=sentry_sdk.get_client().options,
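This block matters because the chat and responses wrappers call `span.__enter__()` themselves rather than using `with sentry_sdk.start_span(...)`: if the wrapped call raises, nothing else would ever close the span. A condensed view of the lifecycle (the op string here is illustrative, not necessarily the value of consts.OP.GEN_AI_CHAT):

import sentry_sdk

span = sentry_sdk.start_span(op="gen_ai.chat", name="chat gpt-4o")  # op value assumed
span.__enter__()
try:
    raise RuntimeError("simulated OpenAI failure")
except RuntimeError:
    # what _capture_exception now does before building the error event:
    current_span = sentry_sdk.get_current_span()
    if current_span is not None:
        current_span.__exit__(None, None, None)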
@@ -79,7 +103,7 @@ def _get_usage(usage: Any, names: List[str]) -> int:


 def _calculate_token_usage(
-    messages: Iterable[ChatCompletionMessageParam],
+    messages: Optional[Iterable[ChatCompletionMessageParam]],
     response: Any,
     span: Span,
     streaming_message_responses: Optional[List[str]],
@@ -109,13 +133,13 @@ def _calculate_token_usage(
     total_tokens = _get_usage(response.usage, ["total_tokens"])

     # Manually count tokens
-    # TODO: when implementing responses API, check for responses API
     if input_tokens == 0:
-        for message in messages:
-            if "content" in message:
+        for message in messages or []:
+            if isinstance(message, dict) and "content" in message:
                 input_tokens += count_tokens(message["content"])
+            elif isinstance(message, str):
+                input_tokens += count_tokens(message)

-    # TODO: when implementing responses API, check for responses API
     if output_tokens == 0:
         if streaming_message_responses is not None:
             for message in streaming_message_responses:
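With the Responses API, `messages` may be a plain string (the `input` kwarg) or missing entirely, hence the `messages or []` guard and the per-item `isinstance` checks replacing the old dict-only loop. The `count_tokens` callable is `OpenAIIntegration.count_tokens`, which counts locally through a tiktoken encoding when one is configured; a sketch, assuming the `cl100k_base` encoding:

import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")  # assumed encoding choice

def count_tokens(s: str) -> int:
    return len(encoding.encode_ordinary(s))

input_tokens = 0
for message in [{"content": "hi there"}, "plain string input"]:
    if isinstance(message, dict) and "content" in message:
        input_tokens += count_tokens(message["content"])
    elif isinstance(message, str):
        input_tokens += count_tokens(message)
print(input_tokens)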
@@ -142,129 +166,252 @@ def _calculate_token_usage(
     )


-def _new_chat_completion_common(f: Any, *args: Any, **kwargs: Any) -> Any:
-    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
-    if integration is None:
-        return f(*args, **kwargs)
-
-    if "messages" not in kwargs:
-        # invalid call (in all versions of openai), let it return error
-        return f(*args, **kwargs)
+def _set_input_data(
+    span: Span, kwargs: Any, operation: str, integration: OpenAIIntegration
+) -> None:
+    # Input messages (the prompt or data sent to the model)
+    messages = kwargs.get("messages")
+    if messages is None:
+        messages = kwargs.get("input")
+
+    if isinstance(messages, str):
+        messages = [messages]
+
+    if (
+        messages is not None
+        and len(messages) > 0
+        and should_send_default_pii()
+        and integration.include_prompts
+    ):
+        set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
+
+    # Input attributes: Common
+    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
+    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)
+
+    # Input attributes: Optional
+    kwargs_keys_to_attributes = {
+        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
+        "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
+        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+        "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+        "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+    }
+    for key, attribute in kwargs_keys_to_attributes.items():
+        value = kwargs.get(key)
+
+        if value is not NOT_GIVEN and value is not None:
+            set_data_normalized(span, attribute, value)
+
+    # Input attributes: Tools
+    tools = kwargs.get("tools")
+    if tools is not NOT_GIVEN and tools is not None and len(tools) > 0:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
+        )

-    try:
-        iter(kwargs["messages"])
-    except TypeError:
-        # invalid call (in all versions), messages must be iterable
-        return f(*args, **kwargs)

-
-
-
-
+def _set_output_data(
+    span: Span,
+    response: Any,
+    kwargs: Any,
+    integration: OpenAIIntegration,
+    finish_span: bool = True,
+) -> None:
+    if hasattr(response, "model"):
+        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model)

-
-
-
-
-
-    span.__enter__()
+    # Input messages (the prompt or data sent to the model)
+    # used for the token usage calculation
+    messages = kwargs.get("messages")
+    if messages is None:
+        messages = kwargs.get("input")

-
+    if messages is not None and isinstance(messages, str):
+        messages = [messages]

-
+    if hasattr(response, "choices"):
         if should_send_default_pii() and integration.include_prompts:
-
-
-
-
+            response_text = [choice.message.dict() for choice in response.choices]
+            if len(response_text) > 0:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    safe_serialize(response_text),
+                )
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
+            span.__exit__(None, None, None)

-
-
+    elif hasattr(response, "output"):
+        if should_send_default_pii() and integration.include_prompts:
+            response_text = [item.to_dict() for item in response.output]
+            if len(response_text) > 0:
                 set_data_normalized(
                     span,
-                    SPANDATA.
-
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    safe_serialize(response_text),
                 )
-
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
             span.__exit__(None, None, None)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    elif hasattr(response, "_iterator"):
+        data_buf: list[list[str]] = []  # one for each choice
+
+        old_iterator = response._iterator
+
+        def new_iterator() -> Iterator[ChatCompletionChunk]:
+            with capture_internal_exceptions():
+                count_tokens_manually = True
+                for x in old_iterator:
+                    # OpenAI chat completion API
+                    if hasattr(x, "choices"):
+                        choice_index = 0
+                        for choice in x.choices:
+                            if hasattr(choice, "delta") and hasattr(
+                                choice.delta, "content"
+                            ):
+                                content = choice.delta.content
+                                if len(data_buf) <= choice_index:
+                                    data_buf.append([])
+                                data_buf[choice_index].append(content or "")
+                            choice_index += 1
+
+                    # OpenAI responses API
+                    elif hasattr(x, "delta"):
+                        if len(data_buf) == 0:
+                            data_buf.append([])
+                        data_buf[0].append(x.delta or "")
+
+                    # OpenAI responses API end of streaming response
+                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
+                        _calculate_token_usage(
+                            messages,
+                            x.response,
+                            span,
+                            None,
+                            integration.count_tokens,
                         )
-
-
-
-
+                        count_tokens_manually = False
+
+                    yield x
+
+                if len(data_buf) > 0:
+                    all_responses = ["".join(chunk) for chunk in data_buf]
+                    if should_send_default_pii() and integration.include_prompts:
+                        set_data_normalized(
+                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
+                        )
+                    if count_tokens_manually:
                         _calculate_token_usage(
                             messages,
-
+                            response,
                             span,
                             all_responses,
                             integration.count_tokens,
                         )
+
+            if finish_span:
                 span.__exit__(None, None, None)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        async def new_iterator_async() -> AsyncIterator[ChatCompletionChunk]:
+            with capture_internal_exceptions():
+                count_tokens_manually = True
+                async for x in old_iterator:
+                    # OpenAI chat completion API
+                    if hasattr(x, "choices"):
+                        choice_index = 0
+                        for choice in x.choices:
+                            if hasattr(choice, "delta") and hasattr(
+                                choice.delta, "content"
+                            ):
+                                content = choice.delta.content
+                                if len(data_buf) <= choice_index:
+                                    data_buf.append([])
+                                data_buf[choice_index].append(content or "")
+                            choice_index += 1
+
+                    # OpenAI responses API
+                    elif hasattr(x, "delta"):
+                        if len(data_buf) == 0:
+                            data_buf.append([])
+                        data_buf[0].append(x.delta or "")
+
+                    # OpenAI responses API end of streaming response
+                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
+                        _calculate_token_usage(
+                            messages,
+                            x.response,
+                            span,
+                            None,
+                            integration.count_tokens,
+                        )
+                        count_tokens_manually = False
+
+                    yield x
+
+                if len(data_buf) > 0:
+                    all_responses = ["".join(chunk) for chunk in data_buf]
+                    if should_send_default_pii() and integration.include_prompts:
+                        set_data_normalized(
+                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
                         )
-
-                        set_data_normalized(
-                            span, SPANDATA.AI_RESPONSES, all_responses
-                        )
+                    if count_tokens_manually:
                         _calculate_token_usage(
                             messages,
-
+                            response,
                             span,
                             all_responses,
                             integration.count_tokens,
                         )
+            if finish_span:
                 span.__exit__(None, None, None)

-
-
-        else:
-            res._iterator = new_iterator()
-
+        if str(type(response._iterator)) == "<class 'async_generator'>":
+            response._iterator = new_iterator_async()
         else:
-
+            response._iterator = new_iterator()
+    else:
+        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+        if finish_span:
             span.__exit__(None, None, None)
-
+
+
+def _new_chat_completion_common(f: Any, *args: Any, **kwargs: Any) -> Any:
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    if "messages" not in kwargs:
+        # invalid call (in all versions of openai), let it return error
+        return f(*args, **kwargs)
+
+    try:
+        iter(kwargs["messages"])
+    except TypeError:
+        # invalid call (in all versions), messages must be iterable
+        return f(*args, **kwargs)
+
+    model = kwargs.get("model")
+    operation = "chat"
+
+    span = sentry_sdk.start_span(
+        op=consts.OP.GEN_AI_CHAT,
+        name=f"{operation} {model}",
+        origin=OpenAIIntegration.origin,
+    )
+    span.__enter__()
+
+    _set_input_data(span, kwargs, operation, integration)
+
+    response = yield f, args, kwargs
+
+    _set_output_data(span, response, kwargs, integration, finish_span=True)
+
+    return response


 def _wrap_chat_completion_create(f: Callable[..., Any]) -> Callable[..., Any]:
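`_new_chat_completion_common` is a generator so one body can serve both the sync and async wrappers: it yields `(f, args, kwargs)` once, the wrapper performs the actual (awaited or plain) call, and `gen.send(result)` resumes it to record output data. A minimal standalone sketch of that trampoline:

def common(f, *args, **kwargs):
    # ... open span, record input data ...
    response = yield f, args, kwargs  # the wrapper makes the real call
    # ... record output data, close span ...
    return response

def execute_sync(f, *args, **kwargs):
    gen = common(f, *args, **kwargs)
    try:
        f, args, kwargs = next(gen)  # run up to the yield
    except StopIteration as e:
        return e.value  # common() bailed out before yielding
    try:
        return gen.send(f(*args, **kwargs))  # resume with the response
    except StopIteration as e:
        return e.value  # the generator's return value is the final result

print(execute_sync(lambda x: x * 2, 21))  # -> 42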
@@ -336,47 +483,19 @@ def _new_embeddings_create_common(f: Any, *args: Any, **kwargs: Any) -> Any:
     if integration is None:
         return f(*args, **kwargs)

+    model = kwargs.get("model")
+    operation = "embeddings"
+
     with sentry_sdk.start_span(
-        op=consts.OP.
-
+        op=consts.OP.GEN_AI_EMBEDDINGS,
+        name=f"{operation} {model}",
         origin=OpenAIIntegration.origin,
     ) as span:
-        if (
-            should_send_default_pii() and integration.include_prompts
-        ):
-            if isinstance(kwargs["input"], str):
-                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
-            elif (
-                isinstance(kwargs["input"], list)
-                and len(kwargs["input"]) > 0
-                and isinstance(kwargs["input"][0], str)
-            ):
-                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
-            if "model" in kwargs:
-                set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
+        _set_input_data(span, kwargs, operation, integration)

         response = yield f, args, kwargs

-        input_tokens = 0
-        total_tokens = 0
-        if hasattr(response, "usage"):
-            if hasattr(response.usage, "prompt_tokens") and isinstance(
-                response.usage.prompt_tokens, int
-            ):
-                input_tokens = response.usage.prompt_tokens
-            if hasattr(response.usage, "total_tokens") and isinstance(
-                response.usage.total_tokens, int
-            ):
-                total_tokens = response.usage.total_tokens
-
-        if input_tokens == 0:
-            input_tokens = integration.count_tokens(kwargs["input"] or "")
-
-        record_token_usage(
-            span,
-            input_tokens=input_tokens,
-            total_tokens=total_tokens or input_tokens,
-        )
+        _set_output_data(span, response, kwargs, integration, finish_span=False)

         return response

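Note the asymmetry with the chat wrapper: embeddings keep the `with sentry_sdk.start_span(...) as span:` form, so `_set_output_data` is told not to close the span (`finish_span=False`), while chat and responses enter their spans manually and pass `finish_span=True`. The contract, sketched as an illustrative reduction:

def set_output_data(span, finish_span=True):
    ...  # record response attributes and token usage
    if finish_span:
        span.__exit__(None, None, None)

# chat/responses wrappers: span.__enter__() by hand   -> finish_span=True
# embeddings wrapper: `with start_span(...) as span:` -> finish_span=False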
@@ -441,3 +560,89 @@ def _wrap_async_embeddings_create(f: Any) -> Any:
         return await _execute_async(f, *args, **kwargs)

     return _sentry_patched_create_async
+
+
+def _new_responses_create_common(f: Any, *args: Any, **kwargs: Any) -> Any:
+    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+    if integration is None:
+        return f(*args, **kwargs)
+
+    model = kwargs.get("model")
+    operation = "responses"
+
+    span = sentry_sdk.start_span(
+        op=consts.OP.GEN_AI_RESPONSES,
+        name=f"{operation} {model}",
+        origin=OpenAIIntegration.origin,
+    )
+    span.__enter__()
+
+    _set_input_data(span, kwargs, operation, integration)
+
+    response = yield f, args, kwargs
+
+    _set_output_data(span, response, kwargs, integration, finish_span=True)
+
+    return response
+
+
+def _wrap_responses_create(f: Any) -> Any:
+    def _execute_sync(f: Any, *args: Any, **kwargs: Any) -> Any:
+        gen = _new_responses_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return e.value
+
+        try:
+            try:
+                result = f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    def _sentry_patched_create_sync(*args: Any, **kwargs: Any) -> Any:
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return f(*args, **kwargs)
+
+        return _execute_sync(f, *args, **kwargs)
+
+    return _sentry_patched_create_sync
+
+
+def _wrap_async_responses_create(f: Any) -> Any:
+    async def _execute_async(f: Any, *args: Any, **kwargs: Any) -> Any:
+        gen = _new_responses_create_common(f, *args, **kwargs)
+
+        try:
+            f, args, kwargs = next(gen)
+        except StopIteration as e:
+            return await e.value
+
+        try:
+            try:
+                result = await f(*args, **kwargs)
+            except Exception as e:
+                _capture_exception(e)
+                raise e from None
+
+            return gen.send(result)
+        except StopIteration as e:
+            return e.value
+
+    @wraps(f)
+    async def _sentry_patched_responses_async(*args: Any, **kwargs: Any) -> Any:
+        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
+        if integration is None:
+            return await f(*args, **kwargs)
+
+        return await _execute_async(f, *args, **kwargs)
+
+    return _sentry_patched_responses_async
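Taken together, once 3.0.0a5 is installed a Responses API call should be traced automatically, including streamed responses. A hypothetical smoke test (placeholder DSN and model; not taken from the SDK's test suite):

import sentry_sdk
from openai import OpenAI

sentry_sdk.init(dsn="...", traces_sample_rate=1.0, send_default_pii=True)
client = OpenAI()

with sentry_sdk.start_span(name="demo"):
    result = client.responses.create(model="gpt-4o-mini", input="Hello!")
    print(result.output_text)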
sentry_sdk/integrations/openai_agents/utils.py

@@ -1,17 +1,15 @@
 from __future__ import annotations

-import json
 import sentry_sdk
 from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations import DidNotEnable
 from sentry_sdk.scope import should_send_default_pii
-from sentry_sdk.utils import event_from_exception
+from sentry_sdk.utils import event_from_exception, safe_serialize

 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
     from typing import Any
-    from typing import Union
     from agents import Usage

 try:
@@ -153,49 +151,3 @@ def _set_output_data(span: sentry_sdk.tracing.Span, result: Any) -> None:
         span.set_attribute(
             SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(output_messages["response"])
         )
-
-
-def safe_serialize(data: Any) -> str:
-    """Safely serialize to a readable string."""
-
-    def serialize_item(
-        item: Any,
-    ) -> Union[str, dict[Any, Any], list[Any], tuple[Any, ...]]:
-        if callable(item):
-            try:
-                module = getattr(item, "__module__", None)
-                qualname = getattr(item, "__qualname__", None)
-                name = getattr(item, "__name__", "anonymous")
-
-                if module and qualname:
-                    full_path = f"{module}.{qualname}"
-                elif module and name:
-                    full_path = f"{module}.{name}"
-                else:
-                    full_path = name
-
-                return f"<function {full_path}>"
-            except Exception:
-                return f"<callable {type(item).__name__}>"
-        elif isinstance(item, dict):
-            return {k: serialize_item(v) for k, v in item.items()}
-        elif isinstance(item, (list, tuple)):
-            return [serialize_item(x) for x in item]
-        elif hasattr(item, "__dict__"):
-            try:
-                attrs = {
-                    k: serialize_item(v)
-                    for k, v in vars(item).items()
-                    if not k.startswith("_")
-                }
-                return f"<{type(item).__name__} {attrs}>"
-            except Exception:
-                return repr(item)
-        else:
-            return item
-
-    try:
-        serialized = serialize_item(data)
-        return json.dumps(serialized, default=str)
-    except Exception:
-        return str(data)
sentry_sdk/integrations/openfeature.py

@@ -1,5 +1,5 @@
 from __future__ import annotations
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any

 from sentry_sdk.feature_flags import add_feature_flag
 from sentry_sdk.integrations import DidNotEnable, Integration
@@ -9,7 +9,6 @@ try:
     from openfeature.hook import Hook

     if TYPE_CHECKING:
-        from openfeature.flag_evaluation import FlagEvaluationDetails
         from openfeature.hook import HookContext, HookHints
 except ImportError:
     raise DidNotEnable("OpenFeature is not installed")
@@ -28,9 +27,9 @@ class OpenFeatureHook(Hook):

     def after(
         self,
-        hook_context: HookContext,
-        details: FlagEvaluationDetails[bool],
-        hints: HookHints,
+        hook_context: Any,
+        details: Any,
+        hints: Any,
     ) -> None:
         if isinstance(details.value, bool):
             add_feature_flag(details.flag_key, details.value)