sentry-sdk 0.18.0__py2.py3-none-any.whl → 2.46.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (193)
  1. sentry_sdk/__init__.py +48 -6
  2. sentry_sdk/_compat.py +64 -56
  3. sentry_sdk/_init_implementation.py +84 -0
  4. sentry_sdk/_log_batcher.py +172 -0
  5. sentry_sdk/_lru_cache.py +47 -0
  6. sentry_sdk/_metrics_batcher.py +167 -0
  7. sentry_sdk/_queue.py +81 -19
  8. sentry_sdk/_types.py +311 -11
  9. sentry_sdk/_werkzeug.py +98 -0
  10. sentry_sdk/ai/__init__.py +7 -0
  11. sentry_sdk/ai/monitoring.py +137 -0
  12. sentry_sdk/ai/utils.py +144 -0
  13. sentry_sdk/api.py +409 -67
  14. sentry_sdk/attachments.py +75 -0
  15. sentry_sdk/client.py +849 -103
  16. sentry_sdk/consts.py +1389 -34
  17. sentry_sdk/crons/__init__.py +10 -0
  18. sentry_sdk/crons/api.py +62 -0
  19. sentry_sdk/crons/consts.py +4 -0
  20. sentry_sdk/crons/decorator.py +135 -0
  21. sentry_sdk/debug.py +12 -15
  22. sentry_sdk/envelope.py +112 -61
  23. sentry_sdk/feature_flags.py +71 -0
  24. sentry_sdk/hub.py +442 -386
  25. sentry_sdk/integrations/__init__.py +228 -58
  26. sentry_sdk/integrations/_asgi_common.py +108 -0
  27. sentry_sdk/integrations/_wsgi_common.py +131 -40
  28. sentry_sdk/integrations/aiohttp.py +221 -72
  29. sentry_sdk/integrations/anthropic.py +439 -0
  30. sentry_sdk/integrations/argv.py +4 -6
  31. sentry_sdk/integrations/ariadne.py +161 -0
  32. sentry_sdk/integrations/arq.py +247 -0
  33. sentry_sdk/integrations/asgi.py +237 -135
  34. sentry_sdk/integrations/asyncio.py +144 -0
  35. sentry_sdk/integrations/asyncpg.py +208 -0
  36. sentry_sdk/integrations/atexit.py +13 -18
  37. sentry_sdk/integrations/aws_lambda.py +233 -80
  38. sentry_sdk/integrations/beam.py +27 -35
  39. sentry_sdk/integrations/boto3.py +137 -0
  40. sentry_sdk/integrations/bottle.py +91 -69
  41. sentry_sdk/integrations/celery/__init__.py +529 -0
  42. sentry_sdk/integrations/celery/beat.py +293 -0
  43. sentry_sdk/integrations/celery/utils.py +43 -0
  44. sentry_sdk/integrations/chalice.py +35 -28
  45. sentry_sdk/integrations/clickhouse_driver.py +177 -0
  46. sentry_sdk/integrations/cloud_resource_context.py +280 -0
  47. sentry_sdk/integrations/cohere.py +274 -0
  48. sentry_sdk/integrations/dedupe.py +32 -8
  49. sentry_sdk/integrations/django/__init__.py +343 -89
  50. sentry_sdk/integrations/django/asgi.py +201 -22
  51. sentry_sdk/integrations/django/caching.py +204 -0
  52. sentry_sdk/integrations/django/middleware.py +80 -32
  53. sentry_sdk/integrations/django/signals_handlers.py +91 -0
  54. sentry_sdk/integrations/django/templates.py +69 -2
  55. sentry_sdk/integrations/django/transactions.py +39 -14
  56. sentry_sdk/integrations/django/views.py +69 -16
  57. sentry_sdk/integrations/dramatiq.py +226 -0
  58. sentry_sdk/integrations/excepthook.py +19 -13
  59. sentry_sdk/integrations/executing.py +5 -6
  60. sentry_sdk/integrations/falcon.py +128 -65
  61. sentry_sdk/integrations/fastapi.py +141 -0
  62. sentry_sdk/integrations/flask.py +114 -75
  63. sentry_sdk/integrations/gcp.py +67 -36
  64. sentry_sdk/integrations/gnu_backtrace.py +14 -22
  65. sentry_sdk/integrations/google_genai/__init__.py +301 -0
  66. sentry_sdk/integrations/google_genai/consts.py +16 -0
  67. sentry_sdk/integrations/google_genai/streaming.py +155 -0
  68. sentry_sdk/integrations/google_genai/utils.py +576 -0
  69. sentry_sdk/integrations/gql.py +162 -0
  70. sentry_sdk/integrations/graphene.py +151 -0
  71. sentry_sdk/integrations/grpc/__init__.py +168 -0
  72. sentry_sdk/integrations/grpc/aio/__init__.py +7 -0
  73. sentry_sdk/integrations/grpc/aio/client.py +95 -0
  74. sentry_sdk/integrations/grpc/aio/server.py +100 -0
  75. sentry_sdk/integrations/grpc/client.py +91 -0
  76. sentry_sdk/integrations/grpc/consts.py +1 -0
  77. sentry_sdk/integrations/grpc/server.py +66 -0
  78. sentry_sdk/integrations/httpx.py +178 -0
  79. sentry_sdk/integrations/huey.py +174 -0
  80. sentry_sdk/integrations/huggingface_hub.py +378 -0
  81. sentry_sdk/integrations/langchain.py +1132 -0
  82. sentry_sdk/integrations/langgraph.py +337 -0
  83. sentry_sdk/integrations/launchdarkly.py +61 -0
  84. sentry_sdk/integrations/litellm.py +287 -0
  85. sentry_sdk/integrations/litestar.py +315 -0
  86. sentry_sdk/integrations/logging.py +261 -85
  87. sentry_sdk/integrations/loguru.py +213 -0
  88. sentry_sdk/integrations/mcp.py +566 -0
  89. sentry_sdk/integrations/modules.py +6 -33
  90. sentry_sdk/integrations/openai.py +725 -0
  91. sentry_sdk/integrations/openai_agents/__init__.py +61 -0
  92. sentry_sdk/integrations/openai_agents/consts.py +1 -0
  93. sentry_sdk/integrations/openai_agents/patches/__init__.py +5 -0
  94. sentry_sdk/integrations/openai_agents/patches/agent_run.py +140 -0
  95. sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
  96. sentry_sdk/integrations/openai_agents/patches/models.py +50 -0
  97. sentry_sdk/integrations/openai_agents/patches/runner.py +45 -0
  98. sentry_sdk/integrations/openai_agents/patches/tools.py +77 -0
  99. sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
  100. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +21 -0
  101. sentry_sdk/integrations/openai_agents/spans/ai_client.py +42 -0
  102. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +48 -0
  103. sentry_sdk/integrations/openai_agents/spans/handoff.py +19 -0
  104. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +86 -0
  105. sentry_sdk/integrations/openai_agents/utils.py +199 -0
  106. sentry_sdk/integrations/openfeature.py +35 -0
  107. sentry_sdk/integrations/opentelemetry/__init__.py +7 -0
  108. sentry_sdk/integrations/opentelemetry/consts.py +5 -0
  109. sentry_sdk/integrations/opentelemetry/integration.py +58 -0
  110. sentry_sdk/integrations/opentelemetry/propagator.py +117 -0
  111. sentry_sdk/integrations/opentelemetry/span_processor.py +391 -0
  112. sentry_sdk/integrations/otlp.py +82 -0
  113. sentry_sdk/integrations/pure_eval.py +20 -11
  114. sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
  115. sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
  116. sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
  117. sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +215 -0
  118. sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +110 -0
  119. sentry_sdk/integrations/pydantic_ai/patches/model_request.py +40 -0
  120. sentry_sdk/integrations/pydantic_ai/patches/tools.py +98 -0
  121. sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
  122. sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +246 -0
  123. sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
  124. sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
  125. sentry_sdk/integrations/pydantic_ai/utils.py +223 -0
  126. sentry_sdk/integrations/pymongo.py +214 -0
  127. sentry_sdk/integrations/pyramid.py +71 -60
  128. sentry_sdk/integrations/quart.py +237 -0
  129. sentry_sdk/integrations/ray.py +165 -0
  130. sentry_sdk/integrations/redis/__init__.py +48 -0
  131. sentry_sdk/integrations/redis/_async_common.py +116 -0
  132. sentry_sdk/integrations/redis/_sync_common.py +119 -0
  133. sentry_sdk/integrations/redis/consts.py +19 -0
  134. sentry_sdk/integrations/redis/modules/__init__.py +0 -0
  135. sentry_sdk/integrations/redis/modules/caches.py +118 -0
  136. sentry_sdk/integrations/redis/modules/queries.py +65 -0
  137. sentry_sdk/integrations/redis/rb.py +32 -0
  138. sentry_sdk/integrations/redis/redis.py +69 -0
  139. sentry_sdk/integrations/redis/redis_cluster.py +107 -0
  140. sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +50 -0
  141. sentry_sdk/integrations/redis/utils.py +148 -0
  142. sentry_sdk/integrations/rq.py +62 -52
  143. sentry_sdk/integrations/rust_tracing.py +284 -0
  144. sentry_sdk/integrations/sanic.py +248 -114
  145. sentry_sdk/integrations/serverless.py +13 -22
  146. sentry_sdk/integrations/socket.py +96 -0
  147. sentry_sdk/integrations/spark/spark_driver.py +115 -62
  148. sentry_sdk/integrations/spark/spark_worker.py +42 -50
  149. sentry_sdk/integrations/sqlalchemy.py +82 -37
  150. sentry_sdk/integrations/starlette.py +737 -0
  151. sentry_sdk/integrations/starlite.py +292 -0
  152. sentry_sdk/integrations/statsig.py +37 -0
  153. sentry_sdk/integrations/stdlib.py +100 -58
  154. sentry_sdk/integrations/strawberry.py +394 -0
  155. sentry_sdk/integrations/sys_exit.py +70 -0
  156. sentry_sdk/integrations/threading.py +142 -38
  157. sentry_sdk/integrations/tornado.py +68 -53
  158. sentry_sdk/integrations/trytond.py +15 -20
  159. sentry_sdk/integrations/typer.py +60 -0
  160. sentry_sdk/integrations/unleash.py +33 -0
  161. sentry_sdk/integrations/unraisablehook.py +53 -0
  162. sentry_sdk/integrations/wsgi.py +126 -125
  163. sentry_sdk/logger.py +96 -0
  164. sentry_sdk/metrics.py +81 -0
  165. sentry_sdk/monitor.py +120 -0
  166. sentry_sdk/profiler/__init__.py +49 -0
  167. sentry_sdk/profiler/continuous_profiler.py +730 -0
  168. sentry_sdk/profiler/transaction_profiler.py +839 -0
  169. sentry_sdk/profiler/utils.py +195 -0
  170. sentry_sdk/scope.py +1542 -112
  171. sentry_sdk/scrubber.py +177 -0
  172. sentry_sdk/serializer.py +152 -210
  173. sentry_sdk/session.py +177 -0
  174. sentry_sdk/sessions.py +202 -179
  175. sentry_sdk/spotlight.py +242 -0
  176. sentry_sdk/tracing.py +1202 -294
  177. sentry_sdk/tracing_utils.py +1236 -0
  178. sentry_sdk/transport.py +693 -189
  179. sentry_sdk/types.py +52 -0
  180. sentry_sdk/utils.py +1395 -228
  181. sentry_sdk/worker.py +30 -17
  182. sentry_sdk-2.46.0.dist-info/METADATA +268 -0
  183. sentry_sdk-2.46.0.dist-info/RECORD +189 -0
  184. {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/WHEEL +1 -1
  185. sentry_sdk-2.46.0.dist-info/entry_points.txt +2 -0
  186. sentry_sdk-2.46.0.dist-info/licenses/LICENSE +21 -0
  187. sentry_sdk/_functools.py +0 -66
  188. sentry_sdk/integrations/celery.py +0 -275
  189. sentry_sdk/integrations/redis.py +0 -103
  190. sentry_sdk-0.18.0.dist-info/LICENSE +0 -9
  191. sentry_sdk-0.18.0.dist-info/METADATA +0 -66
  192. sentry_sdk-0.18.0.dist-info/RECORD +0 -65
  193. {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,725 @@
1
+ from functools import wraps
2
+
3
+ import sentry_sdk
4
+ from sentry_sdk import consts
5
+ from sentry_sdk.ai.monitoring import record_token_usage
6
+ from sentry_sdk.ai.utils import (
7
+ set_data_normalized,
8
+ normalize_message_roles,
9
+ truncate_and_annotate_messages,
10
+ )
11
+ from sentry_sdk.consts import SPANDATA
12
+ from sentry_sdk.integrations import DidNotEnable, Integration
13
+ from sentry_sdk.scope import should_send_default_pii
14
+ from sentry_sdk.tracing_utils import set_span_errored
15
+ from sentry_sdk.utils import (
16
+ capture_internal_exceptions,
17
+ event_from_exception,
18
+ safe_serialize,
19
+ )
20
+
21
+ from typing import TYPE_CHECKING
22
+
23
+ if TYPE_CHECKING:
24
+ from typing import Any, Iterable, List, Optional, Callable, AsyncIterator, Iterator
25
+ from sentry_sdk.tracing import Span
26
+
27
+ try:
28
+ try:
29
+ from openai import NotGiven
30
+ except ImportError:
31
+ NotGiven = None
32
+
33
+ try:
34
+ from openai import Omit
35
+ except ImportError:
36
+ Omit = None
37
+
38
+ from openai.resources.chat.completions import Completions, AsyncCompletions
39
+ from openai.resources import Embeddings, AsyncEmbeddings
40
+
41
+ if TYPE_CHECKING:
42
+ from openai.types.chat import ChatCompletionMessageParam, ChatCompletionChunk
43
+ except ImportError:
44
+ raise DidNotEnable("OpenAI not installed")
45
+
46
+ RESPONSES_API_ENABLED = True
47
+ try:
48
+ # responses API support was introduced in v1.66.0
49
+ from openai.resources.responses import Responses, AsyncResponses
50
+ from openai.types.responses.response_completed_event import ResponseCompletedEvent
51
+ except ImportError:
52
+ RESPONSES_API_ENABLED = False
53
+
54
+
55
class OpenAIIntegration(Integration):
    """Sentry integration that instruments the OpenAI Python client."""

    identifier = "openai"
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True, tiktoken_encoding_name=None):
        # type: (OpenAIIntegration, bool, Optional[str]) -> None
        self.include_prompts = include_prompts
        self.tiktoken_encoding = None

        if tiktoken_encoding_name is None:
            return

        # tiktoken is only needed when an encoding name is configured, so the
        # import stays local to avoid a hard dependency.
        import tiktoken  # type: ignore

        self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)

    @staticmethod
    def setup_once():
        # type: () -> None
        # Monkeypatch the sync and async entry points of each supported API.
        Completions.create = _wrap_chat_completion_create(Completions.create)
        AsyncCompletions.create = _wrap_async_chat_completion_create(
            AsyncCompletions.create
        )

        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
        AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)

        if RESPONSES_API_ENABLED:
            Responses.create = _wrap_responses_create(Responses.create)
            AsyncResponses.create = _wrap_async_responses_create(AsyncResponses.create)

    def count_tokens(self, s):
        # type: (OpenAIIntegration, str) -> int
        # Without a configured tiktoken encoding we cannot count, so report 0.
        encoding = self.tiktoken_encoding
        if encoding is None:
            return 0
        return len(encoding.encode_ordinary(s))
89
+
90
+
91
def _capture_exception(exc, manual_span_cleanup=True):
    # type: (Any, bool) -> None
    """Mark the current span as errored and report *exc* to Sentry.

    When *manual_span_cleanup* is true, the currently open span is closed by
    hand — callers that did not use the ``start_span`` context manager have
    no other way to finish it.
    """
    span = sentry_sdk.get_current_span()
    set_span_errored(span)

    if manual_span_cleanup and span is not None:
        span.__exit__(None, None, None)

    event, hint = event_from_exception(
        exc,
        client_options=sentry_sdk.get_client().options,
        mechanism={"type": "openai", "handled": False},
    )
    sentry_sdk.capture_event(event, hint=hint)
107
+
108
+
109
+ def _get_usage(usage, names):
110
+ # type: (Any, List[str]) -> int
111
+ for name in names:
112
+ if hasattr(usage, name) and isinstance(getattr(usage, name), int):
113
+ return getattr(usage, name)
114
+ return 0
115
+
116
+
117
def _calculate_token_usage(
    messages, response, span, streaming_message_responses, count_tokens
):
    # type: (Optional[Iterable[ChatCompletionMessageParam]], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
    """Extract token counts from *response* and record them on *span*.

    Prefers the usage object reported by the API; falls back to counting
    tokens manually (via *count_tokens*) when the API reported nothing.
    """
    input_tokens = 0  # type: Optional[int]
    input_tokens_cached = 0  # type: Optional[int]
    output_tokens = 0  # type: Optional[int]
    output_tokens_reasoning = 0  # type: Optional[int]
    total_tokens = 0  # type: Optional[int]

    usage = getattr(response, "usage", None)
    if usage is not None:
        # Chat completions and the responses API use different field names.
        input_tokens = _get_usage(usage, ["input_tokens", "prompt_tokens"])
        input_details = getattr(usage, "input_tokens_details", None)
        if input_details is not None:
            input_tokens_cached = _get_usage(input_details, ["cached_tokens"])

        output_tokens = _get_usage(usage, ["output_tokens", "completion_tokens"])
        output_details = getattr(usage, "output_tokens_details", None)
        if output_details is not None:
            output_tokens_reasoning = _get_usage(output_details, ["reasoning_tokens"])

        total_tokens = _get_usage(usage, ["total_tokens"])

    # Manual fallback for the input side.
    if input_tokens == 0:
        for message in messages or []:
            if isinstance(message, dict) and "content" in message:
                input_tokens += count_tokens(message["content"])
            elif isinstance(message, str):
                input_tokens += count_tokens(message)

    # Manual fallback for the output side (streamed text wins over choices).
    if output_tokens == 0:
        if streaming_message_responses is not None:
            for message in streaming_message_responses:
                output_tokens += count_tokens(message)
        elif hasattr(response, "choices"):
            for choice in response.choices:
                if hasattr(choice, "message"):
                    output_tokens += count_tokens(choice.message)

    # A count of 0 means "unknown" — report it as absent rather than as zero.
    record_token_usage(
        span,
        input_tokens=input_tokens or None,
        input_tokens_cached=input_tokens_cached or None,
        output_tokens=output_tokens or None,
        output_tokens_reasoning=output_tokens_reasoning or None,
        total_tokens=total_tokens or None,
    )
176
+
177
+
178
def _set_input_data(span, kwargs, operation, integration):
    # type: (Span, dict[str, Any], str, OpenAIIntegration) -> None
    """Attach request data (messages, model, sampling params, tools) to *span*."""
    # The chat API passes "messages"; the responses/embeddings APIs use "input".
    messages = kwargs.get("messages")
    if messages is None:
        messages = kwargs.get("input")

    if isinstance(messages, str):
        messages = [messages]

    record_prompts = (
        messages is not None
        and len(messages) > 0
        and should_send_default_pii()
        and integration.include_prompts
    )
    if record_prompts:
        normalized_messages = normalize_message_roles(messages)
        scope = sentry_sdk.get_current_scope()
        messages_data = truncate_and_annotate_messages(normalized_messages, span, scope)
        if messages_data is not None:
            # Embeddings get their own dedicated input field.
            target_field = (
                SPANDATA.GEN_AI_EMBEDDINGS_INPUT
                if operation == "embeddings"
                else SPANDATA.GEN_AI_REQUEST_MESSAGES
            )
            set_data_normalized(span, target_field, messages_data, unpack=False)

    # Attributes present on every request.
    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)

    # Optional request attributes, copied over only when actually given.
    optional_attributes = {
        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
        "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
        "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
        "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
    }
    for kwarg_name, attribute in optional_attributes.items():
        value = kwargs.get(kwarg_name)
        if value is not None and _is_given(value):
            set_data_normalized(span, attribute, value)

    # Tool / function definitions offered to the model.
    tools = kwargs.get("tools")
    if tools is not None and _is_given(tools) and len(tools) > 0:
        set_data_normalized(
            span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
        )
234
+
235
+
236
def _set_output_data(span, response, kwargs, integration, finish_span=True):
    # type: (Span, Any, dict[str, Any], OpenAIIntegration, bool) -> None
    """Record response attributes and token usage on *span*.

    Handles three response shapes: chat completions (``choices``), the
    responses API (``output``) and streaming responses (``_iterator``).
    When *finish_span* is true the span is closed after the data is recorded
    — for streams, only once the stream has been fully consumed.
    """
    if hasattr(response, "model"):
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model)

    # Input messages (the prompt or data sent to the model)
    # used for the token usage calculation
    messages = kwargs.get("messages")
    if messages is None:
        messages = kwargs.get("input")

    if messages is not None and isinstance(messages, str):
        messages = [messages]

    if hasattr(response, "choices"):
        # Non-streaming chat completions API response.
        if should_send_default_pii() and integration.include_prompts:
            response_text = [
                choice.message.model_dump()
                for choice in response.choices
                if choice.message is not None
            ]
            if len(response_text) > 0:
                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_text)

        _calculate_token_usage(messages, response, span, None, integration.count_tokens)

        if finish_span:
            span.__exit__(None, None, None)

    elif hasattr(response, "output"):
        # Non-streaming responses API response.
        if should_send_default_pii() and integration.include_prompts:
            output_messages = {
                "response": [],
                "tool": [],
            }  # type: (dict[str, list[Any]])

            for output in response.output:
                if output.type == "function_call":
                    output_messages["tool"].append(output.dict())
                elif output.type == "message":
                    for output_message in output.content:
                        try:
                            output_messages["response"].append(output_message.text)
                        except AttributeError:
                            # Unknown output message type, just return the json
                            output_messages["response"].append(output_message.dict())

            if len(output_messages["tool"]) > 0:
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
                    output_messages["tool"],
                    unpack=False,
                )

            if len(output_messages["response"]) > 0:
                set_data_normalized(
                    span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
                )

        _calculate_token_usage(messages, response, span, None, integration.count_tokens)

        if finish_span:
            span.__exit__(None, None, None)

    elif hasattr(response, "_iterator"):
        # Streaming response: wrap the underlying (sync or async) iterator so
        # the span is populated and closed only after the stream is consumed.
        data_buf: list[list[str]] = []  # one for each choice

        old_iterator = response._iterator

        def new_iterator():
            # type: () -> Iterator[ChatCompletionChunk]
            # Collects streamed text chunks; token usage is taken from the
            # final ResponseCompletedEvent when available, otherwise counted
            # manually after the stream ends.
            count_tokens_manually = True
            for x in old_iterator:
                with capture_internal_exceptions():
                    # OpenAI chat completion API
                    if hasattr(x, "choices"):
                        choice_index = 0
                        for choice in x.choices:
                            if hasattr(choice, "delta") and hasattr(
                                choice.delta, "content"
                            ):
                                content = choice.delta.content
                                if len(data_buf) <= choice_index:
                                    data_buf.append([])
                                data_buf[choice_index].append(content or "")
                            choice_index += 1

                    # OpenAI responses API
                    elif hasattr(x, "delta"):
                        if len(data_buf) == 0:
                            data_buf.append([])
                        data_buf[0].append(x.delta or "")

                    # OpenAI responses API end of streaming response
                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
                        _calculate_token_usage(
                            messages,
                            x.response,
                            span,
                            None,
                            integration.count_tokens,
                        )
                        count_tokens_manually = False

                yield x

            with capture_internal_exceptions():
                if len(data_buf) > 0:
                    all_responses = ["".join(chunk) for chunk in data_buf]
                    if should_send_default_pii() and integration.include_prompts:
                        set_data_normalized(
                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
                        )
                    if count_tokens_manually:
                        _calculate_token_usage(
                            messages,
                            response,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )

            if finish_span:
                span.__exit__(None, None, None)

        async def new_iterator_async():
            # type: () -> AsyncIterator[ChatCompletionChunk]
            # Async twin of new_iterator(); shares data_buf with the closure.
            count_tokens_manually = True
            async for x in old_iterator:
                with capture_internal_exceptions():
                    # OpenAI chat completion API
                    if hasattr(x, "choices"):
                        choice_index = 0
                        for choice in x.choices:
                            if hasattr(choice, "delta") and hasattr(
                                choice.delta, "content"
                            ):
                                content = choice.delta.content
                                if len(data_buf) <= choice_index:
                                    data_buf.append([])
                                data_buf[choice_index].append(content or "")
                            choice_index += 1

                    # OpenAI responses API
                    elif hasattr(x, "delta"):
                        if len(data_buf) == 0:
                            data_buf.append([])
                        data_buf[0].append(x.delta or "")

                    # OpenAI responses API end of streaming response
                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
                        _calculate_token_usage(
                            messages,
                            x.response,
                            span,
                            None,
                            integration.count_tokens,
                        )
                        count_tokens_manually = False

                yield x

            with capture_internal_exceptions():
                if len(data_buf) > 0:
                    all_responses = ["".join(chunk) for chunk in data_buf]
                    if should_send_default_pii() and integration.include_prompts:
                        set_data_normalized(
                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
                        )
                    if count_tokens_manually:
                        _calculate_token_usage(
                            messages,
                            response,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )
            if finish_span:
                span.__exit__(None, None, None)

        # NOTE(review): async generators are detected by their type repr here;
        # presumably because the openai Stream types hide the generator —
        # confirm before changing.
        if str(type(response._iterator)) == "<class 'async_generator'>":
            response._iterator = new_iterator_async()
        else:
            response._iterator = new_iterator()
    else:
        # Unrecognized payload shape: still record token usage if possible.
        _calculate_token_usage(messages, response, span, None, integration.count_tokens)
        if finish_span:
            span.__exit__(None, None, None)
425
+
426
+
427
def _new_chat_completion_common(f, *args, **kwargs):
    # type: (Any, Any, Any) -> Any
    """Generator driving the instrumentation of a chat-completion call.

    Yields ``(f, args, kwargs)`` to the wrapper, receives the call result
    back via ``send`` and returns it after recording span data.
    """
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    if "messages" not in kwargs:
        # invalid call (in all versions of openai), let it return error
        return f(*args, **kwargs)

    try:
        iter(kwargs["messages"])
    except TypeError:
        # invalid call (in all versions), messages must be iterable
        return f(*args, **kwargs)

    operation = "chat"
    span = sentry_sdk.start_span(
        op=consts.OP.GEN_AI_CHAT,
        name=f"{operation} {kwargs.get('model')}",
        origin=OpenAIIntegration.origin,
    )
    # Entered by hand; _set_output_data / _capture_exception close it later.
    span.__enter__()

    _set_input_data(span, kwargs, operation, integration)

    response = yield f, args, kwargs

    _set_output_data(span, response, kwargs, integration, finish_span=True)

    return response
460
+
461
+
462
def _wrap_chat_completion_create(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the sync ``Completions.create`` with Sentry instrumentation."""

    def _execute_sync(f, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        instrumented = _new_chat_completion_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(instrumented)
        except StopIteration as stop:
            # Instrumentation bailed out early and already invoked f.
            return stop.value

        try:
            try:
                result = f(*args, **kwargs)
            except Exception as error:
                _capture_exception(error)
                raise error from None

            return instrumented.send(result)
        except StopIteration as stop:
            return stop.value

    @wraps(f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None or "messages" not in kwargs:
            # no "messages" means invalid call (in all versions of openai), let it return error
            return f(*args, **kwargs)

        return _execute_sync(f, *args, **kwargs)

    return _sentry_patched_create_sync
495
+
496
+
497
def _wrap_async_chat_completion_create(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the async ``AsyncCompletions.create`` with Sentry instrumentation."""

    async def _execute_async(f, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        instrumented = _new_chat_completion_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(instrumented)
        except StopIteration as stop:
            # Early bail-out returned a coroutine from f; await it here.
            return await stop.value

        try:
            try:
                result = await f(*args, **kwargs)
            except Exception as error:
                _capture_exception(error)
                raise error from None

            return instrumented.send(result)
        except StopIteration as stop:
            return stop.value

    @wraps(f)
    async def _sentry_patched_create_async(*args, **kwargs):
        # type: (Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None or "messages" not in kwargs:
            # no "messages" means invalid call (in all versions of openai), let it return error
            return await f(*args, **kwargs)

        return await _execute_async(f, *args, **kwargs)

    return _sentry_patched_create_async
530
+
531
+
532
def _new_embeddings_create_common(f, *args, **kwargs):
    # type: (Any, Any, Any) -> Any
    """Generator driving the instrumentation of an embeddings call.

    Unlike the chat/responses variants, the span is managed by a context
    manager here, so wrappers must not close it manually.
    """
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    operation = "embeddings"
    with sentry_sdk.start_span(
        op=consts.OP.GEN_AI_EMBEDDINGS,
        name=f"{operation} {kwargs.get('model')}",
        origin=OpenAIIntegration.origin,
    ) as span:
        _set_input_data(span, kwargs, operation, integration)

        response = yield f, args, kwargs

        _set_output_data(span, response, kwargs, integration, finish_span=False)

        return response
553
+
554
+
555
def _wrap_embeddings_create(f):
    # type: (Any) -> Any
    """Wrap the sync ``Embeddings.create`` with Sentry instrumentation."""

    def _execute_sync(f, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        instrumented = _new_embeddings_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(instrumented)
        except StopIteration as stop:
            return stop.value

        try:
            try:
                result = f(*args, **kwargs)
            except Exception as error:
                # The span is owned by a context manager; don't close it here.
                _capture_exception(error, manual_span_cleanup=False)
                raise error from None

            return instrumented.send(result)
        except StopIteration as stop:
            return stop.value

    @wraps(f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return f(*args, **kwargs)

        return _execute_sync(f, *args, **kwargs)

    return _sentry_patched_create_sync
587
+
588
+
589
def _wrap_async_embeddings_create(f):
    # type: (Any) -> Any
    """Wrap the async ``AsyncEmbeddings.create`` with Sentry instrumentation."""

    async def _execute_async(f, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        instrumented = _new_embeddings_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(instrumented)
        except StopIteration as stop:
            # Early bail-out returned a coroutine from f; await it here.
            return await stop.value

        try:
            try:
                result = await f(*args, **kwargs)
            except Exception as error:
                # The span is owned by a context manager; don't close it here.
                _capture_exception(error, manual_span_cleanup=False)
                raise error from None

            return instrumented.send(result)
        except StopIteration as stop:
            return stop.value

    @wraps(f)
    async def _sentry_patched_create_async(*args, **kwargs):
        # type: (Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return await f(*args, **kwargs)

        return await _execute_async(f, *args, **kwargs)

    return _sentry_patched_create_async
621
+
622
+
623
def _new_responses_create_common(f, *args, **kwargs):
    # type: (Any, Any, Any) -> Any
    """Generator driving the instrumentation of a responses-API call."""
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    operation = "responses"
    span = sentry_sdk.start_span(
        op=consts.OP.GEN_AI_RESPONSES,
        name=f"{operation} {kwargs.get('model')}",
        origin=OpenAIIntegration.origin,
    )
    # Entered by hand; _set_output_data / _capture_exception close it later.
    span.__enter__()

    _set_input_data(span, kwargs, operation, integration)

    response = yield f, args, kwargs

    _set_output_data(span, response, kwargs, integration, finish_span=True)

    return response
646
+
647
+
648
def _wrap_responses_create(f):
    # type: (Any) -> Any
    """Wrap the sync ``Responses.create`` with Sentry instrumentation."""

    def _execute_sync(f, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        instrumented = _new_responses_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(instrumented)
        except StopIteration as stop:
            return stop.value

        try:
            try:
                result = f(*args, **kwargs)
            except Exception as error:
                _capture_exception(error)
                raise error from None

            return instrumented.send(result)
        except StopIteration as stop:
            return stop.value

    @wraps(f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return f(*args, **kwargs)

        return _execute_sync(f, *args, **kwargs)

    return _sentry_patched_create_sync
680
+
681
+
682
def _wrap_async_responses_create(f):
    # type: (Any) -> Any
    """Wrap the async ``AsyncResponses.create`` with Sentry instrumentation."""

    async def _execute_async(f, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        instrumented = _new_responses_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(instrumented)
        except StopIteration as stop:
            # Early bail-out returned a coroutine from f; await it here.
            return await stop.value

        try:
            try:
                result = await f(*args, **kwargs)
            except Exception as error:
                _capture_exception(error)
                raise error from None

            return instrumented.send(result)
        except StopIteration as stop:
            return stop.value

    @wraps(f)
    async def _sentry_patched_responses_async(*args, **kwargs):
        # type: (Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return await f(*args, **kwargs)

        return await _execute_async(f, *args, **kwargs)

    return _sentry_patched_responses_async
714
+
715
+
716
def _is_given(obj):
    # type: (Any) -> bool
    """
    Check for givenness safely across different openai versions.

    ``NotGiven``/``Omit`` are sentinel types that may or may not exist in the
    installed openai release; each is ``None`` here when unavailable.
    """
    for sentinel_type in (NotGiven, Omit):
        if sentinel_type is not None and isinstance(obj, sentinel_type):
            return False
    return True