sentry-sdk 3.0.0a2__py2.py3-none-any.whl → 3.0.0a3__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (157)
  1. sentry_sdk/__init__.py +2 -0
  2. sentry_sdk/_compat.py +5 -12
  3. sentry_sdk/_init_implementation.py +7 -7
  4. sentry_sdk/_log_batcher.py +17 -29
  5. sentry_sdk/_lru_cache.py +7 -9
  6. sentry_sdk/_queue.py +2 -4
  7. sentry_sdk/_types.py +9 -16
  8. sentry_sdk/_werkzeug.py +5 -7
  9. sentry_sdk/ai/monitoring.py +40 -28
  10. sentry_sdk/ai/utils.py +3 -4
  11. sentry_sdk/api.py +75 -87
  12. sentry_sdk/attachments.py +10 -12
  13. sentry_sdk/client.py +110 -153
  14. sentry_sdk/consts.py +398 -220
  15. sentry_sdk/crons/api.py +16 -17
  16. sentry_sdk/crons/decorator.py +25 -27
  17. sentry_sdk/debug.py +4 -6
  18. sentry_sdk/envelope.py +46 -112
  19. sentry_sdk/feature_flags.py +9 -15
  20. sentry_sdk/integrations/__init__.py +24 -19
  21. sentry_sdk/integrations/_asgi_common.py +16 -18
  22. sentry_sdk/integrations/_wsgi_common.py +22 -33
  23. sentry_sdk/integrations/aiohttp.py +32 -30
  24. sentry_sdk/integrations/anthropic.py +42 -37
  25. sentry_sdk/integrations/argv.py +3 -4
  26. sentry_sdk/integrations/ariadne.py +16 -18
  27. sentry_sdk/integrations/arq.py +19 -28
  28. sentry_sdk/integrations/asgi.py +63 -37
  29. sentry_sdk/integrations/asyncio.py +14 -16
  30. sentry_sdk/integrations/atexit.py +6 -10
  31. sentry_sdk/integrations/aws_lambda.py +26 -36
  32. sentry_sdk/integrations/beam.py +10 -18
  33. sentry_sdk/integrations/boto3.py +18 -16
  34. sentry_sdk/integrations/bottle.py +25 -34
  35. sentry_sdk/integrations/celery/__init__.py +36 -56
  36. sentry_sdk/integrations/celery/beat.py +22 -26
  37. sentry_sdk/integrations/celery/utils.py +15 -17
  38. sentry_sdk/integrations/chalice.py +8 -10
  39. sentry_sdk/integrations/clickhouse_driver.py +21 -31
  40. sentry_sdk/integrations/cloud_resource_context.py +9 -16
  41. sentry_sdk/integrations/cohere.py +17 -23
  42. sentry_sdk/integrations/dedupe.py +5 -8
  43. sentry_sdk/integrations/django/__init__.py +57 -72
  44. sentry_sdk/integrations/django/asgi.py +24 -32
  45. sentry_sdk/integrations/django/caching.py +23 -19
  46. sentry_sdk/integrations/django/middleware.py +17 -20
  47. sentry_sdk/integrations/django/signals_handlers.py +11 -10
  48. sentry_sdk/integrations/django/templates.py +19 -16
  49. sentry_sdk/integrations/django/transactions.py +16 -11
  50. sentry_sdk/integrations/django/views.py +6 -10
  51. sentry_sdk/integrations/dramatiq.py +21 -21
  52. sentry_sdk/integrations/excepthook.py +10 -10
  53. sentry_sdk/integrations/executing.py +3 -4
  54. sentry_sdk/integrations/falcon.py +27 -42
  55. sentry_sdk/integrations/fastapi.py +13 -16
  56. sentry_sdk/integrations/flask.py +31 -38
  57. sentry_sdk/integrations/gcp.py +13 -16
  58. sentry_sdk/integrations/gnu_backtrace.py +4 -6
  59. sentry_sdk/integrations/gql.py +16 -17
  60. sentry_sdk/integrations/graphene.py +13 -12
  61. sentry_sdk/integrations/grpc/__init__.py +3 -2
  62. sentry_sdk/integrations/grpc/aio/server.py +15 -14
  63. sentry_sdk/integrations/grpc/client.py +19 -9
  64. sentry_sdk/integrations/grpc/consts.py +2 -0
  65. sentry_sdk/integrations/grpc/server.py +12 -8
  66. sentry_sdk/integrations/httpx.py +9 -12
  67. sentry_sdk/integrations/huey.py +13 -20
  68. sentry_sdk/integrations/huggingface_hub.py +16 -16
  69. sentry_sdk/integrations/langchain.py +203 -113
  70. sentry_sdk/integrations/launchdarkly.py +13 -10
  71. sentry_sdk/integrations/litestar.py +37 -35
  72. sentry_sdk/integrations/logging.py +28 -35
  73. sentry_sdk/integrations/loguru.py +15 -19
  74. sentry_sdk/integrations/modules.py +3 -4
  75. sentry_sdk/integrations/openai.py +96 -84
  76. sentry_sdk/integrations/openai_agents/__init__.py +49 -0
  77. sentry_sdk/integrations/openai_agents/consts.py +1 -0
  78. sentry_sdk/integrations/openai_agents/patches/__init__.py +4 -0
  79. sentry_sdk/integrations/openai_agents/patches/agent_run.py +152 -0
  80. sentry_sdk/integrations/openai_agents/patches/models.py +52 -0
  81. sentry_sdk/integrations/openai_agents/patches/runner.py +42 -0
  82. sentry_sdk/integrations/openai_agents/patches/tools.py +84 -0
  83. sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
  84. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +20 -0
  85. sentry_sdk/integrations/openai_agents/spans/ai_client.py +46 -0
  86. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +47 -0
  87. sentry_sdk/integrations/openai_agents/spans/handoff.py +24 -0
  88. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +41 -0
  89. sentry_sdk/integrations/openai_agents/utils.py +201 -0
  90. sentry_sdk/integrations/openfeature.py +11 -6
  91. sentry_sdk/integrations/pure_eval.py +6 -10
  92. sentry_sdk/integrations/pymongo.py +13 -17
  93. sentry_sdk/integrations/pyramid.py +31 -36
  94. sentry_sdk/integrations/quart.py +23 -28
  95. sentry_sdk/integrations/ray.py +73 -64
  96. sentry_sdk/integrations/redis/__init__.py +7 -4
  97. sentry_sdk/integrations/redis/_async_common.py +15 -9
  98. sentry_sdk/integrations/redis/_sync_common.py +13 -12
  99. sentry_sdk/integrations/redis/modules/caches.py +17 -8
  100. sentry_sdk/integrations/redis/modules/queries.py +9 -8
  101. sentry_sdk/integrations/redis/rb.py +3 -2
  102. sentry_sdk/integrations/redis/redis.py +4 -4
  103. sentry_sdk/integrations/redis/redis_cluster.py +10 -8
  104. sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +3 -2
  105. sentry_sdk/integrations/redis/utils.py +21 -22
  106. sentry_sdk/integrations/rq.py +13 -16
  107. sentry_sdk/integrations/rust_tracing.py +9 -6
  108. sentry_sdk/integrations/sanic.py +34 -46
  109. sentry_sdk/integrations/serverless.py +22 -27
  110. sentry_sdk/integrations/socket.py +27 -15
  111. sentry_sdk/integrations/spark/__init__.py +1 -0
  112. sentry_sdk/integrations/spark/spark_driver.py +45 -83
  113. sentry_sdk/integrations/spark/spark_worker.py +7 -11
  114. sentry_sdk/integrations/sqlalchemy.py +22 -19
  115. sentry_sdk/integrations/starlette.py +86 -90
  116. sentry_sdk/integrations/starlite.py +28 -34
  117. sentry_sdk/integrations/statsig.py +5 -4
  118. sentry_sdk/integrations/stdlib.py +28 -24
  119. sentry_sdk/integrations/strawberry.py +62 -49
  120. sentry_sdk/integrations/sys_exit.py +7 -11
  121. sentry_sdk/integrations/threading.py +12 -14
  122. sentry_sdk/integrations/tornado.py +28 -32
  123. sentry_sdk/integrations/trytond.py +4 -3
  124. sentry_sdk/integrations/typer.py +8 -6
  125. sentry_sdk/integrations/unleash.py +5 -4
  126. sentry_sdk/integrations/wsgi.py +47 -46
  127. sentry_sdk/logger.py +13 -9
  128. sentry_sdk/monitor.py +16 -28
  129. sentry_sdk/opentelemetry/consts.py +11 -4
  130. sentry_sdk/opentelemetry/contextvars_context.py +17 -15
  131. sentry_sdk/opentelemetry/propagator.py +38 -21
  132. sentry_sdk/opentelemetry/sampler.py +51 -34
  133. sentry_sdk/opentelemetry/scope.py +36 -37
  134. sentry_sdk/opentelemetry/span_processor.py +43 -59
  135. sentry_sdk/opentelemetry/tracing.py +32 -12
  136. sentry_sdk/opentelemetry/utils.py +180 -196
  137. sentry_sdk/profiler/continuous_profiler.py +108 -97
  138. sentry_sdk/profiler/transaction_profiler.py +70 -97
  139. sentry_sdk/profiler/utils.py +11 -15
  140. sentry_sdk/scope.py +251 -264
  141. sentry_sdk/scrubber.py +22 -26
  142. sentry_sdk/serializer.py +40 -54
  143. sentry_sdk/session.py +44 -61
  144. sentry_sdk/sessions.py +35 -49
  145. sentry_sdk/spotlight.py +15 -21
  146. sentry_sdk/tracing.py +116 -182
  147. sentry_sdk/tracing_utils.py +100 -120
  148. sentry_sdk/transport.py +131 -157
  149. sentry_sdk/utils.py +232 -309
  150. sentry_sdk/worker.py +16 -28
  151. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a3.dist-info}/METADATA +1 -1
  152. sentry_sdk-3.0.0a3.dist-info/RECORD +168 -0
  153. sentry_sdk-3.0.0a2.dist-info/RECORD +0 -154
  154. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a3.dist-info}/WHEEL +0 -0
  155. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a3.dist-info}/entry_points.txt +0 -0
  156. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a3.dist-info}/licenses/LICENSE +0 -0
  157. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a3.dist-info}/top_level.txt +0 -0
sentry_sdk/consts.py CHANGED
@@ -1,7 +1,21 @@
+from __future__ import annotations
 import itertools
 from enum import Enum
 from typing import TYPE_CHECKING
 
+if TYPE_CHECKING:
+    from typing import (
+        Optional,
+        Callable,
+        Union,
+        List,
+        Type,
+        Dict,
+        Any,
+        Sequence,
+        Tuple,
+    )
+
 # up top to prevent circular import due to integration import
 DEFAULT_MAX_VALUE_LENGTH = 1024
 
@@ -26,17 +40,6 @@ class CompressionAlgo(Enum):
 
 
 if TYPE_CHECKING:
-    import sentry_sdk
-
-    from typing import Optional
-    from typing import Callable
-    from typing import Union
-    from typing import List
-    from typing import Type
-    from typing import Dict
-    from typing import Any
-    from typing import Sequence
-    from typing import Tuple
     from typing_extensions import Literal
     from typing_extensions import TypedDict
 
@@ -52,6 +55,8 @@ if TYPE_CHECKING:
         TransactionProcessor,
     )
 
+    import sentry_sdk
+
 # Experiments are feature flags to enable and disable certain unstable SDK
 # functionality. Changing them from the defaults (`None`) in production
 # code is highly discouraged. They are not subject to any stability
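
Note on the import reshuffle above: consolidating the `typing` imports under `TYPE_CHECKING` works together with the new `from __future__ import annotations` at the top of the module. Under PEP 563, annotations are stored as strings, so names that exist only for the type checker can appear in signatures without a runtime import. A minimal standalone illustration of the pattern (not taken from the package):

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Only imported while type checking; absent at runtime.
        from decimal import Decimal

    def to_float(value: Decimal) -> float:
        # The annotation is kept as the string "Decimal", so this definition
        # does not raise NameError even though Decimal was never imported.
        return float(value)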
@@ -95,16 +100,45 @@ class SPANDATA:
     See: https://develop.sentry.dev/sdk/performance/span-data-conventions/
     """
 
+    AI_CITATIONS = "ai.citations"
+    """
+    References or sources cited by the AI model in its response.
+    Example: ["Smith et al. 2020", "Jones 2019"]
+    """
+
+    AI_COMPLETION_TOKENS_USED = "ai.completion_tokens.used"
+    """
+    The number of output completion tokens used by the model.
+    Example: 10
+    """
+
+    AI_DOCUMENTS = "ai.documents"
+    """
+    Documents or content chunks used as context for the AI model.
+    Example: ["doc1.txt", "doc2.pdf"]
+    """
+
+    AI_FINISH_REASON = "ai.finish_reason"
+    """
+    The reason why the model stopped generating.
+    Example: "length"
+    """
+
     AI_FREQUENCY_PENALTY = "ai.frequency_penalty"
     """
     Used to reduce repetitiveness of generated tokens.
     Example: 0.5
     """
 
-    AI_PRESENCE_PENALTY = "ai.presence_penalty"
+    AI_FUNCTION_CALL = "ai.function_call"
     """
-    Used to reduce repetitiveness of generated tokens.
-    Example: 0.5
+    For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
+    """
+
+    AI_GENERATION_ID = "ai.generation_id"
+    """
+    Unique identifier for the completion.
+    Example: "gen_123abc"
     """
 
     AI_INPUT_MESSAGES = "ai.input_messages"
@@ -113,10 +147,9 @@ class SPANDATA:
     Example: [{"role": "user", "message": "hello"}]
     """
 
-    AI_MODEL_ID = "ai.model_id"
+    AI_LOGIT_BIAS = "ai.logit_bias"
     """
-    The unique descriptor of the model being execugted
-    Example: gpt-4
+    For an AI model call, the logit bias
     """
 
     AI_METADATA = "ai.metadata"
@@ -125,161 +158,183 @@ class SPANDATA:
     Example: {"executed_function": "add_integers"}
     """
 
-    AI_TAGS = "ai.tags"
+    AI_MODEL_ID = "ai.model_id"
     """
-    Tags that describe an AI pipeline step.
-    Example: {"executed_function": "add_integers"}
+    The unique descriptor of the model being execugted
+    Example: gpt-4
     """
 
-    AI_STREAMING = "ai.streaming"
+    AI_PIPELINE_NAME = "ai.pipeline.name"
     """
-    Whether or not the AI model call's repsonse was streamed back asynchronously
-    Example: true
+    Name of the AI pipeline or chain being executed.
+    Example: "qa-pipeline"
     """
 
-    AI_TEMPERATURE = "ai.temperature"
+    AI_PREAMBLE = "ai.preamble"
     """
-    For an AI model call, the temperature parameter. Temperature essentially means how random the output will be.
-    Example: 0.5
+    For an AI model call, the preamble parameter.
+    Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
+    Example: "You are now a clown."
     """
 
-    AI_TOP_P = "ai.top_p"
+    AI_PRESENCE_PENALTY = "ai.presence_penalty"
     """
-    For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
+    Used to reduce repetitiveness of generated tokens.
     Example: 0.5
     """
 
-    AI_TOP_K = "ai.top_k"
+    AI_PROMPT_TOKENS_USED = "ai.prompt_tokens.used"
    """
-    For an AI model call, the top_k parameter. Top_k essentially controls how random the output will be.
-    Example: 35
+    The number of input prompt tokens used by the model.
+    Example: 10
     """
 
-    AI_FUNCTION_CALL = "ai.function_call"
+    AI_RAW_PROMPTING = "ai.raw_prompting"
     """
-    For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
+    Minimize pre-processing done to the prompt sent to the LLM.
+    Example: true
     """
 
-    AI_TOOL_CALLS = "ai.tool_calls"
+    AI_RESPONSE_FORMAT = "ai.response_format"
     """
-    For an AI model call, the function that was called.
+    For an AI model call, the format of the response
     """
 
-    AI_TOOLS = "ai.tools"
+    AI_RESPONSES = "ai.responses"
     """
-    For an AI model call, the functions that are available
+    The responses to an AI model call. Always as a list.
+    Example: ["hello", "world"]
     """
 
-    AI_RESPONSE_FORMAT = "ai.response_format"
+    AI_SEARCH_QUERIES = "ai.search_queries"
     """
-    For an AI model call, the format of the response
+    Queries used to search for relevant context or documents.
+    Example: ["climate change effects", "renewable energy"]
     """
 
-    AI_LOGIT_BIAS = "ai.logit_bias"
+    AI_SEARCH_REQUIRED = "ai.is_search_required"
     """
-    For an AI model call, the logit bias
+    Boolean indicating if the model needs to perform a search.
+    Example: true
     """
 
-    AI_PREAMBLE = "ai.preamble"
+    AI_SEARCH_RESULTS = "ai.search_results"
     """
-    For an AI model call, the preamble parameter.
-    Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
-    Example: "You are now a clown."
+    Results returned from search queries for context.
+    Example: ["Result 1", "Result 2"]
     """
 
-    AI_RAW_PROMPTING = "ai.raw_prompting"
+    AI_SEED = "ai.seed"
     """
-    Minimize pre-processing done to the prompt sent to the LLM.
+    The seed, ideally models given the same seed and same other parameters will produce the exact same output.
+    Example: 123.45
+    """
+
+    AI_STREAMING = "ai.streaming"
+    """
+    Whether or not the AI model call's response was streamed back asynchronously
     Example: true
     """
-    AI_RESPONSES = "ai.responses"
+
+    AI_TAGS = "ai.tags"
     """
-    The responses to an AI model call. Always as a list.
-    Example: ["hello", "world"]
+    Tags that describe an AI pipeline step.
+    Example: {"executed_function": "add_integers"}
     """
 
-    AI_SEED = "ai.seed"
+    AI_TEMPERATURE = "ai.temperature"
     """
-    The seed, ideally models given the same seed and same other parameters will produce the exact same output.
-    Example: 123.45
+    For an AI model call, the temperature parameter. Temperature essentially means how random the output will be.
+    Example: 0.5
     """
 
-    AI_CITATIONS = "ai.citations"
+    AI_TEXTS = "ai.texts"
     """
-    References or sources cited by the AI model in its response.
-    Example: ["Smith et al. 2020", "Jones 2019"]
+    Raw text inputs provided to the model.
+    Example: ["What is machine learning?"]
     """
 
-    AI_DOCUMENTS = "ai.documents"
+    AI_TOP_K = "ai.top_k"
     """
-    Documents or content chunks used as context for the AI model.
-    Example: ["doc1.txt", "doc2.pdf"]
+    For an AI model call, the top_k parameter. Top_k essentially controls how random the output will be.
+    Example: 35
     """
 
-    AI_SEARCH_QUERIES = "ai.search_queries"
+    AI_TOP_P = "ai.top_p"
     """
-    Queries used to search for relevant context or documents.
-    Example: ["climate change effects", "renewable energy"]
+    For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
+    Example: 0.5
     """
 
-    AI_SEARCH_RESULTS = "ai.search_results"
+    AI_TOOL_CALLS = "ai.tool_calls"
     """
-    Results returned from search queries for context.
-    Example: ["Result 1", "Result 2"]
+    For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
     """
 
-    AI_GENERATION_ID = "ai.generation_id"
+    AI_TOOLS = "ai.tools"
     """
-    Unique identifier for the completion.
-    Example: "gen_123abc"
+    For an AI model call, the functions that are available
     """
 
-    AI_SEARCH_REQUIRED = "ai.is_search_required"
+    AI_TOTAL_TOKENS_USED = "ai.total_tokens.used"
     """
-    Boolean indicating if the model needs to perform a search.
+    The total number of tokens (input + output) used by the request to the model.
+    Example: 20
+    """
+
+    AI_WARNINGS = "ai.warnings"
+    """
+    Warning messages generated during model execution.
+    Example: ["Token limit exceeded"]
+    """
+
+    CACHE_HIT = "cache.hit"
+    """
+    A boolean indicating whether the requested data was found in the cache.
     Example: true
     """
 
-    AI_FINISH_REASON = "ai.finish_reason"
+    CACHE_ITEM_SIZE = "cache.item_size"
     """
-    The reason why the model stopped generating.
-    Example: "length"
+    The size of the requested data in bytes.
+    Example: 58
     """
 
-    AI_PIPELINE_NAME = "ai.pipeline.name"
+    CACHE_KEY = "cache.key"
     """
-    Name of the AI pipeline or chain being executed.
-    Example: "qa-pipeline"
+    The key of the requested data.
+    Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3
     """
 
-    AI_PROMPT_TOKENS_USED = "ai.prompt_tokens.used"
+    CODE_FILEPATH = "code.filepath"
     """
-    The number of input prompt tokens used by the model.
-    Example: 10
+    The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path).
+    Example: "/app/myapplication/http/handler/server.py"
     """
 
-    AI_COMPLETION_TOKENS_USED = "ai.completion_tokens.used"
+    CODE_FUNCTION = "code.function"
     """
-    The number of output completion tokens used by the model.
-    Example: 10
+    The method or function name, or equivalent (usually rightmost part of the code unit's name).
+    Example: "server_request"
     """
 
-    AI_TOTAL_TOKENS_USED = "ai.total_tokens.used"
+    CODE_LINENO = "code.lineno"
     """
-    The total number of tokens (input + output) used by the request to the model.
-    Example: 20
+    The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`.
+    Example: 42
     """
 
-    AI_TEXTS = "ai.texts"
+    CODE_NAMESPACE = "code.namespace"
     """
-    Raw text inputs provided to the model.
-    Example: ["What is machine learning?"]
+    The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit.
+    Example: "http.handler"
     """
 
-    AI_WARNINGS = "ai.warnings"
+    DB_MONGODB_COLLECTION = "db.mongodb.collection"
     """
-    Warning messages generated during model execution.
-    Example: ["Token limit exceeded"]
+    The MongoDB collection being accessed within the database.
+    See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes
+    Example: public.users; customers
     """
 
     DB_NAME = "db.name"
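
Most of the churn in the hunk above is alphabetical reordering: the cache.*, code.* and MongoDB constants that appear as additions here are relocations within the class, not new names. For orientation, constants like these annotate manually created spans; a hedged sketch (the `start_span`/`set_data` call shape follows the 2.x API and may differ in 3.0, and `my_cache` is a stand-in object):

    import sentry_sdk
    from sentry_sdk.consts import SPANDATA

    def cached_lookup(my_cache, key: str):
        # Annotate a hand-rolled cache read with the cache.* span attributes.
        with sentry_sdk.start_span(op="cache.get", name="lookup " + key) as span:
            value = my_cache.get(key)
            span.set_data(SPANDATA.CACHE_KEY, key)
            span.set_data(SPANDATA.CACHE_HIT, value is not None)
            if value is not None:
                span.set_data(SPANDATA.CACHE_ITEM_SIZE, len(value))
            return value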
@@ -288,13 +343,6 @@ class SPANDATA:
     Example: myDatabase
     """
 
-    DB_USER = "db.user"
-    """
-    The name of the database user used for connecting to the database.
-    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
-    Example: my_user
-    """
-
     DB_OPERATION = "db.operation"
     """
     The name of the operation being executed, e.g. the MongoDB command name such as findAndModify, or the SQL keyword.
@@ -309,47 +357,161 @@ class SPANDATA:
     Example: postgresql
     """
 
-    DB_MONGODB_COLLECTION = "db.mongodb.collection"
+    DB_USER = "db.user"
     """
-    The MongoDB collection being accessed within the database.
-    See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes
-    Example: public.users; customers
+    The name of the database user used for connecting to the database.
+    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
+    Example: my_user
     """
 
-    CACHE_HIT = "cache.hit"
+    GEN_AI_AGENT_NAME = "gen_ai.agent.name"
     """
-    A boolean indicating whether the requested data was found in the cache.
-    Example: true
+    The name of the agent being used.
+    Example: "ResearchAssistant"
     """
 
-    CACHE_ITEM_SIZE = "cache.item_size"
+    GEN_AI_CHOICE = "gen_ai.choice"
     """
-    The size of the requested data in bytes.
-    Example: 58
+    The model's response message.
+    Example: "The weather in Paris is rainy and overcast, with temperatures around 57°F"
     """
 
-    CACHE_KEY = "cache.key"
+    GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
     """
-    The key of the requested data.
-    Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3
+    The name of the operation being performed.
+    Example: "chat"
     """
 
-    NETWORK_PEER_ADDRESS = "network.peer.address"
+    GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
     """
-    Peer address of the network connection - IP address or Unix domain socket name.
-    Example: 10.1.2.80, /tmp/my.sock, localhost
+    The model's response text messages.
+    Example: ["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"]
     """
 
-    NETWORK_PEER_PORT = "network.peer.port"
+    GEN_AI_RESPONSE_TOOL_CALLS = "gen_ai.response.tool_calls"
     """
-    Peer port number of the network connection.
-    Example: 6379
+    The tool calls in the model's response.
+    Example: [{"name": "get_weather", "arguments": {"location": "Paris"}}]
     """
 
-    HTTP_QUERY = "http.query"
+    GEN_AI_REQUEST_AVAILABLE_TOOLS = "gen_ai.request.available_tools"
     """
-    The Query string present in the URL.
-    Example: ?foo=bar&bar=baz
+    The available tools for the model.
+    Example: [{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}]
+    """
+
+    GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty"
+    """
+    The frequency penalty parameter used to reduce repetitiveness of generated tokens.
+    Example: 0.1
+    """
+
+    GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
+    """
+    The maximum number of tokens to generate in the response.
+    Example: 2048
+    """
+
+    GEN_AI_REQUEST_MESSAGES = "gen_ai.request.messages"
+    """
+    The messages passed to the model. The "content" can be a string or an array of objects.
+    Example: [{role: "system", "content: "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}]
+    """
+
+    GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
+    """
+    The model identifier being used for the request.
+    Example: "gpt-4-turbo-preview"
+    """
+
+    GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
+    """
+    The presence penalty parameter used to reduce repetitiveness of generated tokens.
+    Example: 0.1
+    """
+
+    GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
+    """
+    The temperature parameter used to control randomness in the output.
+    Example: 0.7
+    """
+
+    GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
+    """
+    The top_p parameter used to control diversity via nucleus sampling.
+    Example: 1.0
+    """
+
+    GEN_AI_SYSTEM = "gen_ai.system"
+    """
+    The name of the AI system being used.
+    Example: "openai"
+    """
+
+    GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description"
+    """
+    The description of the tool being used.
+    Example: "Searches the web for current information about a topic"
+    """
+
+    GEN_AI_TOOL_INPUT = "gen_ai.tool.input"
+    """
+    The input of the tool being used.
+    Example: {"location": "Paris"}
+    """
+
+    GEN_AI_TOOL_NAME = "gen_ai.tool.name"
+    """
+    The name of the tool being used.
+    Example: "web_search"
+    """
+
+    GEN_AI_TOOL_OUTPUT = "gen_ai.tool.output"
+    """
+    The output of the tool being used.
+    Example: "rainy, 57°F"
+    """
+
+    GEN_AI_TOOL_TYPE = "gen_ai.tool.type"
+    """
+    The type of tool being used.
+    Example: "function"
+    """
+
+    GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
+    """
+    The number of tokens in the input.
+    Example: 150
+    """
+
+    GEN_AI_USAGE_INPUT_TOKENS_CACHED = "gen_ai.usage.input_tokens.cached"
+    """
+    The number of cached tokens in the input.
+    Example: 50
+    """
+
+    GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"
+    """
+    The number of tokens in the output.
+    Example: 250
+    """
+
+    GEN_AI_USAGE_OUTPUT_TOKENS_REASONING = "gen_ai.usage.output_tokens.reasoning"
+    """
+    The number of tokens used for reasoning in the output.
+    Example: 75
+    """
+
+    GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens"
+    """
+    The total number of tokens used (input + output).
+    Example: 400
+    """
+
+    GEN_AI_USER_MESSAGE = "gen_ai.user.message"
+    """
+    The user message passed to the model.
+    Example: "What's the weather in Paris?"
     """
 
     HTTP_FRAGMENT = "http.fragment"
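
The new gen_ai.* attributes follow the OpenTelemetry-style GenAI semantic conventions and back the new openai_agents integration listed in the files above. A hypothetical sketch of how an instrumentation might populate them on a chat span (same caveat about the exact 3.0 span API; `response` is a stand-in result object):

    import sentry_sdk
    from sentry_sdk.consts import OP, SPANDATA

    def record_chat(model: str, messages: list, response) -> None:
        # One span per model call, tagged with request and usage attributes.
        with sentry_sdk.start_span(op=OP.GEN_AI_CHAT, name="chat " + model) as span:
            span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
            span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
            span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
            span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, response.text)
            span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, response.input_tokens)
            span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, response.output_tokens)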
@@ -364,6 +526,12 @@ class SPANDATA:
     Example: GET
     """
 
+    HTTP_QUERY = "http.query"
+    """
+    The Query string present in the URL.
+    Example: ?foo=bar&bar=baz
+    """
+
     HTTP_STATUS_CODE = "http.response.status_code"
     """
     The HTTP status code as an integer.
@@ -381,14 +549,14 @@ class SPANDATA:
     The message's identifier.
     """
 
-    MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"
+    MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"
     """
-    Number of retries/attempts to process a message.
+    The latency between when the task was enqueued and when it was started to be processed.
     """
 
-    MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"
+    MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"
     """
-    The latency between when the task was enqueued and when it was started to be processed.
+    Number of retries/attempts to process a message.
     """
 
     MESSAGING_SYSTEM = "messaging.system"
@@ -396,6 +564,24 @@ class SPANDATA:
     The messaging system's name, e.g. `kafka`, `aws_sqs`
     """
 
+    NETWORK_PEER_ADDRESS = "network.peer.address"
+    """
+    Peer address of the network connection - IP address or Unix domain socket name.
+    Example: 10.1.2.80, /tmp/my.sock, localhost
+    """
+
+    NETWORK_PEER_PORT = "network.peer.port"
+    """
+    Peer port number of the network connection.
+    Example: 6379
+    """
+
+    PROFILER_ID = "profiler_id"
+    """
+    Label identifying the profiler id that the span occurred in. This should be a string.
+    Example: "5249fbada8d5416482c2f6e47e337372"
+    """
+
     SERVER_ADDRESS = "server.address"
     """
     Name of the database host.
@@ -421,30 +607,6 @@ class SPANDATA:
     Example: 16456
     """
 
-    CODE_FILEPATH = "code.filepath"
-    """
-    The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path).
-    Example: "/app/myapplication/http/handler/server.py"
-    """
-
-    CODE_LINENO = "code.lineno"
-    """
-    The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`.
-    Example: 42
-    """
-
-    CODE_FUNCTION = "code.function"
-    """
-    The method or function name, or equivalent (usually rightmost part of the code unit's name).
-    Example: "server_request"
-    """
-
-    CODE_NAMESPACE = "code.namespace"
-    """
-    The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit.
-    Example: "http.handler"
-    """
-
     THREAD_ID = "thread.id"
     """
     Identifier of a thread from where the span originated. This should be a string.
@@ -457,12 +619,6 @@ class SPANDATA:
     Example: "MainThread"
     """
 
-    PROFILER_ID = "profiler_id"
-    """
-    Label identifying the profiler id that the span occurred in. This should be a string.
-    Example: "5249fbada8d5416482c2f6e47e337372"
-    """
-
 
 class SPANSTATUS:
     """
@@ -502,6 +658,10 @@ class OP:
     FUNCTION = "function"
     FUNCTION_AWS = "function.aws"
     FUNCTION_GCP = "function.gcp"
+    GEN_AI_CHAT = "gen_ai.chat"
+    GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
+    GEN_AI_HANDOFF = "gen_ai.handoff"
+    GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
     GRAPHQL_EXECUTE = "graphql.execute"
     GRAPHQL_MUTATION = "graphql.mutation"
     GRAPHQL_PARSE = "graphql.parse"
@@ -514,6 +674,8 @@ class OP:
     HTTP_CLIENT = "http.client"
     HTTP_CLIENT_STREAM = "http.client.stream"
     HTTP_SERVER = "http.server"
+    HTTP = "http"
+    MESSAGE = "message"
     MIDDLEWARE_DJANGO = "middleware.django"
     MIDDLEWARE_LITESTAR = "middleware.litestar"
     MIDDLEWARE_LITESTAR_RECEIVE = "middleware.litestar.receive"
@@ -545,6 +707,7 @@ class OP:
     QUEUE_TASK_HUEY = "queue.task.huey"
     QUEUE_SUBMIT_RAY = "queue.submit.ray"
     QUEUE_TASK_RAY = "queue.task.ray"
+    RPC = "rpc"
     SUBPROCESS = "subprocess"
     SUBPROCESS_WAIT = "subprocess.wait"
     SUBPROCESS_COMMUNICATE = "subprocess.communicate"
@@ -573,8 +736,7 @@ class TransactionSource(str, Enum):
     URL = "url"
     VIEW = "view"
 
-    def __str__(self):
-        # type: () -> str
+    def __str__(self) -> str:
         return self.value
 
 
@@ -602,68 +764,74 @@ class ClientConstructor:
 
     def __init__(
         self,
-        dsn=None,  # type: Optional[str]
+        dsn: Optional[str] = None,
         *,
-        max_breadcrumbs=DEFAULT_MAX_BREADCRUMBS,  # type: int
-        release=None,  # type: Optional[str]
-        environment=None,  # type: Optional[str]
-        server_name=None,  # type: Optional[str]
-        shutdown_timeout=2,  # type: float
-        integrations=[],  # type: Sequence[sentry_sdk.integrations.Integration]  # noqa: B006
-        in_app_include=[],  # type: List[str]  # noqa: B006
-        in_app_exclude=[],  # type: List[str]  # noqa: B006
-        default_integrations=True,  # type: bool
-        dist=None,  # type: Optional[str]
-        transport=None,  # type: Optional[Union[sentry_sdk.transport.Transport, Type[sentry_sdk.transport.Transport], Callable[[Event], None]]]
-        transport_queue_size=DEFAULT_QUEUE_SIZE,  # type: int
-        sample_rate=1.0,  # type: float
-        send_default_pii=None,  # type: Optional[bool]
-        http_proxy=None,  # type: Optional[str]
-        https_proxy=None,  # type: Optional[str]
-        ignore_errors=[],  # type: Sequence[Union[type, str]]  # noqa: B006
-        max_request_body_size="medium",  # type: str
-        socket_options=None,  # type: Optional[List[Tuple[int, int, int | bytes]]]
-        keep_alive=None,  # type: Optional[bool]
-        before_send=None,  # type: Optional[EventProcessor]
-        before_breadcrumb=None,  # type: Optional[BreadcrumbProcessor]
-        debug=None,  # type: Optional[bool]
-        attach_stacktrace=False,  # type: bool
-        ca_certs=None,  # type: Optional[str]
-        traces_sample_rate=None,  # type: Optional[float]
-        traces_sampler=None,  # type: Optional[TracesSampler]
-        profiles_sample_rate=None,  # type: Optional[float]
-        profiles_sampler=None,  # type: Optional[TracesSampler]
-        profiler_mode=None,  # type: Optional[ProfilerMode]
-        profile_lifecycle="manual",  # type: Literal["manual", "trace"]
-        profile_session_sample_rate=None,  # type: Optional[float]
-        auto_enabling_integrations=True,  # type: bool
-        disabled_integrations=None,  # type: Optional[Sequence[sentry_sdk.integrations.Integration]]
-        auto_session_tracking=True,  # type: bool
-        send_client_reports=True,  # type: bool
-        _experiments={},  # type: Experiments  # noqa: B006
-        proxy_headers=None,  # type: Optional[Dict[str, str]]
-        before_send_transaction=None,  # type: Optional[TransactionProcessor]
-        project_root=None,  # type: Optional[str]
-        include_local_variables=True,  # type: Optional[bool]
-        include_source_context=True,  # type: Optional[bool]
-        trace_propagation_targets=[  # noqa: B006
-            MATCH_ALL
-        ],  # type: Optional[Sequence[str]]
-        functions_to_trace=[],  # type: Sequence[Dict[str, str]]  # noqa: B006
-        event_scrubber=None,  # type: Optional[sentry_sdk.scrubber.EventScrubber]
-        max_value_length=DEFAULT_MAX_VALUE_LENGTH,  # type: int
-        enable_backpressure_handling=True,  # type: bool
-        error_sampler=None,  # type: Optional[Callable[[Event, Hint], Union[float, bool]]]
-        enable_db_query_source=True,  # type: bool
-        db_query_source_threshold_ms=100,  # type: int
-        spotlight=None,  # type: Optional[Union[bool, str]]
-        cert_file=None,  # type: Optional[str]
-        key_file=None,  # type: Optional[str]
-        custom_repr=None,  # type: Optional[Callable[..., Optional[str]]]
-        add_full_stack=DEFAULT_ADD_FULL_STACK,  # type: bool
-        max_stack_frames=DEFAULT_MAX_STACK_FRAMES,  # type: Optional[int]
-    ):
-        # type: (...) -> None
+        max_breadcrumbs: int = DEFAULT_MAX_BREADCRUMBS,
+        release: Optional[str] = None,
+        environment: Optional[str] = None,
+        server_name: Optional[str] = None,
+        shutdown_timeout: float = 2,
+        integrations: Sequence[sentry_sdk.integrations.Integration] = [],  # noqa: B006
+        in_app_include: List[str] = [],  # noqa: B006
+        in_app_exclude: List[str] = [],  # noqa: B006
+        default_integrations: bool = True,
+        dist: Optional[str] = None,
+        transport: Optional[
+            Union[
+                sentry_sdk.transport.Transport,
+                Type[sentry_sdk.transport.Transport],
+                Callable[[Event], None],
+            ]
+        ] = None,
+        transport_queue_size: int = DEFAULT_QUEUE_SIZE,
+        sample_rate: float = 1.0,
+        send_default_pii: Optional[bool] = None,
+        http_proxy: Optional[str] = None,
+        https_proxy: Optional[str] = None,
+        ignore_errors: Sequence[Union[type, str]] = [],  # noqa: B006
+        max_request_body_size: str = "medium",
+        socket_options: Optional[List[Tuple[int, int, int | bytes]]] = None,
+        keep_alive: Optional[bool] = None,
+        before_send: Optional[EventProcessor] = None,
+        before_breadcrumb: Optional[BreadcrumbProcessor] = None,
+        debug: Optional[bool] = None,
+        attach_stacktrace: bool = False,
+        ca_certs: Optional[str] = None,
+        traces_sample_rate: Optional[float] = None,
+        traces_sampler: Optional[TracesSampler] = None,
+        profiles_sample_rate: Optional[float] = None,
+        profiles_sampler: Optional[TracesSampler] = None,
+        profiler_mode: Optional[ProfilerMode] = None,
+        profile_lifecycle: Literal["manual", "trace"] = "manual",
+        profile_session_sample_rate: Optional[float] = None,
+        auto_enabling_integrations: bool = True,
+        disabled_integrations: Optional[
+            Sequence[sentry_sdk.integrations.Integration]
+        ] = None,
+        auto_session_tracking: bool = True,
+        send_client_reports: bool = True,
+        _experiments: Experiments = {},  # noqa: B006
+        proxy_headers: Optional[Dict[str, str]] = None,
+        before_send_transaction: Optional[TransactionProcessor] = None,
+        project_root: Optional[str] = None,
+        include_local_variables: Optional[bool] = True,
+        include_source_context: Optional[bool] = True,
+        trace_propagation_targets: Optional[Sequence[str]] = [MATCH_ALL],  # noqa: B006
+        exclude_span_origins: Optional[Sequence[str]] = None,
+        functions_to_trace: Sequence[Dict[str, str]] = [],  # noqa: B006
+        event_scrubber: Optional[sentry_sdk.scrubber.EventScrubber] = None,
+        max_value_length: int = DEFAULT_MAX_VALUE_LENGTH,
+        enable_backpressure_handling: bool = True,
+        error_sampler: Optional[Callable[[Event, Hint], Union[float, bool]]] = None,
+        enable_db_query_source: bool = True,
+        db_query_source_threshold_ms: int = 100,
+        spotlight: Optional[Union[bool, str]] = None,
+        cert_file: Optional[str] = None,
+        key_file: Optional[str] = None,
+        custom_repr: Optional[Callable[..., Optional[str]]] = None,
+        add_full_stack: bool = DEFAULT_ADD_FULL_STACK,
+        max_stack_frames: Optional[int] = DEFAULT_MAX_STACK_FRAMES,
+    ) -> None:
        """Initialize the Sentry SDK with the given parameters. All parameters described here can be used in a call to `sentry_sdk.init()`.
 
         :param dsn: The DSN tells the SDK where to send the events.
@@ -980,6 +1148,17 @@ class ClientConstructor:
             If `trace_propagation_targets` is not provided, trace data is attached to every outgoing request from the
             instrumented client.
 
+        :param exclude_span_origins: An optional list of strings or regex patterns to disable span creation based
+            on span origin. When a span's origin would match any of the provided patterns, the span will not be
+            created.
+
+            This can be useful to exclude automatic span creation from specific integrations without disabling the
+            entire integration.
+
+            The option may contain a list of strings or regexes against which the span origins are matched.
+            String entries do not have to be full matches, meaning a span origin is matched when it contains
+            a string provided through the option.
+
         :param functions_to_trace: An optional list of functions that should be set up for tracing.
 
             For each function in the list, a span will be created when the function is executed.
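
Going by the docstring above, a call using the new `exclude_span_origins` option would look roughly like this (the DSN is the usual documentation placeholder; the origin values are illustrative, with strings matched by substring and regexes as patterns):

    import re

    import sentry_sdk

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
        traces_sample_rate=1.0,
        # Drop spans created by specific integrations while keeping the
        # integrations themselves (error capture etc.) enabled.
        exclude_span_origins=[
            "auto.db.redis",                # substring match
            re.compile(r"auto\.http\..*"),  # regex pattern
        ],
    )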
@@ -1043,8 +1222,7 @@ class ClientConstructor:
         pass
 
 
-def _get_default_options():
-    # type: () -> dict[str, Any]
+def _get_default_options() -> dict[str, Any]:
     import inspect
 
     a = inspect.getfullargspec(ClientConstructor.__init__)
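
`_get_default_options()` builds `DEFAULT_OPTIONS` by introspecting the `__init__` signature above; since everything after the bare `*` is keyword-only, `inspect.getfullargspec` exposes those defaults directly. A simplified sketch of the mechanism (the shipped helper may differ in detail):

    import inspect

    def get_defaults(func) -> dict[str, object]:
        spec = inspect.getfullargspec(func)
        defaults = {}
        # Trailing positional parameters with defaults (here: just `dsn`).
        for name, value in zip(reversed(spec.args), reversed(spec.defaults or ())):
            defaults[name] = value
        # Keyword-only parameters declared after the bare `*`.
        defaults.update(spec.kwonlydefaults or {})
        return defaults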
@@ -1063,4 +1241,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options
 
 
-VERSION = "3.0.0a2"
+VERSION = "3.0.0a3"