sentry-sdk 3.0.0a2__py2.py3-none-any.whl → 3.0.0a4__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of sentry-sdk has been flagged as a potentially problematic release.

Files changed (159)
  1. sentry_sdk/__init__.py +4 -0
  2. sentry_sdk/_compat.py +5 -12
  3. sentry_sdk/_init_implementation.py +7 -7
  4. sentry_sdk/_log_batcher.py +17 -29
  5. sentry_sdk/_lru_cache.py +7 -9
  6. sentry_sdk/_queue.py +2 -4
  7. sentry_sdk/_types.py +9 -16
  8. sentry_sdk/_werkzeug.py +5 -7
  9. sentry_sdk/ai/monitoring.py +45 -33
  10. sentry_sdk/ai/utils.py +8 -5
  11. sentry_sdk/api.py +91 -87
  12. sentry_sdk/attachments.py +10 -12
  13. sentry_sdk/client.py +119 -159
  14. sentry_sdk/consts.py +432 -223
  15. sentry_sdk/crons/api.py +16 -17
  16. sentry_sdk/crons/decorator.py +25 -27
  17. sentry_sdk/debug.py +4 -6
  18. sentry_sdk/envelope.py +46 -112
  19. sentry_sdk/feature_flags.py +9 -15
  20. sentry_sdk/integrations/__init__.py +24 -19
  21. sentry_sdk/integrations/_asgi_common.py +16 -18
  22. sentry_sdk/integrations/_wsgi_common.py +22 -33
  23. sentry_sdk/integrations/aiohttp.py +33 -31
  24. sentry_sdk/integrations/anthropic.py +43 -38
  25. sentry_sdk/integrations/argv.py +3 -4
  26. sentry_sdk/integrations/ariadne.py +16 -18
  27. sentry_sdk/integrations/arq.py +20 -29
  28. sentry_sdk/integrations/asgi.py +63 -37
  29. sentry_sdk/integrations/asyncio.py +15 -17
  30. sentry_sdk/integrations/asyncpg.py +1 -1
  31. sentry_sdk/integrations/atexit.py +6 -10
  32. sentry_sdk/integrations/aws_lambda.py +26 -36
  33. sentry_sdk/integrations/beam.py +10 -18
  34. sentry_sdk/integrations/boto3.py +20 -18
  35. sentry_sdk/integrations/bottle.py +25 -34
  36. sentry_sdk/integrations/celery/__init__.py +40 -59
  37. sentry_sdk/integrations/celery/beat.py +22 -26
  38. sentry_sdk/integrations/celery/utils.py +15 -17
  39. sentry_sdk/integrations/chalice.py +8 -10
  40. sentry_sdk/integrations/clickhouse_driver.py +22 -32
  41. sentry_sdk/integrations/cloud_resource_context.py +9 -16
  42. sentry_sdk/integrations/cohere.py +19 -25
  43. sentry_sdk/integrations/dedupe.py +5 -8
  44. sentry_sdk/integrations/django/__init__.py +69 -74
  45. sentry_sdk/integrations/django/asgi.py +25 -33
  46. sentry_sdk/integrations/django/caching.py +24 -20
  47. sentry_sdk/integrations/django/middleware.py +18 -21
  48. sentry_sdk/integrations/django/signals_handlers.py +12 -11
  49. sentry_sdk/integrations/django/templates.py +21 -18
  50. sentry_sdk/integrations/django/transactions.py +16 -11
  51. sentry_sdk/integrations/django/views.py +8 -12
  52. sentry_sdk/integrations/dramatiq.py +21 -21
  53. sentry_sdk/integrations/excepthook.py +10 -10
  54. sentry_sdk/integrations/executing.py +3 -4
  55. sentry_sdk/integrations/falcon.py +27 -42
  56. sentry_sdk/integrations/fastapi.py +13 -16
  57. sentry_sdk/integrations/flask.py +31 -38
  58. sentry_sdk/integrations/gcp.py +13 -16
  59. sentry_sdk/integrations/gnu_backtrace.py +7 -20
  60. sentry_sdk/integrations/gql.py +16 -17
  61. sentry_sdk/integrations/graphene.py +14 -13
  62. sentry_sdk/integrations/grpc/__init__.py +3 -2
  63. sentry_sdk/integrations/grpc/aio/client.py +2 -2
  64. sentry_sdk/integrations/grpc/aio/server.py +15 -14
  65. sentry_sdk/integrations/grpc/client.py +21 -11
  66. sentry_sdk/integrations/grpc/consts.py +2 -0
  67. sentry_sdk/integrations/grpc/server.py +12 -8
  68. sentry_sdk/integrations/httpx.py +11 -14
  69. sentry_sdk/integrations/huey.py +14 -21
  70. sentry_sdk/integrations/huggingface_hub.py +17 -17
  71. sentry_sdk/integrations/langchain.py +204 -114
  72. sentry_sdk/integrations/launchdarkly.py +13 -10
  73. sentry_sdk/integrations/litestar.py +40 -38
  74. sentry_sdk/integrations/logging.py +29 -36
  75. sentry_sdk/integrations/loguru.py +16 -20
  76. sentry_sdk/integrations/modules.py +3 -4
  77. sentry_sdk/integrations/openai.py +421 -204
  78. sentry_sdk/integrations/openai_agents/__init__.py +49 -0
  79. sentry_sdk/integrations/openai_agents/consts.py +1 -0
  80. sentry_sdk/integrations/openai_agents/patches/__init__.py +4 -0
  81. sentry_sdk/integrations/openai_agents/patches/agent_run.py +152 -0
  82. sentry_sdk/integrations/openai_agents/patches/models.py +52 -0
  83. sentry_sdk/integrations/openai_agents/patches/runner.py +42 -0
  84. sentry_sdk/integrations/openai_agents/patches/tools.py +84 -0
  85. sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
  86. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +20 -0
  87. sentry_sdk/integrations/openai_agents/spans/ai_client.py +46 -0
  88. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +47 -0
  89. sentry_sdk/integrations/openai_agents/spans/handoff.py +24 -0
  90. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +41 -0
  91. sentry_sdk/integrations/openai_agents/utils.py +153 -0
  92. sentry_sdk/integrations/openfeature.py +12 -8
  93. sentry_sdk/integrations/pure_eval.py +6 -10
  94. sentry_sdk/integrations/pymongo.py +14 -18
  95. sentry_sdk/integrations/pyramid.py +31 -36
  96. sentry_sdk/integrations/quart.py +23 -28
  97. sentry_sdk/integrations/ray.py +73 -64
  98. sentry_sdk/integrations/redis/__init__.py +7 -4
  99. sentry_sdk/integrations/redis/_async_common.py +18 -12
  100. sentry_sdk/integrations/redis/_sync_common.py +16 -15
  101. sentry_sdk/integrations/redis/modules/caches.py +17 -8
  102. sentry_sdk/integrations/redis/modules/queries.py +9 -8
  103. sentry_sdk/integrations/redis/rb.py +3 -2
  104. sentry_sdk/integrations/redis/redis.py +4 -4
  105. sentry_sdk/integrations/redis/redis_cluster.py +10 -8
  106. sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +3 -2
  107. sentry_sdk/integrations/redis/utils.py +21 -22
  108. sentry_sdk/integrations/rq.py +13 -16
  109. sentry_sdk/integrations/rust_tracing.py +10 -7
  110. sentry_sdk/integrations/sanic.py +34 -46
  111. sentry_sdk/integrations/serverless.py +22 -27
  112. sentry_sdk/integrations/socket.py +29 -17
  113. sentry_sdk/integrations/spark/__init__.py +1 -0
  114. sentry_sdk/integrations/spark/spark_driver.py +45 -83
  115. sentry_sdk/integrations/spark/spark_worker.py +7 -11
  116. sentry_sdk/integrations/sqlalchemy.py +22 -19
  117. sentry_sdk/integrations/starlette.py +89 -93
  118. sentry_sdk/integrations/starlite.py +31 -37
  119. sentry_sdk/integrations/statsig.py +5 -4
  120. sentry_sdk/integrations/stdlib.py +32 -28
  121. sentry_sdk/integrations/strawberry.py +63 -50
  122. sentry_sdk/integrations/sys_exit.py +7 -11
  123. sentry_sdk/integrations/threading.py +13 -15
  124. sentry_sdk/integrations/tornado.py +28 -32
  125. sentry_sdk/integrations/trytond.py +4 -3
  126. sentry_sdk/integrations/typer.py +8 -6
  127. sentry_sdk/integrations/unleash.py +5 -4
  128. sentry_sdk/integrations/wsgi.py +47 -46
  129. sentry_sdk/logger.py +13 -9
  130. sentry_sdk/monitor.py +16 -28
  131. sentry_sdk/opentelemetry/consts.py +11 -4
  132. sentry_sdk/opentelemetry/contextvars_context.py +17 -15
  133. sentry_sdk/opentelemetry/propagator.py +38 -21
  134. sentry_sdk/opentelemetry/sampler.py +51 -34
  135. sentry_sdk/opentelemetry/scope.py +46 -37
  136. sentry_sdk/opentelemetry/span_processor.py +43 -59
  137. sentry_sdk/opentelemetry/tracing.py +32 -12
  138. sentry_sdk/opentelemetry/utils.py +180 -196
  139. sentry_sdk/profiler/continuous_profiler.py +108 -97
  140. sentry_sdk/profiler/transaction_profiler.py +70 -97
  141. sentry_sdk/profiler/utils.py +11 -15
  142. sentry_sdk/scope.py +251 -264
  143. sentry_sdk/scrubber.py +22 -26
  144. sentry_sdk/serializer.py +48 -65
  145. sentry_sdk/session.py +44 -61
  146. sentry_sdk/sessions.py +35 -49
  147. sentry_sdk/spotlight.py +15 -21
  148. sentry_sdk/tracing.py +118 -184
  149. sentry_sdk/tracing_utils.py +103 -123
  150. sentry_sdk/transport.py +131 -157
  151. sentry_sdk/utils.py +278 -309
  152. sentry_sdk/worker.py +16 -28
  153. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/METADATA +1 -1
  154. sentry_sdk-3.0.0a4.dist-info/RECORD +168 -0
  155. sentry_sdk-3.0.0a2.dist-info/RECORD +0 -154
  156. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/WHEEL +0 -0
  157. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/entry_points.txt +0 -0
  158. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/licenses/LICENSE +0 -0
  159. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/top_level.txt +0 -0
sentry_sdk/consts.py CHANGED
@@ -1,9 +1,26 @@
+from __future__ import annotations
 import itertools
 from enum import Enum
 from typing import TYPE_CHECKING
 
+if TYPE_CHECKING:
+    from typing import (
+        Optional,
+        Callable,
+        Union,
+        List,
+        Type,
+        Dict,
+        Any,
+        Sequence,
+        Tuple,
+    )
+
 # up top to prevent circular import due to integration import
-DEFAULT_MAX_VALUE_LENGTH = 1024
+# This is more or less an arbitrary large-ish value for now, so that we allow
+# pretty long strings (like LLM prompts), but still have *some* upper limit
+# until we verify that removing the trimming completely is safe.
+DEFAULT_MAX_VALUE_LENGTH = 100_000
 
 DEFAULT_MAX_STACK_FRAMES = 100
 DEFAULT_ADD_FULL_STACK = False
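
The raised limit still flows through the public `max_value_length` option (visible in the `__init__` signature further down this diff), so a consumer that preferred the old, tighter trimming can pin it back explicitly. A minimal sketch (the DSN is a placeholder):

import sentry_sdk

# DEFAULT_MAX_VALUE_LENGTH is now 100_000; passing max_value_length
# explicitly restores the previous 1024-character trimming.
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    max_value_length=1024,
)
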
@@ -26,17 +43,6 @@ class CompressionAlgo(Enum):
 
 
 if TYPE_CHECKING:
-    import sentry_sdk
-
-    from typing import Optional
-    from typing import Callable
-    from typing import Union
-    from typing import List
-    from typing import Type
-    from typing import Dict
-    from typing import Any
-    from typing import Sequence
-    from typing import Tuple
     from typing_extensions import Literal
     from typing_extensions import TypedDict
 
@@ -52,6 +58,8 @@ if TYPE_CHECKING:
         TransactionProcessor,
     )
 
+    import sentry_sdk
+
 # Experiments are feature flags to enable and disable certain unstable SDK
 # functionality. Changing them from the defaults (`None`) in production
 # code is highly discouraged. They are not subject to any stability
@@ -70,8 +78,6 @@ if TYPE_CHECKING:
             "transport_compression_algo": Optional[CompressionAlgo],
             "transport_num_pools": Optional[int],
             "transport_http2": Optional[bool],
-            "enable_logs": Optional[bool],
-            "before_send_log": Optional[Callable[[Log, Hint], Optional[Log]]],
         },
         total=False,
     )
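
The import reshuffling in the hunks above is enabled by the new `from __future__ import annotations`: annotations are stored as strings and never evaluated at runtime, so names imported only under `TYPE_CHECKING` stay usable in signatures. A self-contained illustration of the pattern (not SDK code):

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Visible to type checkers only; never imported at runtime.
    from typing import Optional

def parse(value: str) -> Optional[int]:
    # The annotation survives as the string "Optional[int]", so the
    # guarded import above is sufficient.
    return int(value) if value.isdigit() else None
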
@@ -95,16 +101,45 @@ class SPANDATA:
     See: https://develop.sentry.dev/sdk/performance/span-data-conventions/
     """
 
+    AI_CITATIONS = "ai.citations"
+    """
+    References or sources cited by the AI model in its response.
+    Example: ["Smith et al. 2020", "Jones 2019"]
+    """
+
+    AI_COMPLETION_TOKENS_USED = "ai.completion_tokens.used"
+    """
+    The number of output completion tokens used by the model.
+    Example: 10
+    """
+
+    AI_DOCUMENTS = "ai.documents"
+    """
+    Documents or content chunks used as context for the AI model.
+    Example: ["doc1.txt", "doc2.pdf"]
+    """
+
+    AI_FINISH_REASON = "ai.finish_reason"
+    """
+    The reason why the model stopped generating.
+    Example: "length"
+    """
+
     AI_FREQUENCY_PENALTY = "ai.frequency_penalty"
     """
     Used to reduce repetitiveness of generated tokens.
     Example: 0.5
     """
 
-    AI_PRESENCE_PENALTY = "ai.presence_penalty"
+    AI_FUNCTION_CALL = "ai.function_call"
     """
-    Used to reduce repetitiveness of generated tokens.
-    Example: 0.5
+    For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
+    """
+
+    AI_GENERATION_ID = "ai.generation_id"
+    """
+    Unique identifier for the completion.
+    Example: "gen_123abc"
     """
 
     AI_INPUT_MESSAGES = "ai.input_messages"
@@ -113,10 +148,9 @@ class SPANDATA:
     Example: [{"role": "user", "message": "hello"}]
     """
 
-    AI_MODEL_ID = "ai.model_id"
+    AI_LOGIT_BIAS = "ai.logit_bias"
     """
-    The unique descriptor of the model being execugted
-    Example: gpt-4
+    For an AI model call, the logit bias
     """
 
     AI_METADATA = "ai.metadata"
@@ -125,28 +159,102 @@ class SPANDATA:
     Example: {"executed_function": "add_integers"}
     """
 
-    AI_TAGS = "ai.tags"
+    AI_MODEL_ID = "ai.model_id"
     """
-    Tags that describe an AI pipeline step.
-    Example: {"executed_function": "add_integers"}
+    The unique descriptor of the model being execugted
+    Example: gpt-4
+    """
+
+    AI_PIPELINE_NAME = "ai.pipeline.name"
+    """
+    Name of the AI pipeline or chain being executed.
+    DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
+    Example: "qa-pipeline"
+    """
+
+    AI_PREAMBLE = "ai.preamble"
+    """
+    For an AI model call, the preamble parameter.
+    Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
+    Example: "You are now a clown."
+    """
+
+    AI_PRESENCE_PENALTY = "ai.presence_penalty"
+    """
+    Used to reduce repetitiveness of generated tokens.
+    Example: 0.5
+    """
+
+    AI_PROMPT_TOKENS_USED = "ai.prompt_tokens.used"
+    """
+    The number of input prompt tokens used by the model.
+    Example: 10
+    """
+
+    AI_RAW_PROMPTING = "ai.raw_prompting"
+    """
+    Minimize pre-processing done to the prompt sent to the LLM.
+    Example: true
+    """
+
+    AI_RESPONSE_FORMAT = "ai.response_format"
+    """
+    For an AI model call, the format of the response
+    """
+
+    AI_RESPONSES = "ai.responses"
+    """
+    The responses to an AI model call. Always as a list.
+    Example: ["hello", "world"]
+    """
+
+    AI_SEARCH_QUERIES = "ai.search_queries"
+    """
+    Queries used to search for relevant context or documents.
+    Example: ["climate change effects", "renewable energy"]
+    """
+
+    AI_SEARCH_REQUIRED = "ai.is_search_required"
+    """
+    Boolean indicating if the model needs to perform a search.
+    Example: true
+    """
+
+    AI_SEARCH_RESULTS = "ai.search_results"
+    """
+    Results returned from search queries for context.
+    Example: ["Result 1", "Result 2"]
+    """
+
+    AI_SEED = "ai.seed"
+    """
+    The seed, ideally models given the same seed and same other parameters will produce the exact same output.
+    Example: 123.45
     """
 
     AI_STREAMING = "ai.streaming"
     """
-    Whether or not the AI model call's repsonse was streamed back asynchronously
+    Whether or not the AI model call's response was streamed back asynchronously
+    DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
     Example: true
     """
 
+    AI_TAGS = "ai.tags"
+    """
+    Tags that describe an AI pipeline step.
+    Example: {"executed_function": "add_integers"}
+    """
+
     AI_TEMPERATURE = "ai.temperature"
     """
     For an AI model call, the temperature parameter. Temperature essentially means how random the output will be.
     Example: 0.5
     """
 
-    AI_TOP_P = "ai.top_p"
+    AI_TEXTS = "ai.texts"
     """
-    For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
-    Example: 0.5
+    Raw text inputs provided to the model.
+    Example: ["What is machine learning?"]
     """
 
     AI_TOP_K = "ai.top_k"
@@ -155,14 +263,15 @@ class SPANDATA:
     Example: 35
     """
 
-    AI_FUNCTION_CALL = "ai.function_call"
+    AI_TOP_P = "ai.top_p"
     """
-    For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
+    For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
+    Example: 0.5
     """
 
     AI_TOOL_CALLS = "ai.tool_calls"
     """
-    For an AI model call, the function that was called.
+    For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
     """
 
     AI_TOOLS = "ai.tools"
@@ -170,186 +279,260 @@ class SPANDATA:
     For an AI model call, the functions that are available
     """
 
-    AI_RESPONSE_FORMAT = "ai.response_format"
+    AI_TOTAL_TOKENS_USED = "ai.total_tokens.used"
     """
-    For an AI model call, the format of the response
+    The total number of tokens (input + output) used by the request to the model.
+    Example: 20
     """
 
-    AI_LOGIT_BIAS = "ai.logit_bias"
+    AI_WARNINGS = "ai.warnings"
     """
-    For an AI model call, the logit bias
+    Warning messages generated during model execution.
+    Example: ["Token limit exceeded"]
     """
 
-    AI_PREAMBLE = "ai.preamble"
+    CACHE_HIT = "cache.hit"
     """
-    For an AI model call, the preamble parameter.
-    Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
-    Example: "You are now a clown."
+    A boolean indicating whether the requested data was found in the cache.
+    Example: true
     """
 
-    AI_RAW_PROMPTING = "ai.raw_prompting"
+    CACHE_ITEM_SIZE = "cache.item_size"
     """
-    Minimize pre-processing done to the prompt sent to the LLM.
-    Example: true
+    The size of the requested data in bytes.
+    Example: 58
     """
-    AI_RESPONSES = "ai.responses"
+
+    CACHE_KEY = "cache.key"
     """
-    The responses to an AI model call. Always as a list.
-    Example: ["hello", "world"]
+    The key of the requested data.
+    Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3
     """
 
-    AI_SEED = "ai.seed"
+    CODE_FILEPATH = "code.filepath"
     """
-    The seed, ideally models given the same seed and same other parameters will produce the exact same output.
-    Example: 123.45
+    The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path).
+    Example: "/app/myapplication/http/handler/server.py"
     """
 
-    AI_CITATIONS = "ai.citations"
+    CODE_FUNCTION = "code.function"
     """
-    References or sources cited by the AI model in its response.
-    Example: ["Smith et al. 2020", "Jones 2019"]
+    The method or function name, or equivalent (usually rightmost part of the code unit's name).
+    Example: "server_request"
     """
 
-    AI_DOCUMENTS = "ai.documents"
+    CODE_LINENO = "code.lineno"
     """
-    Documents or content chunks used as context for the AI model.
-    Example: ["doc1.txt", "doc2.pdf"]
+    The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`.
+    Example: 42
     """
 
-    AI_SEARCH_QUERIES = "ai.search_queries"
+    CODE_NAMESPACE = "code.namespace"
     """
-    Queries used to search for relevant context or documents.
-    Example: ["climate change effects", "renewable energy"]
+    The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit.
+    Example: "http.handler"
     """
 
-    AI_SEARCH_RESULTS = "ai.search_results"
+    DB_MONGODB_COLLECTION = "db.mongodb.collection"
     """
-    Results returned from search queries for context.
-    Example: ["Result 1", "Result 2"]
+    The MongoDB collection being accessed within the database.
+    See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes
+    Example: public.users; customers
     """
 
-    AI_GENERATION_ID = "ai.generation_id"
+    DB_NAME = "db.name"
     """
-    Unique identifier for the completion.
-    Example: "gen_123abc"
+    The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).
+    Example: myDatabase
     """
 
-    AI_SEARCH_REQUIRED = "ai.is_search_required"
+    DB_OPERATION = "db.operation"
     """
-    Boolean indicating if the model needs to perform a search.
-    Example: true
+    The name of the operation being executed, e.g. the MongoDB command name such as findAndModify, or the SQL keyword.
+    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
+    Example: findAndModify, HMSET, SELECT
     """
 
-    AI_FINISH_REASON = "ai.finish_reason"
+    DB_SYSTEM = "db.system"
     """
-    The reason why the model stopped generating.
-    Example: "length"
+    An identifier for the database management system (DBMS) product being used.
+    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
+    Example: postgresql
     """
 
-    AI_PIPELINE_NAME = "ai.pipeline.name"
+    DB_USER = "db.user"
+    """
+    The name of the database user used for connecting to the database.
+    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
+    Example: my_user
+    """
+
+    GEN_AI_AGENT_NAME = "gen_ai.agent.name"
+    """
+    The name of the agent being used.
+    Example: "ResearchAssistant"
+    """
+
+    GEN_AI_CHOICE = "gen_ai.choice"
+    """
+    The model's response message.
+    Example: "The weather in Paris is rainy and overcast, with temperatures around 57°F"
+    """
+
+    GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
+    """
+    The name of the operation being performed.
+    Example: "chat"
+    """
+
+    GEN_AI_PIPELINE_NAME = "gen_ai.pipeline.name"
     """
     Name of the AI pipeline or chain being executed.
     Example: "qa-pipeline"
     """
 
-    AI_PROMPT_TOKENS_USED = "ai.prompt_tokens.used"
+    GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
     """
-    The number of input prompt tokens used by the model.
-    Example: 10
+    Exact model identifier used to generate the response
+    Example: gpt-4o-mini-2024-07-18
     """
 
-    AI_COMPLETION_TOKENS_USED = "ai.completion_tokens.used"
+    GEN_AI_RESPONSE_STREAMING = "gen_ai.response.streaming"
     """
-    The number of output completion tokens used by the model.
-    Example: 10
+    Whether or not the AI model call's response was streamed back asynchronously
+    Example: true
     """
 
-    AI_TOTAL_TOKENS_USED = "ai.total_tokens.used"
+    GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
     """
-    The total number of tokens (input + output) used by the request to the model.
-    Example: 20
+    The model's response text messages.
+    Example: ["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"]
     """
 
-    AI_TEXTS = "ai.texts"
+    GEN_AI_RESPONSE_TOOL_CALLS = "gen_ai.response.tool_calls"
     """
-    Raw text inputs provided to the model.
-    Example: ["What is machine learning?"]
+    The tool calls in the model's response.
+    Example: [{"name": "get_weather", "arguments": {"location": "Paris"}}]
     """
 
-    AI_WARNINGS = "ai.warnings"
+    GEN_AI_REQUEST_AVAILABLE_TOOLS = "gen_ai.request.available_tools"
     """
-    Warning messages generated during model execution.
-    Example: ["Token limit exceeded"]
+    The available tools for the model.
+    Example: [{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}]
     """
 
-    DB_NAME = "db.name"
+    GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty"
     """
-    The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).
-    Example: myDatabase
+    The frequency penalty parameter used to reduce repetitiveness of generated tokens.
+    Example: 0.1
     """
 
-    DB_USER = "db.user"
+    GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
     """
-    The name of the database user used for connecting to the database.
-    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
-    Example: my_user
+    The maximum number of tokens to generate in the response.
+    Example: 2048
     """
 
-    DB_OPERATION = "db.operation"
+    GEN_AI_REQUEST_MESSAGES = "gen_ai.request.messages"
     """
-    The name of the operation being executed, e.g. the MongoDB command name such as findAndModify, or the SQL keyword.
-    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
-    Example: findAndModify, HMSET, SELECT
+    The messages passed to the model. The "content" can be a string or an array of objects.
+    Example: [{role: "system", "content: "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}]
     """
 
-    DB_SYSTEM = "db.system"
+    GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
     """
-    An identifier for the database management system (DBMS) product being used.
-    See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
-    Example: postgresql
+    The model identifier being used for the request.
+    Example: "gpt-4-turbo"
     """
 
-    DB_MONGODB_COLLECTION = "db.mongodb.collection"
+    GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
     """
-    The MongoDB collection being accessed within the database.
-    See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes
-    Example: public.users; customers
+    The presence penalty parameter used to reduce repetitiveness of generated tokens.
+    Example: 0.1
     """
 
-    CACHE_HIT = "cache.hit"
+    GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
     """
-    A boolean indicating whether the requested data was found in the cache.
-    Example: true
+    The temperature parameter used to control randomness in the output.
+    Example: 0.7
     """
 
-    CACHE_ITEM_SIZE = "cache.item_size"
+    GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
     """
-    The size of the requested data in bytes.
-    Example: 58
+    The top_p parameter used to control diversity via nucleus sampling.
+    Example: 1.0
     """
 
-    CACHE_KEY = "cache.key"
+    GEN_AI_SYSTEM = "gen_ai.system"
     """
-    The key of the requested data.
-    Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3
+    The name of the AI system being used.
+    Example: "openai"
     """
 
-    NETWORK_PEER_ADDRESS = "network.peer.address"
+    GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description"
     """
-    Peer address of the network connection - IP address or Unix domain socket name.
-    Example: 10.1.2.80, /tmp/my.sock, localhost
+    The description of the tool being used.
+    Example: "Searches the web for current information about a topic"
     """
 
-    NETWORK_PEER_PORT = "network.peer.port"
+    GEN_AI_TOOL_INPUT = "gen_ai.tool.input"
     """
-    Peer port number of the network connection.
-    Example: 6379
+    The input of the tool being used.
+    Example: {"location": "Paris"}
     """
 
-    HTTP_QUERY = "http.query"
+    GEN_AI_TOOL_NAME = "gen_ai.tool.name"
     """
-    The Query string present in the URL.
-    Example: ?foo=bar&bar=baz
+    The name of the tool being used.
+    Example: "web_search"
+    """
+
+    GEN_AI_TOOL_OUTPUT = "gen_ai.tool.output"
+    """
+    The output of the tool being used.
+    Example: "rainy, 57°F"
+    """
+
+    GEN_AI_TOOL_TYPE = "gen_ai.tool.type"
+    """
+    The type of tool being used.
+    Example: "function"
+    """
+
+    GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
+    """
+    The number of tokens in the input.
+    Example: 150
+    """
+
+    GEN_AI_USAGE_INPUT_TOKENS_CACHED = "gen_ai.usage.input_tokens.cached"
+    """
+    The number of cached tokens in the input.
+    Example: 50
+    """
+
+    GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"
+    """
+    The number of tokens in the output.
+    Example: 250
+    """
+
+    GEN_AI_USAGE_OUTPUT_TOKENS_REASONING = "gen_ai.usage.output_tokens.reasoning"
+    """
+    The number of tokens used for reasoning in the output.
+    Example: 75
+    """
+
+    GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens"
+    """
+    The total number of tokens used (input + output).
+    Example: 400
+    """
+
+    GEN_AI_USER_MESSAGE = "gen_ai.user.message"
+    """
+    The user message passed to the model.
+    Example: "What's the weather in Paris?"
     """
 
     HTTP_FRAGMENT = "http.fragment"
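
The `gen_ai.*` keys above are ordinary span data, so custom instrumentation can record them the same way the AI integrations do. A hedged sketch, assuming the familiar `start_span`/`set_data` span API carries over unchanged to 3.0 (the model call itself is stubbed out):

import sentry_sdk
from sentry_sdk.consts import SPANDATA

with sentry_sdk.start_span(op="gen_ai.chat", name="chat gpt-4-turbo") as span:
    span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
    span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, "gpt-4-turbo")
    # ... perform the actual model call here ...
    span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, 150)
    span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, 250)
    span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, 400)
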
@@ -364,6 +547,12 @@ class SPANDATA:
     Example: GET
     """
 
+    HTTP_QUERY = "http.query"
+    """
+    The Query string present in the URL.
+    Example: ?foo=bar&bar=baz
+    """
+
     HTTP_STATUS_CODE = "http.response.status_code"
     """
     The HTTP status code as an integer.
@@ -381,14 +570,14 @@ class SPANDATA:
     The message's identifier.
     """
 
-    MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"
+    MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"
     """
-    Number of retries/attempts to process a message.
+    The latency between when the task was enqueued and when it was started to be processed.
     """
 
-    MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"
+    MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"
     """
-    The latency between when the task was enqueued and when it was started to be processed.
+    Number of retries/attempts to process a message.
     """
 
     MESSAGING_SYSTEM = "messaging.system"
@@ -396,6 +585,24 @@ class SPANDATA:
     The messaging system's name, e.g. `kafka`, `aws_sqs`
     """
 
+    NETWORK_PEER_ADDRESS = "network.peer.address"
+    """
+    Peer address of the network connection - IP address or Unix domain socket name.
+    Example: 10.1.2.80, /tmp/my.sock, localhost
+    """
+
+    NETWORK_PEER_PORT = "network.peer.port"
+    """
+    Peer port number of the network connection.
+    Example: 6379
+    """
+
+    PROFILER_ID = "profiler_id"
+    """
+    Label identifying the profiler id that the span occurred in. This should be a string.
+    Example: "5249fbada8d5416482c2f6e47e337372"
+    """
+
     SERVER_ADDRESS = "server.address"
     """
     Name of the database host.
@@ -421,30 +628,6 @@ class SPANDATA:
     Example: 16456
     """
 
-    CODE_FILEPATH = "code.filepath"
-    """
-    The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path).
-    Example: "/app/myapplication/http/handler/server.py"
-    """
-
-    CODE_LINENO = "code.lineno"
-    """
-    The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`.
-    Example: 42
-    """
-
-    CODE_FUNCTION = "code.function"
-    """
-    The method or function name, or equivalent (usually rightmost part of the code unit's name).
-    Example: "server_request"
-    """
-
-    CODE_NAMESPACE = "code.namespace"
-    """
-    The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit.
-    Example: "http.handler"
-    """
-
     THREAD_ID = "thread.id"
     """
     Identifier of a thread from where the span originated. This should be a string.
@@ -457,12 +640,6 @@ class SPANDATA:
     Example: "MainThread"
     """
 
-    PROFILER_ID = "profiler_id"
-    """
-    Label identifying the profiler id that the span occurred in. This should be a string.
-    Example: "5249fbada8d5416482c2f6e47e337372"
-    """
-
 
 class SPANSTATUS:
     """
@@ -502,6 +679,12 @@ class OP:
     FUNCTION = "function"
     FUNCTION_AWS = "function.aws"
     FUNCTION_GCP = "function.gcp"
+    GEN_AI_CHAT = "gen_ai.chat"
+    GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
+    GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
+    GEN_AI_HANDOFF = "gen_ai.handoff"
+    GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
+    GEN_AI_RESPONSES = "gen_ai.responses"
     GRAPHQL_EXECUTE = "graphql.execute"
     GRAPHQL_MUTATION = "graphql.mutation"
     GRAPHQL_PARSE = "graphql.parse"
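
These ops pair naturally with the `gen_ai.*` span data above; the OpenAI-specific `OPENAI_CHAT_COMPLETIONS_CREATE` / `OPENAI_EMBEDDINGS_CREATE` ops are removed a few hunks below. A speculative sketch of wrapping a tool invocation in one of the new ops (`run_tool` and its body are hypothetical):

import sentry_sdk
from sentry_sdk.consts import OP, SPANDATA

def run_tool(name: str, arguments: dict) -> str:
    # Hypothetical helper: trace one tool call as a gen_ai.execute_tool span.
    with sentry_sdk.start_span(op=OP.GEN_AI_EXECUTE_TOOL, name=f"execute_tool {name}") as span:
        span.set_data(SPANDATA.GEN_AI_TOOL_NAME, name)
        span.set_data(SPANDATA.GEN_AI_TOOL_INPUT, arguments)
        result = "rainy, 57°F"  # stand-in for the real tool
        span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, result)
        return result
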
@@ -514,6 +697,8 @@ class OP:
     HTTP_CLIENT = "http.client"
     HTTP_CLIENT_STREAM = "http.client.stream"
     HTTP_SERVER = "http.server"
+    HTTP = "http"
+    MESSAGE = "message"
     MIDDLEWARE_DJANGO = "middleware.django"
     MIDDLEWARE_LITESTAR = "middleware.litestar"
     MIDDLEWARE_LITESTAR_RECEIVE = "middleware.litestar.receive"
@@ -524,8 +709,6 @@ class OP:
     MIDDLEWARE_STARLITE = "middleware.starlite"
     MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
     MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
-    OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai"
-    OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai"
     HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
         "ai.chat_completions.create.huggingface_hub"
     )
@@ -545,6 +728,7 @@ class OP:
     QUEUE_TASK_HUEY = "queue.task.huey"
     QUEUE_SUBMIT_RAY = "queue.submit.ray"
     QUEUE_TASK_RAY = "queue.task.ray"
+    RPC = "rpc"
     SUBPROCESS = "subprocess"
     SUBPROCESS_WAIT = "subprocess.wait"
     SUBPROCESS_COMMUNICATE = "subprocess.communicate"
@@ -573,8 +757,7 @@ class TransactionSource(str, Enum):
     URL = "url"
     VIEW = "view"
 
-    def __str__(self):
-        # type: () -> str
+    def __str__(self) -> str:
         return self.value
 
 
@@ -602,68 +785,76 @@ class ClientConstructor:
 
     def __init__(
         self,
-        dsn=None,  # type: Optional[str]
+        dsn: Optional[str] = None,
         *,
-        max_breadcrumbs=DEFAULT_MAX_BREADCRUMBS,  # type: int
-        release=None,  # type: Optional[str]
-        environment=None,  # type: Optional[str]
-        server_name=None,  # type: Optional[str]
-        shutdown_timeout=2,  # type: float
-        integrations=[],  # type: Sequence[sentry_sdk.integrations.Integration]  # noqa: B006
-        in_app_include=[],  # type: List[str]  # noqa: B006
-        in_app_exclude=[],  # type: List[str]  # noqa: B006
-        default_integrations=True,  # type: bool
-        dist=None,  # type: Optional[str]
-        transport=None,  # type: Optional[Union[sentry_sdk.transport.Transport, Type[sentry_sdk.transport.Transport], Callable[[Event], None]]]
-        transport_queue_size=DEFAULT_QUEUE_SIZE,  # type: int
-        sample_rate=1.0,  # type: float
-        send_default_pii=None,  # type: Optional[bool]
-        http_proxy=None,  # type: Optional[str]
-        https_proxy=None,  # type: Optional[str]
-        ignore_errors=[],  # type: Sequence[Union[type, str]]  # noqa: B006
-        max_request_body_size="medium",  # type: str
-        socket_options=None,  # type: Optional[List[Tuple[int, int, int | bytes]]]
-        keep_alive=None,  # type: Optional[bool]
-        before_send=None,  # type: Optional[EventProcessor]
-        before_breadcrumb=None,  # type: Optional[BreadcrumbProcessor]
-        debug=None,  # type: Optional[bool]
-        attach_stacktrace=False,  # type: bool
-        ca_certs=None,  # type: Optional[str]
-        traces_sample_rate=None,  # type: Optional[float]
-        traces_sampler=None,  # type: Optional[TracesSampler]
-        profiles_sample_rate=None,  # type: Optional[float]
-        profiles_sampler=None,  # type: Optional[TracesSampler]
-        profiler_mode=None,  # type: Optional[ProfilerMode]
-        profile_lifecycle="manual",  # type: Literal["manual", "trace"]
-        profile_session_sample_rate=None,  # type: Optional[float]
-        auto_enabling_integrations=True,  # type: bool
-        disabled_integrations=None,  # type: Optional[Sequence[sentry_sdk.integrations.Integration]]
-        auto_session_tracking=True,  # type: bool
-        send_client_reports=True,  # type: bool
-        _experiments={},  # type: Experiments  # noqa: B006
-        proxy_headers=None,  # type: Optional[Dict[str, str]]
-        before_send_transaction=None,  # type: Optional[TransactionProcessor]
-        project_root=None,  # type: Optional[str]
-        include_local_variables=True,  # type: Optional[bool]
-        include_source_context=True,  # type: Optional[bool]
-        trace_propagation_targets=[  # noqa: B006
-            MATCH_ALL
-        ],  # type: Optional[Sequence[str]]
-        functions_to_trace=[],  # type: Sequence[Dict[str, str]]  # noqa: B006
-        event_scrubber=None,  # type: Optional[sentry_sdk.scrubber.EventScrubber]
-        max_value_length=DEFAULT_MAX_VALUE_LENGTH,  # type: int
-        enable_backpressure_handling=True,  # type: bool
-        error_sampler=None,  # type: Optional[Callable[[Event, Hint], Union[float, bool]]]
-        enable_db_query_source=True,  # type: bool
-        db_query_source_threshold_ms=100,  # type: int
-        spotlight=None,  # type: Optional[Union[bool, str]]
-        cert_file=None,  # type: Optional[str]
-        key_file=None,  # type: Optional[str]
-        custom_repr=None,  # type: Optional[Callable[..., Optional[str]]]
-        add_full_stack=DEFAULT_ADD_FULL_STACK,  # type: bool
-        max_stack_frames=DEFAULT_MAX_STACK_FRAMES,  # type: Optional[int]
-    ):
-        # type: (...) -> None
+        max_breadcrumbs: int = DEFAULT_MAX_BREADCRUMBS,
+        release: Optional[str] = None,
+        environment: Optional[str] = None,
+        server_name: Optional[str] = None,
+        shutdown_timeout: float = 2,
+        integrations: Sequence[sentry_sdk.integrations.Integration] = [],  # noqa: B006
+        in_app_include: List[str] = [],  # noqa: B006
+        in_app_exclude: List[str] = [],  # noqa: B006
+        default_integrations: bool = True,
+        dist: Optional[str] = None,
+        transport: Optional[
+            Union[
+                sentry_sdk.transport.Transport,
+                Type[sentry_sdk.transport.Transport],
+                Callable[[Event], None],
+            ]
+        ] = None,
+        transport_queue_size: int = DEFAULT_QUEUE_SIZE,
+        sample_rate: float = 1.0,
+        send_default_pii: Optional[bool] = None,
+        http_proxy: Optional[str] = None,
+        https_proxy: Optional[str] = None,
+        ignore_errors: Sequence[Union[type, str]] = [],  # noqa: B006
+        max_request_body_size: str = "medium",
+        socket_options: Optional[List[Tuple[int, int, int | bytes]]] = None,
+        keep_alive: Optional[bool] = None,
+        before_send: Optional[EventProcessor] = None,
+        before_breadcrumb: Optional[BreadcrumbProcessor] = None,
+        debug: Optional[bool] = None,
+        attach_stacktrace: bool = False,
+        ca_certs: Optional[str] = None,
+        traces_sample_rate: Optional[float] = None,
+        traces_sampler: Optional[TracesSampler] = None,
+        profiles_sample_rate: Optional[float] = None,
+        profiles_sampler: Optional[TracesSampler] = None,
+        profiler_mode: Optional[ProfilerMode] = None,
+        profile_lifecycle: Literal["manual", "trace"] = "manual",
+        profile_session_sample_rate: Optional[float] = None,
+        auto_enabling_integrations: bool = True,
+        disabled_integrations: Optional[
+            Sequence[sentry_sdk.integrations.Integration]
+        ] = None,
+        auto_session_tracking: bool = True,
+        send_client_reports: bool = True,
+        _experiments: Experiments = {},  # noqa: B006
+        proxy_headers: Optional[Dict[str, str]] = None,
+        before_send_transaction: Optional[TransactionProcessor] = None,
+        project_root: Optional[str] = None,
+        include_local_variables: Optional[bool] = True,
+        include_source_context: Optional[bool] = True,
+        trace_propagation_targets: Optional[Sequence[str]] = [MATCH_ALL],  # noqa: B006
+        exclude_span_origins: Optional[Sequence[str]] = None,
+        functions_to_trace: Sequence[Dict[str, str]] = [],  # noqa: B006
+        event_scrubber: Optional[sentry_sdk.scrubber.EventScrubber] = None,
+        max_value_length: int = DEFAULT_MAX_VALUE_LENGTH,
+        enable_backpressure_handling: bool = True,
+        error_sampler: Optional[Callable[[Event, Hint], Union[float, bool]]] = None,
+        enable_db_query_source: bool = True,
+        db_query_source_threshold_ms: int = 100,
+        spotlight: Optional[Union[bool, str]] = None,
+        cert_file: Optional[str] = None,
+        key_file: Optional[str] = None,
+        custom_repr: Optional[Callable[..., Optional[str]]] = None,
+        add_full_stack: bool = DEFAULT_ADD_FULL_STACK,
+        max_stack_frames: Optional[int] = DEFAULT_MAX_STACK_FRAMES,
+        enable_logs: bool = False,
+        before_send_log: Optional[Callable[[Log, Hint], Optional[Log]]] = None,
+    ) -> None:
         """Initialize the Sentry SDK with the given parameters. All parameters described here can be used in a call to `sentry_sdk.init()`.
 
         :param dsn: The DSN tells the SDK where to send the events.
@@ -980,6 +1171,17 @@ class ClientConstructor:
             If `trace_propagation_targets` is not provided, trace data is attached to every outgoing request from the
             instrumented client.
 
+        :param exclude_span_origins: An optional list of strings or regex patterns to disable span creation based
+            on span origin. When a span's origin would match any of the provided patterns, the span will not be
+            created.
+
+            This can be useful to exclude automatic span creation from specific integrations without disabling the
+            entire integration.
+
+            The option may contain a list of strings or regexes against which the span origins are matched.
+            String entries do not have to be full matches, meaning a span origin is matched when it contains
+            a string provided through the option.
+
         :param functions_to_trace: An optional list of functions that should be set up for tracing.
 
             For each function in the list, a span will be created when the function is executed.
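
In practice the option is passed straight to `sentry_sdk.init()`. A minimal sketch based on the docstring above (the DSN and the origin pattern are placeholders):

import sentry_sdk

# Suppress auto-instrumented spans whose origin contains "auto.http",
# without disabling the integrations that create them.
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    exclude_span_origins=["auto.http"],
)
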
@@ -1038,13 +1240,20 @@ class ClientConstructor:
 
         :param instrumenter:
 
+        :param enable_logs: Set `enable_logs` to True to enable the SDK to emit
+            Sentry logs. Defaults to False.
+
+        :param before_send_log: An optional function to modify or filter out logs
+            before they're sent to Sentry. Any modifications to the log in this
+            function will be retained. If the function returns None, the log will
+            not be sent to Sentry.
+
         :param _experiments:
         """
         pass
 
 
-def _get_default_options():
-    # type: () -> dict[str, Any]
+def _get_default_options() -> dict[str, Any]:
     import inspect
 
     a = inspect.getfullargspec(ClientConstructor.__init__)
@@ -1063,4 +1272,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options
 
 
-VERSION = "3.0.0a2"
+VERSION = "3.0.0a4"
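
Tying the version bump back to the headline change above: `enable_logs` and `before_send_log` are now regular `init()` parameters rather than `_experiments` entries. A sketch of a log-filtering setup (the `"body"` key is assumed from the SDK's `Log` dict shape; the filter logic and DSN are placeholders):

import sentry_sdk

def drop_noisy_logs(log, hint):
    # Returning None drops the log entirely; any mutation made here is kept.
    if "heartbeat" in log.get("body", ""):
        return None
    return log

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    enable_logs=True,
    before_send_log=drop_noisy_logs,
)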