sentry-sdk 3.0.0a1__py2.py3-none-any.whl → 3.0.0a3__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentry-sdk might be problematic. Click here for more details.

Files changed (157) hide show
  1. sentry_sdk/__init__.py +2 -0
  2. sentry_sdk/_compat.py +5 -12
  3. sentry_sdk/_init_implementation.py +7 -7
  4. sentry_sdk/_log_batcher.py +17 -29
  5. sentry_sdk/_lru_cache.py +7 -9
  6. sentry_sdk/_queue.py +2 -4
  7. sentry_sdk/_types.py +11 -18
  8. sentry_sdk/_werkzeug.py +5 -7
  9. sentry_sdk/ai/monitoring.py +44 -31
  10. sentry_sdk/ai/utils.py +3 -4
  11. sentry_sdk/api.py +75 -87
  12. sentry_sdk/attachments.py +10 -12
  13. sentry_sdk/client.py +137 -155
  14. sentry_sdk/consts.py +430 -174
  15. sentry_sdk/crons/api.py +16 -17
  16. sentry_sdk/crons/decorator.py +25 -27
  17. sentry_sdk/debug.py +4 -6
  18. sentry_sdk/envelope.py +46 -112
  19. sentry_sdk/feature_flags.py +9 -15
  20. sentry_sdk/integrations/__init__.py +24 -19
  21. sentry_sdk/integrations/_asgi_common.py +15 -18
  22. sentry_sdk/integrations/_wsgi_common.py +22 -33
  23. sentry_sdk/integrations/aiohttp.py +32 -30
  24. sentry_sdk/integrations/anthropic.py +42 -37
  25. sentry_sdk/integrations/argv.py +3 -4
  26. sentry_sdk/integrations/ariadne.py +16 -18
  27. sentry_sdk/integrations/arq.py +21 -29
  28. sentry_sdk/integrations/asgi.py +63 -37
  29. sentry_sdk/integrations/asyncio.py +14 -16
  30. sentry_sdk/integrations/atexit.py +6 -10
  31. sentry_sdk/integrations/aws_lambda.py +26 -36
  32. sentry_sdk/integrations/beam.py +10 -18
  33. sentry_sdk/integrations/boto3.py +18 -16
  34. sentry_sdk/integrations/bottle.py +25 -34
  35. sentry_sdk/integrations/celery/__init__.py +41 -61
  36. sentry_sdk/integrations/celery/beat.py +23 -27
  37. sentry_sdk/integrations/celery/utils.py +15 -17
  38. sentry_sdk/integrations/chalice.py +8 -10
  39. sentry_sdk/integrations/clickhouse_driver.py +21 -31
  40. sentry_sdk/integrations/cloud_resource_context.py +9 -16
  41. sentry_sdk/integrations/cohere.py +27 -33
  42. sentry_sdk/integrations/dedupe.py +5 -8
  43. sentry_sdk/integrations/django/__init__.py +57 -72
  44. sentry_sdk/integrations/django/asgi.py +26 -34
  45. sentry_sdk/integrations/django/caching.py +23 -19
  46. sentry_sdk/integrations/django/middleware.py +17 -20
  47. sentry_sdk/integrations/django/signals_handlers.py +11 -10
  48. sentry_sdk/integrations/django/templates.py +19 -16
  49. sentry_sdk/integrations/django/transactions.py +16 -11
  50. sentry_sdk/integrations/django/views.py +6 -10
  51. sentry_sdk/integrations/dramatiq.py +21 -21
  52. sentry_sdk/integrations/excepthook.py +10 -10
  53. sentry_sdk/integrations/executing.py +3 -4
  54. sentry_sdk/integrations/falcon.py +27 -42
  55. sentry_sdk/integrations/fastapi.py +13 -16
  56. sentry_sdk/integrations/flask.py +31 -38
  57. sentry_sdk/integrations/gcp.py +13 -16
  58. sentry_sdk/integrations/gnu_backtrace.py +4 -6
  59. sentry_sdk/integrations/gql.py +16 -17
  60. sentry_sdk/integrations/graphene.py +13 -12
  61. sentry_sdk/integrations/grpc/__init__.py +19 -1
  62. sentry_sdk/integrations/grpc/aio/server.py +15 -14
  63. sentry_sdk/integrations/grpc/client.py +19 -9
  64. sentry_sdk/integrations/grpc/consts.py +2 -0
  65. sentry_sdk/integrations/grpc/server.py +12 -8
  66. sentry_sdk/integrations/httpx.py +9 -12
  67. sentry_sdk/integrations/huey.py +13 -20
  68. sentry_sdk/integrations/huggingface_hub.py +18 -18
  69. sentry_sdk/integrations/langchain.py +203 -113
  70. sentry_sdk/integrations/launchdarkly.py +13 -10
  71. sentry_sdk/integrations/litestar.py +37 -35
  72. sentry_sdk/integrations/logging.py +52 -65
  73. sentry_sdk/integrations/loguru.py +127 -57
  74. sentry_sdk/integrations/modules.py +3 -4
  75. sentry_sdk/integrations/openai.py +100 -88
  76. sentry_sdk/integrations/openai_agents/__init__.py +49 -0
  77. sentry_sdk/integrations/openai_agents/consts.py +1 -0
  78. sentry_sdk/integrations/openai_agents/patches/__init__.py +4 -0
  79. sentry_sdk/integrations/openai_agents/patches/agent_run.py +152 -0
  80. sentry_sdk/integrations/openai_agents/patches/models.py +52 -0
  81. sentry_sdk/integrations/openai_agents/patches/runner.py +42 -0
  82. sentry_sdk/integrations/openai_agents/patches/tools.py +84 -0
  83. sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
  84. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +20 -0
  85. sentry_sdk/integrations/openai_agents/spans/ai_client.py +46 -0
  86. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +47 -0
  87. sentry_sdk/integrations/openai_agents/spans/handoff.py +24 -0
  88. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +41 -0
  89. sentry_sdk/integrations/openai_agents/utils.py +201 -0
  90. sentry_sdk/integrations/openfeature.py +11 -6
  91. sentry_sdk/integrations/pure_eval.py +6 -10
  92. sentry_sdk/integrations/pymongo.py +13 -17
  93. sentry_sdk/integrations/pyramid.py +31 -36
  94. sentry_sdk/integrations/quart.py +23 -28
  95. sentry_sdk/integrations/ray.py +73 -64
  96. sentry_sdk/integrations/redis/__init__.py +7 -4
  97. sentry_sdk/integrations/redis/_async_common.py +25 -12
  98. sentry_sdk/integrations/redis/_sync_common.py +19 -13
  99. sentry_sdk/integrations/redis/modules/caches.py +17 -8
  100. sentry_sdk/integrations/redis/modules/queries.py +9 -8
  101. sentry_sdk/integrations/redis/rb.py +3 -2
  102. sentry_sdk/integrations/redis/redis.py +4 -4
  103. sentry_sdk/integrations/redis/redis_cluster.py +21 -13
  104. sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +3 -2
  105. sentry_sdk/integrations/redis/utils.py +23 -24
  106. sentry_sdk/integrations/rq.py +13 -16
  107. sentry_sdk/integrations/rust_tracing.py +9 -6
  108. sentry_sdk/integrations/sanic.py +34 -46
  109. sentry_sdk/integrations/serverless.py +22 -27
  110. sentry_sdk/integrations/socket.py +27 -15
  111. sentry_sdk/integrations/spark/__init__.py +1 -0
  112. sentry_sdk/integrations/spark/spark_driver.py +45 -83
  113. sentry_sdk/integrations/spark/spark_worker.py +7 -11
  114. sentry_sdk/integrations/sqlalchemy.py +22 -19
  115. sentry_sdk/integrations/starlette.py +86 -90
  116. sentry_sdk/integrations/starlite.py +28 -34
  117. sentry_sdk/integrations/statsig.py +5 -4
  118. sentry_sdk/integrations/stdlib.py +28 -24
  119. sentry_sdk/integrations/strawberry.py +62 -49
  120. sentry_sdk/integrations/sys_exit.py +7 -11
  121. sentry_sdk/integrations/threading.py +12 -14
  122. sentry_sdk/integrations/tornado.py +28 -32
  123. sentry_sdk/integrations/trytond.py +4 -3
  124. sentry_sdk/integrations/typer.py +8 -6
  125. sentry_sdk/integrations/unleash.py +5 -4
  126. sentry_sdk/integrations/wsgi.py +47 -46
  127. sentry_sdk/logger.py +41 -10
  128. sentry_sdk/monitor.py +16 -28
  129. sentry_sdk/opentelemetry/consts.py +11 -4
  130. sentry_sdk/opentelemetry/contextvars_context.py +26 -16
  131. sentry_sdk/opentelemetry/propagator.py +38 -21
  132. sentry_sdk/opentelemetry/sampler.py +51 -34
  133. sentry_sdk/opentelemetry/scope.py +36 -37
  134. sentry_sdk/opentelemetry/span_processor.py +48 -58
  135. sentry_sdk/opentelemetry/tracing.py +58 -14
  136. sentry_sdk/opentelemetry/utils.py +186 -194
  137. sentry_sdk/profiler/continuous_profiler.py +108 -97
  138. sentry_sdk/profiler/transaction_profiler.py +70 -97
  139. sentry_sdk/profiler/utils.py +11 -15
  140. sentry_sdk/scope.py +251 -273
  141. sentry_sdk/scrubber.py +22 -26
  142. sentry_sdk/serializer.py +40 -54
  143. sentry_sdk/session.py +44 -61
  144. sentry_sdk/sessions.py +35 -49
  145. sentry_sdk/spotlight.py +15 -21
  146. sentry_sdk/tracing.py +121 -187
  147. sentry_sdk/tracing_utils.py +104 -122
  148. sentry_sdk/transport.py +131 -157
  149. sentry_sdk/utils.py +232 -309
  150. sentry_sdk/worker.py +16 -28
  151. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/METADATA +3 -3
  152. sentry_sdk-3.0.0a3.dist-info/RECORD +168 -0
  153. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/WHEEL +1 -1
  154. sentry_sdk-3.0.0a1.dist-info/RECORD +0 -154
  155. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/entry_points.txt +0 -0
  156. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/licenses/LICENSE +0 -0
  157. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a3.dist-info}/top_level.txt +0 -0
sentry_sdk/consts.py CHANGED
@@ -1,8 +1,21 @@
1
+ from __future__ import annotations
1
2
  import itertools
2
-
3
3
  from enum import Enum
4
4
  from typing import TYPE_CHECKING
5
5
 
6
+ if TYPE_CHECKING:
7
+ from typing import (
8
+ Optional,
9
+ Callable,
10
+ Union,
11
+ List,
12
+ Type,
13
+ Dict,
14
+ Any,
15
+ Sequence,
16
+ Tuple,
17
+ )
18
+
6
19
  # up top to prevent circular import due to integration import
7
20
  DEFAULT_MAX_VALUE_LENGTH = 1024
8
21
 
@@ -27,17 +40,6 @@ class CompressionAlgo(Enum):
27
40
 
28
41
 
29
42
  if TYPE_CHECKING:
30
- import sentry_sdk
31
-
32
- from typing import Optional
33
- from typing import Callable
34
- from typing import Union
35
- from typing import List
36
- from typing import Type
37
- from typing import Dict
38
- from typing import Any
39
- from typing import Sequence
40
- from typing import Tuple
41
43
  from typing_extensions import Literal
42
44
  from typing_extensions import TypedDict
43
45
 
@@ -47,11 +49,14 @@ if TYPE_CHECKING:
47
49
  Event,
48
50
  EventProcessor,
49
51
  Hint,
52
+ Log,
50
53
  ProfilerMode,
51
54
  TracesSampler,
52
55
  TransactionProcessor,
53
56
  )
54
57
 
58
+ import sentry_sdk
59
+
55
60
  # Experiments are feature flags to enable and disable certain unstable SDK
56
61
  # functionality. Changing them from the defaults (`None`) in production
57
62
  # code is highly discouraged. They are not subject to any stability
@@ -71,6 +76,7 @@ if TYPE_CHECKING:
71
76
  "transport_num_pools": Optional[int],
72
77
  "transport_http2": Optional[bool],
73
78
  "enable_logs": Optional[bool],
79
+ "before_send_log": Optional[Callable[[Log, Hint], Optional[Log]]],
74
80
  },
75
81
  total=False,
76
82
  )
@@ -94,16 +100,45 @@ class SPANDATA:
94
100
  See: https://develop.sentry.dev/sdk/performance/span-data-conventions/
95
101
  """
96
102
 
103
+ AI_CITATIONS = "ai.citations"
104
+ """
105
+ References or sources cited by the AI model in its response.
106
+ Example: ["Smith et al. 2020", "Jones 2019"]
107
+ """
108
+
109
+ AI_COMPLETION_TOKENS_USED = "ai.completion_tokens.used"
110
+ """
111
+ The number of output completion tokens used by the model.
112
+ Example: 10
113
+ """
114
+
115
+ AI_DOCUMENTS = "ai.documents"
116
+ """
117
+ Documents or content chunks used as context for the AI model.
118
+ Example: ["doc1.txt", "doc2.pdf"]
119
+ """
120
+
121
+ AI_FINISH_REASON = "ai.finish_reason"
122
+ """
123
+ The reason why the model stopped generating.
124
+ Example: "length"
125
+ """
126
+
97
127
  AI_FREQUENCY_PENALTY = "ai.frequency_penalty"
98
128
  """
99
129
  Used to reduce repetitiveness of generated tokens.
100
130
  Example: 0.5
101
131
  """
102
132
 
103
- AI_PRESENCE_PENALTY = "ai.presence_penalty"
133
+ AI_FUNCTION_CALL = "ai.function_call"
104
134
  """
105
- Used to reduce repetitiveness of generated tokens.
106
- Example: 0.5
135
+ For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
136
+ """
137
+
138
+ AI_GENERATION_ID = "ai.generation_id"
139
+ """
140
+ Unique identifier for the completion.
141
+ Example: "gen_123abc"
107
142
  """
108
143
 
109
144
  AI_INPUT_MESSAGES = "ai.input_messages"
@@ -112,10 +147,9 @@ class SPANDATA:
112
147
  Example: [{"role": "user", "message": "hello"}]
113
148
  """
114
149
 
115
- AI_MODEL_ID = "ai.model_id"
150
+ AI_LOGIT_BIAS = "ai.logit_bias"
116
151
  """
117
- The unique descriptor of the model being executed
118
- Example: gpt-4
152
+ For an AI model call, the logit bias
119
153
  """
120
154
 
121
155
  AI_METADATA = "ai.metadata"
@@ -124,28 +158,100 @@ class SPANDATA:
124
158
  Example: {"executed_function": "add_integers"}
125
159
  """
126
160
 
127
- AI_TAGS = "ai.tags"
161
+ AI_MODEL_ID = "ai.model_id"
128
162
  """
129
- Tags that describe an AI pipeline step.
130
- Example: {"executed_function": "add_integers"}
163
+ The unique descriptor of the model being executed
164
+ Example: gpt-4
165
+ """
166
+
167
+ AI_PIPELINE_NAME = "ai.pipeline.name"
168
+ """
169
+ Name of the AI pipeline or chain being executed.
170
+ Example: "qa-pipeline"
171
+ """
172
+
173
+ AI_PREAMBLE = "ai.preamble"
174
+ """
175
+ For an AI model call, the preamble parameter.
176
+ Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
177
+ Example: "You are now a clown."
178
+ """
179
+
180
+ AI_PRESENCE_PENALTY = "ai.presence_penalty"
181
+ """
182
+ Used to reduce repetitiveness of generated tokens.
183
+ Example: 0.5
184
+ """
185
+
186
+ AI_PROMPT_TOKENS_USED = "ai.prompt_tokens.used"
187
+ """
188
+ The number of input prompt tokens used by the model.
189
+ Example: 10
190
+ """
191
+
192
+ AI_RAW_PROMPTING = "ai.raw_prompting"
193
+ """
194
+ Minimize pre-processing done to the prompt sent to the LLM.
195
+ Example: true
196
+ """
197
+
198
+ AI_RESPONSE_FORMAT = "ai.response_format"
199
+ """
200
+ For an AI model call, the format of the response
201
+ """
202
+
203
+ AI_RESPONSES = "ai.responses"
204
+ """
205
+ The responses to an AI model call. Always as a list.
206
+ Example: ["hello", "world"]
207
+ """
208
+
209
+ AI_SEARCH_QUERIES = "ai.search_queries"
210
+ """
211
+ Queries used to search for relevant context or documents.
212
+ Example: ["climate change effects", "renewable energy"]
213
+ """
214
+
215
+ AI_SEARCH_REQUIRED = "ai.is_search_required"
216
+ """
217
+ Boolean indicating if the model needs to perform a search.
218
+ Example: true
219
+ """
220
+
221
+ AI_SEARCH_RESULTS = "ai.search_results"
222
+ """
223
+ Results returned from search queries for context.
224
+ Example: ["Result 1", "Result 2"]
225
+ """
226
+
227
+ AI_SEED = "ai.seed"
228
+ """
229
+ The seed, ideally models given the same seed and same other parameters will produce the exact same output.
230
+ Example: 123.45
131
231
  """
132
232
 
133
233
  AI_STREAMING = "ai.streaming"
134
234
  """
135
- Whether or not the AI model call's repsonse was streamed back asynchronously
235
+ Whether or not the AI model call's response was streamed back asynchronously
136
236
  Example: true
137
237
  """
138
238
 
239
+ AI_TAGS = "ai.tags"
240
+ """
241
+ Tags that describe an AI pipeline step.
242
+ Example: {"executed_function": "add_integers"}
243
+ """
244
+
139
245
  AI_TEMPERATURE = "ai.temperature"
140
246
  """
141
247
  For an AI model call, the temperature parameter. Temperature essentially means how random the output will be.
142
248
  Example: 0.5
143
249
  """
144
250
 
145
- AI_TOP_P = "ai.top_p"
251
+ AI_TEXTS = "ai.texts"
146
252
  """
147
- For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
148
- Example: 0.5
253
+ Raw text inputs provided to the model.
254
+ Example: ["What is machine learning?"]
149
255
  """
150
256
 
151
257
  AI_TOP_K = "ai.top_k"
@@ -154,14 +260,15 @@ class SPANDATA:
154
260
  Example: 35
155
261
  """
156
262
 
157
- AI_FUNCTION_CALL = "ai.function_call"
263
+ AI_TOP_P = "ai.top_p"
158
264
  """
159
- For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
265
+ For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
266
+ Example: 0.5
160
267
  """
161
268
 
162
269
  AI_TOOL_CALLS = "ai.tool_calls"
163
270
  """
164
- For an AI model call, the function that was called.
271
+ For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
165
272
  """
166
273
 
167
274
  AI_TOOLS = "ai.tools"
@@ -169,52 +276,71 @@ class SPANDATA:
169
276
  For an AI model call, the functions that are available
170
277
  """
171
278
 
172
- AI_RESPONSE_FORMAT = "ai.response_format"
279
+ AI_TOTAL_TOKENS_USED = "ai.total_tokens.used"
173
280
  """
174
- For an AI model call, the format of the response
281
+ The total number of tokens (input + output) used by the request to the model.
282
+ Example: 20
175
283
  """
176
284
 
177
- AI_LOGIT_BIAS = "ai.response_format"
285
+ AI_WARNINGS = "ai.warnings"
178
286
  """
179
- For an AI model call, the logit bias
287
+ Warning messages generated during model execution.
288
+ Example: ["Token limit exceeded"]
180
289
  """
181
290
 
182
- AI_PREAMBLE = "ai.preamble"
291
+ CACHE_HIT = "cache.hit"
183
292
  """
184
- For an AI model call, the preamble parameter.
185
- Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
186
- Example: "You are now a clown."
293
+ A boolean indicating whether the requested data was found in the cache.
294
+ Example: true
187
295
  """
188
296
 
189
- AI_RAW_PROMPTING = "ai.raw_prompting"
297
+ CACHE_ITEM_SIZE = "cache.item_size"
190
298
  """
191
- Minimize pre-processing done to the prompt sent to the LLM.
192
- Example: true
299
+ The size of the requested data in bytes.
300
+ Example: 58
193
301
  """
194
302
 
195
- AI_RESPONSES = "ai.responses"
303
+ CACHE_KEY = "cache.key"
196
304
  """
197
- The responses to an AI model call. Always as a list.
198
- Example: ["hello", "world"]
305
+ The key of the requested data.
306
+ Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3
199
307
  """
200
308
 
201
- AI_SEED = "ai.seed"
309
+ CODE_FILEPATH = "code.filepath"
202
310
  """
203
- The seed, ideally models given the same seed and same other parameters will produce the exact same output.
204
- Example: 123.45
311
+ The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path).
312
+ Example: "/app/myapplication/http/handler/server.py"
205
313
  """
206
314
 
207
- DB_NAME = "db.name"
315
+ CODE_FUNCTION = "code.function"
208
316
  """
209
- The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).
210
- Example: myDatabase
317
+ The method or function name, or equivalent (usually rightmost part of the code unit's name).
318
+ Example: "server_request"
211
319
  """
212
320
 
213
- DB_USER = "db.user"
321
+ CODE_LINENO = "code.lineno"
214
322
  """
215
- The name of the database user used for connecting to the database.
216
- See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
217
- Example: my_user
323
+ The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`.
324
+ Example: 42
325
+ """
326
+
327
+ CODE_NAMESPACE = "code.namespace"
328
+ """
329
+ The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit.
330
+ Example: "http.handler"
331
+ """
332
+
333
+ DB_MONGODB_COLLECTION = "db.mongodb.collection"
334
+ """
335
+ The MongoDB collection being accessed within the database.
336
+ See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes
337
+ Example: public.users; customers
338
+ """
339
+
340
+ DB_NAME = "db.name"
341
+ """
342
+ The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).
343
+ Example: myDatabase
218
344
  """
219
345
 
220
346
  DB_OPERATION = "db.operation"
@@ -231,47 +357,161 @@ class SPANDATA:
231
357
  Example: postgresql
232
358
  """
233
359
 
234
- DB_MONGODB_COLLECTION = "db.mongodb.collection"
360
+ DB_USER = "db.user"
235
361
  """
236
- The MongoDB collection being accessed within the database.
237
- See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes
238
- Example: public.users; customers
362
+ The name of the database user used for connecting to the database.
363
+ See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md
364
+ Example: my_user
239
365
  """
240
366
 
241
- CACHE_HIT = "cache.hit"
367
+ GEN_AI_AGENT_NAME = "gen_ai.agent.name"
242
368
  """
243
- A boolean indicating whether the requested data was found in the cache.
244
- Example: true
369
+ The name of the agent being used.
370
+ Example: "ResearchAssistant"
245
371
  """
246
372
 
247
- CACHE_ITEM_SIZE = "cache.item_size"
373
+ GEN_AI_CHOICE = "gen_ai.choice"
248
374
  """
249
- The size of the requested data in bytes.
250
- Example: 58
375
+ The model's response message.
376
+ Example: "The weather in Paris is rainy and overcast, with temperatures around 57°F"
251
377
  """
252
378
 
253
- CACHE_KEY = "cache.key"
379
+ GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
254
380
  """
255
- The key of the requested data.
256
- Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3
381
+ The name of the operation being performed.
382
+ Example: "chat"
257
383
  """
258
384
 
259
- NETWORK_PEER_ADDRESS = "network.peer.address"
385
+ GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
260
386
  """
261
- Peer address of the network connection - IP address or Unix domain socket name.
262
- Example: 10.1.2.80, /tmp/my.sock, localhost
387
+ The model's response text messages.
388
+ Example: ["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"]
263
389
  """
264
390
 
265
- NETWORK_PEER_PORT = "network.peer.port"
391
+ GEN_AI_RESPONSE_TOOL_CALLS = "gen_ai.response.tool_calls"
266
392
  """
267
- Peer port number of the network connection.
268
- Example: 6379
393
+ The tool calls in the model's response.
394
+ Example: [{"name": "get_weather", "arguments": {"location": "Paris"}}]
269
395
  """
270
396
 
271
- HTTP_QUERY = "http.query"
397
+ GEN_AI_REQUEST_AVAILABLE_TOOLS = "gen_ai.request.available_tools"
272
398
  """
273
- The Query string present in the URL.
274
- Example: ?foo=bar&bar=baz
399
+ The available tools for the model.
400
+ Example: [{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}]
401
+ """
402
+
403
+ GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty"
404
+ """
405
+ The frequency penalty parameter used to reduce repetitiveness of generated tokens.
406
+ Example: 0.1
407
+ """
408
+
409
+ GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
410
+ """
411
+ The maximum number of tokens to generate in the response.
412
+ Example: 2048
413
+ """
414
+
415
+ GEN_AI_REQUEST_MESSAGES = "gen_ai.request.messages"
416
+ """
417
+ The messages passed to the model. The "content" can be a string or an array of objects.
418
+ Example: [{"role": "system", "content": "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}]
419
+ """
420
+
421
+ GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
422
+ """
423
+ The model identifier being used for the request.
424
+ Example: "gpt-4-turbo-preview"
425
+ """
426
+
427
+ GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
428
+ """
429
+ The presence penalty parameter used to reduce repetitiveness of generated tokens.
430
+ Example: 0.1
431
+ """
432
+
433
+ GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
434
+ """
435
+ The temperature parameter used to control randomness in the output.
436
+ Example: 0.7
437
+ """
438
+
439
+ GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
440
+ """
441
+ The top_p parameter used to control diversity via nucleus sampling.
442
+ Example: 1.0
443
+ """
444
+
445
+ GEN_AI_SYSTEM = "gen_ai.system"
446
+ """
447
+ The name of the AI system being used.
448
+ Example: "openai"
449
+ """
450
+
451
+ GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description"
452
+ """
453
+ The description of the tool being used.
454
+ Example: "Searches the web for current information about a topic"
455
+ """
456
+
457
+ GEN_AI_TOOL_INPUT = "gen_ai.tool.input"
458
+ """
459
+ The input of the tool being used.
460
+ Example: {"location": "Paris"}
461
+ """
462
+
463
+ GEN_AI_TOOL_NAME = "gen_ai.tool.name"
464
+ """
465
+ The name of the tool being used.
466
+ Example: "web_search"
467
+ """
468
+
469
+ GEN_AI_TOOL_OUTPUT = "gen_ai.tool.output"
470
+ """
471
+ The output of the tool being used.
472
+ Example: "rainy, 57°F"
473
+ """
474
+
475
+ GEN_AI_TOOL_TYPE = "gen_ai.tool.type"
476
+ """
477
+ The type of tool being used.
478
+ Example: "function"
479
+ """
480
+
481
+ GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
482
+ """
483
+ The number of tokens in the input.
484
+ Example: 150
485
+ """
486
+
487
+ GEN_AI_USAGE_INPUT_TOKENS_CACHED = "gen_ai.usage.input_tokens.cached"
488
+ """
489
+ The number of cached tokens in the input.
490
+ Example: 50
491
+ """
492
+
493
+ GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"
494
+ """
495
+ The number of tokens in the output.
496
+ Example: 250
497
+ """
498
+
499
+ GEN_AI_USAGE_OUTPUT_TOKENS_REASONING = "gen_ai.usage.output_tokens.reasoning"
500
+ """
501
+ The number of tokens used for reasoning in the output.
502
+ Example: 75
503
+ """
504
+
505
+ GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens"
506
+ """
507
+ The total number of tokens used (input + output).
508
+ Example: 400
509
+ """
510
+
511
+ GEN_AI_USER_MESSAGE = "gen_ai.user.message"
512
+ """
513
+ The user message passed to the model.
514
+ Example: "What's the weather in Paris?"
275
515
  """
276
516
 
277
517
  HTTP_FRAGMENT = "http.fragment"
@@ -286,6 +526,12 @@ class SPANDATA:
286
526
  Example: GET
287
527
  """
288
528
 
529
+ HTTP_QUERY = "http.query"
530
+ """
531
+ The Query string present in the URL.
532
+ Example: ?foo=bar&bar=baz
533
+ """
534
+
289
535
  HTTP_STATUS_CODE = "http.response.status_code"
290
536
  """
291
537
  The HTTP status code as an integer.
@@ -303,14 +549,14 @@ class SPANDATA:
303
549
  The message's identifier.
304
550
  """
305
551
 
306
- MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"
552
+ MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"
307
553
  """
308
- Number of retries/attempts to process a message.
554
+ The latency between when the task was enqueued and when it was started to be processed.
309
555
  """
310
556
 
311
- MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"
557
+ MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"
312
558
  """
313
- The latency between when the task was enqueued and when it was started to be processed.
559
+ Number of retries/attempts to process a message.
314
560
  """
315
561
 
316
562
  MESSAGING_SYSTEM = "messaging.system"
@@ -318,6 +564,24 @@ class SPANDATA:
318
564
  The messaging system's name, e.g. `kafka`, `aws_sqs`
319
565
  """
320
566
 
567
+ NETWORK_PEER_ADDRESS = "network.peer.address"
568
+ """
569
+ Peer address of the network connection - IP address or Unix domain socket name.
570
+ Example: 10.1.2.80, /tmp/my.sock, localhost
571
+ """
572
+
573
+ NETWORK_PEER_PORT = "network.peer.port"
574
+ """
575
+ Peer port number of the network connection.
576
+ Example: 6379
577
+ """
578
+
579
+ PROFILER_ID = "profiler_id"
580
+ """
581
+ Label identifying the profiler id that the span occurred in. This should be a string.
582
+ Example: "5249fbada8d5416482c2f6e47e337372"
583
+ """
584
+
321
585
  SERVER_ADDRESS = "server.address"
322
586
  """
323
587
  Name of the database host.
@@ -343,30 +607,6 @@ class SPANDATA:
343
607
  Example: 16456
344
608
  """
345
609
 
346
- CODE_FILEPATH = "code.filepath"
347
- """
348
- The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path).
349
- Example: "/app/myapplication/http/handler/server.py"
350
- """
351
-
352
- CODE_LINENO = "code.lineno"
353
- """
354
- The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`.
355
- Example: 42
356
- """
357
-
358
- CODE_FUNCTION = "code.function"
359
- """
360
- The method or function name, or equivalent (usually rightmost part of the code unit's name).
361
- Example: "server_request"
362
- """
363
-
364
- CODE_NAMESPACE = "code.namespace"
365
- """
366
- The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit.
367
- Example: "http.handler"
368
- """
369
-
370
610
  THREAD_ID = "thread.id"
371
611
  """
372
612
  Identifier of a thread from where the span originated. This should be a string.
@@ -379,12 +619,6 @@ class SPANDATA:
379
619
  Example: "MainThread"
380
620
  """
381
621
 
382
- PROFILER_ID = "profiler_id"
383
- """
384
- Label identifying the profiler id that the span occurred in. This should be a string.
385
- Example: "5249fbada8d5416482c2f6e47e337372"
386
- """
387
-
388
622
 
389
623
  class SPANSTATUS:
390
624
  """
@@ -424,6 +658,10 @@ class OP:
424
658
  FUNCTION = "function"
425
659
  FUNCTION_AWS = "function.aws"
426
660
  FUNCTION_GCP = "function.gcp"
661
+ GEN_AI_CHAT = "gen_ai.chat"
662
+ GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
663
+ GEN_AI_HANDOFF = "gen_ai.handoff"
664
+ GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
427
665
  GRAPHQL_EXECUTE = "graphql.execute"
428
666
  GRAPHQL_MUTATION = "graphql.mutation"
429
667
  GRAPHQL_PARSE = "graphql.parse"
@@ -436,6 +674,8 @@ class OP:
436
674
  HTTP_CLIENT = "http.client"
437
675
  HTTP_CLIENT_STREAM = "http.client.stream"
438
676
  HTTP_SERVER = "http.server"
677
+ HTTP = "http"
678
+ MESSAGE = "message"
439
679
  MIDDLEWARE_DJANGO = "middleware.django"
440
680
  MIDDLEWARE_LITESTAR = "middleware.litestar"
441
681
  MIDDLEWARE_LITESTAR_RECEIVE = "middleware.litestar.receive"
@@ -467,6 +707,7 @@ class OP:
467
707
  QUEUE_TASK_HUEY = "queue.task.huey"
468
708
  QUEUE_SUBMIT_RAY = "queue.submit.ray"
469
709
  QUEUE_TASK_RAY = "queue.task.ray"
710
+ RPC = "rpc"
470
711
  SUBPROCESS = "subprocess"
471
712
  SUBPROCESS_WAIT = "subprocess.wait"
472
713
  SUBPROCESS_COMMUNICATE = "subprocess.communicate"
@@ -495,8 +736,7 @@ class TransactionSource(str, Enum):
495
736
  URL = "url"
496
737
  VIEW = "view"
497
738
 
498
- def __str__(self):
499
- # type: () -> str
739
+ def __str__(self) -> str:
500
740
  return self.value
501
741
 
502
742
 
@@ -524,68 +764,74 @@ class ClientConstructor:
524
764
 
525
765
  def __init__(
526
766
  self,
527
- dsn=None, # type: Optional[str]
767
+ dsn: Optional[str] = None,
528
768
  *,
529
- max_breadcrumbs=DEFAULT_MAX_BREADCRUMBS, # type: int
530
- release=None, # type: Optional[str]
531
- environment=None, # type: Optional[str]
532
- server_name=None, # type: Optional[str]
533
- shutdown_timeout=2, # type: float
534
- integrations=[], # type: Sequence[sentry_sdk.integrations.Integration] # noqa: B006
535
- in_app_include=[], # type: List[str] # noqa: B006
536
- in_app_exclude=[], # type: List[str] # noqa: B006
537
- default_integrations=True, # type: bool
538
- dist=None, # type: Optional[str]
539
- transport=None, # type: Optional[Union[sentry_sdk.transport.Transport, Type[sentry_sdk.transport.Transport], Callable[[Event], None]]]
540
- transport_queue_size=DEFAULT_QUEUE_SIZE, # type: int
541
- sample_rate=1.0, # type: float
542
- send_default_pii=None, # type: Optional[bool]
543
- http_proxy=None, # type: Optional[str]
544
- https_proxy=None, # type: Optional[str]
545
- ignore_errors=[], # type: Sequence[Union[type, str]] # noqa: B006
546
- max_request_body_size="medium", # type: str
547
- socket_options=None, # type: Optional[List[Tuple[int, int, int | bytes]]]
548
- keep_alive=False, # type: bool
549
- before_send=None, # type: Optional[EventProcessor]
550
- before_breadcrumb=None, # type: Optional[BreadcrumbProcessor]
551
- debug=None, # type: Optional[bool]
552
- attach_stacktrace=False, # type: bool
553
- ca_certs=None, # type: Optional[str]
554
- traces_sample_rate=None, # type: Optional[float]
555
- traces_sampler=None, # type: Optional[TracesSampler]
556
- profiles_sample_rate=None, # type: Optional[float]
557
- profiles_sampler=None, # type: Optional[TracesSampler]
558
- profiler_mode=None, # type: Optional[ProfilerMode]
559
- profile_lifecycle="manual", # type: Literal["manual", "trace"]
560
- profile_session_sample_rate=None, # type: Optional[float]
561
- auto_enabling_integrations=True, # type: bool
562
- disabled_integrations=None, # type: Optional[Sequence[sentry_sdk.integrations.Integration]]
563
- auto_session_tracking=True, # type: bool
564
- send_client_reports=True, # type: bool
565
- _experiments={}, # type: Experiments # noqa: B006
566
- proxy_headers=None, # type: Optional[Dict[str, str]]
567
- before_send_transaction=None, # type: Optional[TransactionProcessor]
568
- project_root=None, # type: Optional[str]
569
- include_local_variables=True, # type: Optional[bool]
570
- include_source_context=True, # type: Optional[bool]
571
- trace_propagation_targets=[ # noqa: B006
572
- MATCH_ALL
573
- ], # type: Optional[Sequence[str]]
574
- functions_to_trace=[], # type: Sequence[Dict[str, str]] # noqa: B006
575
- event_scrubber=None, # type: Optional[sentry_sdk.scrubber.EventScrubber]
576
- max_value_length=DEFAULT_MAX_VALUE_LENGTH, # type: int
577
- enable_backpressure_handling=True, # type: bool
578
- error_sampler=None, # type: Optional[Callable[[Event, Hint], Union[float, bool]]]
579
- enable_db_query_source=True, # type: bool
580
- db_query_source_threshold_ms=100, # type: int
581
- spotlight=None, # type: Optional[Union[bool, str]]
582
- cert_file=None, # type: Optional[str]
583
- key_file=None, # type: Optional[str]
584
- custom_repr=None, # type: Optional[Callable[..., Optional[str]]]
585
- add_full_stack=DEFAULT_ADD_FULL_STACK, # type: bool
586
- max_stack_frames=DEFAULT_MAX_STACK_FRAMES, # type: Optional[int]
587
- ):
588
- # type: (...) -> None
769
+ max_breadcrumbs: int = DEFAULT_MAX_BREADCRUMBS,
770
+ release: Optional[str] = None,
771
+ environment: Optional[str] = None,
772
+ server_name: Optional[str] = None,
773
+ shutdown_timeout: float = 2,
774
+ integrations: Sequence[sentry_sdk.integrations.Integration] = [], # noqa: B006
775
+ in_app_include: List[str] = [], # noqa: B006
776
+ in_app_exclude: List[str] = [], # noqa: B006
777
+ default_integrations: bool = True,
778
+ dist: Optional[str] = None,
779
+ transport: Optional[
780
+ Union[
781
+ sentry_sdk.transport.Transport,
782
+ Type[sentry_sdk.transport.Transport],
783
+ Callable[[Event], None],
784
+ ]
785
+ ] = None,
786
+ transport_queue_size: int = DEFAULT_QUEUE_SIZE,
787
+ sample_rate: float = 1.0,
788
+ send_default_pii: Optional[bool] = None,
789
+ http_proxy: Optional[str] = None,
790
+ https_proxy: Optional[str] = None,
791
+ ignore_errors: Sequence[Union[type, str]] = [], # noqa: B006
792
+ max_request_body_size: str = "medium",
793
+ socket_options: Optional[List[Tuple[int, int, int | bytes]]] = None,
794
+ keep_alive: Optional[bool] = None,
795
+ before_send: Optional[EventProcessor] = None,
796
+ before_breadcrumb: Optional[BreadcrumbProcessor] = None,
797
+ debug: Optional[bool] = None,
798
+ attach_stacktrace: bool = False,
799
+ ca_certs: Optional[str] = None,
800
+ traces_sample_rate: Optional[float] = None,
801
+ traces_sampler: Optional[TracesSampler] = None,
802
+ profiles_sample_rate: Optional[float] = None,
803
+ profiles_sampler: Optional[TracesSampler] = None,
804
+ profiler_mode: Optional[ProfilerMode] = None,
805
+ profile_lifecycle: Literal["manual", "trace"] = "manual",
806
+ profile_session_sample_rate: Optional[float] = None,
807
+ auto_enabling_integrations: bool = True,
808
+ disabled_integrations: Optional[
809
+ Sequence[sentry_sdk.integrations.Integration]
810
+ ] = None,
811
+ auto_session_tracking: bool = True,
812
+ send_client_reports: bool = True,
813
+ _experiments: Experiments = {}, # noqa: B006
814
+ proxy_headers: Optional[Dict[str, str]] = None,
815
+ before_send_transaction: Optional[TransactionProcessor] = None,
816
+ project_root: Optional[str] = None,
817
+ include_local_variables: Optional[bool] = True,
818
+ include_source_context: Optional[bool] = True,
819
+ trace_propagation_targets: Optional[Sequence[str]] = [MATCH_ALL], # noqa: B006
820
+ exclude_span_origins: Optional[Sequence[str]] = None,
821
+ functions_to_trace: Sequence[Dict[str, str]] = [], # noqa: B006
822
+ event_scrubber: Optional[sentry_sdk.scrubber.EventScrubber] = None,
823
+ max_value_length: int = DEFAULT_MAX_VALUE_LENGTH,
824
+ enable_backpressure_handling: bool = True,
825
+ error_sampler: Optional[Callable[[Event, Hint], Union[float, bool]]] = None,
826
+ enable_db_query_source: bool = True,
827
+ db_query_source_threshold_ms: int = 100,
828
+ spotlight: Optional[Union[bool, str]] = None,
829
+ cert_file: Optional[str] = None,
830
+ key_file: Optional[str] = None,
831
+ custom_repr: Optional[Callable[..., Optional[str]]] = None,
832
+ add_full_stack: bool = DEFAULT_ADD_FULL_STACK,
833
+ max_stack_frames: Optional[int] = DEFAULT_MAX_STACK_FRAMES,
834
+ ) -> None:
589
835
  """Initialize the Sentry SDK with the given parameters. All parameters described here can be used in a call to `sentry_sdk.init()`.
590
836
 
591
837
  :param dsn: The DSN tells the SDK where to send the events.
@@ -902,6 +1148,17 @@ class ClientConstructor:
902
1148
  If `trace_propagation_targets` is not provided, trace data is attached to every outgoing request from the
903
1149
  instrumented client.
904
1150
 
1151
+ :param exclude_span_origins: An optional list of strings or regex patterns to disable span creation based
1152
+ on span origin. When a span's origin would match any of the provided patterns, the span will not be
1153
+ created.
1154
+
1155
+ This can be useful to exclude automatic span creation from specific integrations without disabling the
1156
+ entire integration.
1157
+
1158
+ The option may contain a list of strings or regexes against which the span origins are matched.
1159
+ String entries do not have to be full matches, meaning a span origin is matched when it contains
1160
+ a string provided through the option.
1161
+
905
1162
  :param functions_to_trace: An optional list of functions that should be set up for tracing.
906
1163
 
907
1164
  For each function in the list, a span will be created when the function is executed.
@@ -965,8 +1222,7 @@ class ClientConstructor:
965
1222
  pass
966
1223
 
967
1224
 
968
- def _get_default_options():
969
- # type: () -> dict[str, Any]
1225
+ def _get_default_options() -> dict[str, Any]:
970
1226
  import inspect
971
1227
 
972
1228
  a = inspect.getfullargspec(ClientConstructor.__init__)
@@ -985,4 +1241,4 @@ DEFAULT_OPTIONS = _get_default_options()
985
1241
  del _get_default_options
986
1242
 
987
1243
 
988
- VERSION = "3.0.0a1"
1244
+ VERSION = "3.0.0a3"