sentry-sdk 3.0.0a4__py2.py3-none-any.whl → 3.0.0a6__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk might be problematic.
Files changed (33):
  1. sentry_sdk/__init__.py +1 -0
  2. sentry_sdk/ai/utils.py +7 -8
  3. sentry_sdk/api.py +68 -0
  4. sentry_sdk/client.py +93 -17
  5. sentry_sdk/consts.py +126 -9
  6. sentry_sdk/crons/api.py +5 -0
  7. sentry_sdk/integrations/anthropic.py +133 -73
  8. sentry_sdk/integrations/asgi.py +10 -9
  9. sentry_sdk/integrations/asyncio.py +85 -20
  10. sentry_sdk/integrations/clickhouse_driver.py +55 -28
  11. sentry_sdk/integrations/fastapi.py +1 -7
  12. sentry_sdk/integrations/gnu_backtrace.py +6 -3
  13. sentry_sdk/integrations/langchain.py +462 -218
  14. sentry_sdk/integrations/litestar.py +1 -1
  15. sentry_sdk/integrations/openai_agents/patches/agent_run.py +0 -2
  16. sentry_sdk/integrations/openai_agents/patches/runner.py +18 -15
  17. sentry_sdk/integrations/quart.py +1 -1
  18. sentry_sdk/integrations/starlette.py +1 -5
  19. sentry_sdk/integrations/starlite.py +1 -1
  20. sentry_sdk/opentelemetry/scope.py +3 -1
  21. sentry_sdk/opentelemetry/span_processor.py +1 -0
  22. sentry_sdk/scope.py +11 -11
  23. sentry_sdk/tracing.py +100 -18
  24. sentry_sdk/tracing_utils.py +330 -33
  25. sentry_sdk/transport.py +357 -62
  26. sentry_sdk/utils.py +23 -5
  27. sentry_sdk/worker.py +197 -3
  28. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/METADATA +3 -1
  29. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/RECORD +33 -33
  30. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/WHEEL +0 -0
  31. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/entry_points.txt +0 -0
  32. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/licenses/LICENSE +0 -0
  33. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/top_level.txt +0 -0
sentry_sdk/__init__.py CHANGED
@@ -49,6 +49,7 @@ __all__ = [ # noqa
     "start_session",
     "end_session",
     "set_transaction_name",
+    "update_current_span",
 ]

 # Initialize the debug support after everything is loaded
sentry_sdk/ai/utils.py CHANGED
@@ -8,8 +8,7 @@ from sentry_sdk.tracing import Span
 from sentry_sdk.utils import logger


-def _normalize_data(data: Any) -> Any:
-
+def _normalize_data(data: Any, unpack: bool = True) -> Any:
     # convert pydantic data (e.g. OpenAI v1+) to json compatible format
     if hasattr(data, "model_dump"):
         try:
@@ -18,17 +17,17 @@ def _normalize_data(data: Any) -> Any:
             logger.warning("Could not convert pydantic data to JSON: %s", e)
             return data
     if isinstance(data, list):
-        if len(data) == 1:
-            return _normalize_data(data[0])  # remove empty dimensions
-        return list(_normalize_data(x) for x in data)
+        if unpack and len(data) == 1:
+            return _normalize_data(data[0], unpack=unpack)  # remove empty dimensions
+        return list(_normalize_data(x, unpack=unpack) for x in data)
     if isinstance(data, dict):
-        return {k: _normalize_data(v) for (k, v) in data.items()}
+        return {k: _normalize_data(v, unpack=unpack) for (k, v) in data.items()}

     return data


-def set_data_normalized(span: Span, key: str, value: Any) -> None:
-    normalized = _normalize_data(value)
+def set_data_normalized(span: Span, key: str, value: Any, unpack: bool = True) -> None:
+    normalized = _normalize_data(value, unpack=unpack)
     if isinstance(normalized, (int, float, bool, str)):
         span.set_attribute(key, normalized)
     else:
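
The practical effect of the new `unpack` flag: the default `unpack=True` keeps the old behavior of collapsing single-element lists, while `unpack=False` preserves list shape at every level. A minimal sketch of the difference, calling the private helper directly purely for illustration:

    from sentry_sdk.ai.utils import _normalize_data

    messages = [{"role": "user", "content": "hello"}]

    # Default: a single-element list collapses to its only element (old semantics).
    assert _normalize_data(messages) == {"role": "user", "content": "hello"}

    # unpack=False: the list shape survives, which matters for attributes
    # that are list-valued by contract (e.g. message lists on gen_ai.* spans).
    assert _normalize_data(messages, unpack=False) == [{"role": "user", "content": "hello"}]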
sentry_sdk/api.py CHANGED
@@ -76,6 +76,7 @@ __all__ = [
     "start_session",
     "end_session",
     "set_transaction_name",
+    "update_current_span",
 ]

@@ -228,6 +229,14 @@ def flush(
     return get_client().flush(timeout=timeout, callback=callback)


+@clientmethod
+async def flush_async(
+    timeout: Optional[float] = None,
+    callback: Optional[Callable[[int, float], None]] = None,
+) -> None:
+    return await get_client().flush_async(timeout=timeout, callback=callback)
+
+
 def start_span(**kwargs: Any) -> Span:
     """
     Start and return a span.
@@ -341,3 +350,62 @@ def end_session() -> None:
 @scopemethod
 def set_transaction_name(name: str, source: Optional[str] = None) -> None:
     return get_current_scope().set_transaction_name(name, source)
+
+
+def update_current_span(
+    op: Optional[str] = None,
+    name: Optional[str] = None,
+    attributes: Optional[dict[str, Union[str, int, float, bool]]] = None,
+) -> None:
+    """
+    Update the current active span with the provided parameters.
+
+    This function allows you to modify properties of the currently active span.
+    If no span is currently active, this function will do nothing.
+
+    :param op: The operation name for the span. This is a high-level description
+        of what the span represents (e.g., "http.client", "db.query").
+        You can use predefined constants from :py:class:`sentry_sdk.consts.OP`
+        or provide your own string. If not provided, the span's operation will
+        remain unchanged.
+    :type op: str or None
+
+    :param name: The human-readable name/description for the span. This provides
+        more specific details about what the span represents (e.g., "GET /api/users",
+        "SELECT * FROM users"). If not provided, the span's name will remain unchanged.
+    :type name: str or None
+
+    :param attributes: A dictionary of key-value pairs to add as attributes to the span.
+        Attribute values must be strings, integers, floats, or booleans. These
+        attributes will be merged with any existing span data. If not provided,
+        no attributes will be added.
+    :type attributes: dict[str, Union[str, int, float, bool]] or None
+
+    :returns: None
+
+    .. versionadded:: 2.35.0
+
+    Example::
+
+        import sentry_sdk
+        from sentry_sdk.consts import OP
+
+        sentry_sdk.update_current_span(
+            op=OP.FUNCTION,
+            name="process_user_data",
+            attributes={"user_id": 123, "batch_size": 50}
+        )
+    """
+    current_span = get_current_span()
+
+    if current_span is None:
+        return
+
+    if op is not None:
+        current_span.op = op
+
+    if name is not None:
+        current_span.name = name
+
+    if attributes is not None:
+        current_span.set_attributes(attributes)
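
For async applications, this adds a top-level `flush_async` alongside `flush`. A hedged sketch of draining the queue from async code via the client; the `_experiments={"transport_async": True}` switch is an assumption based on the new `transport_async` option added in consts.py below, and the DSN is a placeholder:

    import asyncio

    import sentry_sdk

    async def main() -> None:
        # Assumption: the async transport is opted into via the experimental
        # "transport_async" flag added to Experiments in consts.py below.
        sentry_sdk.init(
            dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
            _experiments={"transport_async": True},
        )
        sentry_sdk.capture_message("flushing from async code")
        # With a non-async transport this logs a debug message and returns
        # (see the guard added in client.py below).
        await sentry_sdk.get_client().flush_async(timeout=2.0)

    asyncio.run(main())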
sentry_sdk/client.py CHANGED
@@ -25,7 +25,7 @@ from sentry_sdk.utils import (
 )
 from sentry_sdk.serializer import serialize
 from sentry_sdk.tracing import trace
-from sentry_sdk.transport import BaseHttpTransport, make_transport
+from sentry_sdk.transport import HttpTransportCore, make_transport, AsyncHttpTransport
 from sentry_sdk.consts import (
     SPANDATA,
     DEFAULT_MAX_VALUE_LENGTH,
@@ -214,6 +214,12 @@ class BaseClient:
     def flush(self, *args: Any, **kwargs: Any) -> None:
         return None

+    async def close_async(self, *args: Any, **kwargs: Any) -> None:
+        return None
+
+    async def flush_async(self, *args: Any, **kwargs: Any) -> None:
+        return None
+
     def __enter__(self) -> BaseClient:
         return self

@@ -406,7 +412,7 @@ class _Client(BaseClient):
             self.monitor
             or self.log_batcher
             or has_profiling_enabled(self.options)
-            or isinstance(self.transport, BaseHttpTransport)
+            or isinstance(self.transport, HttpTransportCore)
         ):
             # If we have anything on that could spawn a background thread, we
             # need to check if it's safe to use them.
@@ -442,12 +448,12 @@ class _Client(BaseClient):

         previous_total_spans: Optional[int] = None
         previous_total_breadcrumbs: Optional[int] = None
+        is_transaction = event.get("type") == "transaction"

         if event.get("timestamp") is None:
             event["timestamp"] = datetime.now(timezone.utc)

         if scope is not None:
-            is_transaction = event.get("type") == "transaction"
             spans_before = len(event.get("spans", []))
             event_ = scope.apply_to_event(event, hint, self.options)

@@ -488,7 +494,8 @@ class _Client(BaseClient):
         )

         if (
-            self.options["attach_stacktrace"]
+            not is_transaction
+            and self.options["attach_stacktrace"]
             and "exception" not in event
             and "stacktrace" not in event
             and "threads" not in event
@@ -917,6 +924,14 @@ class _Client(BaseClient):

         return self.integrations.get(integration_name)

+    def _close_components(self) -> None:
+        """Kill all client components in the correct order."""
+        self.session_flusher.kill()
+        if self.log_batcher is not None:
+            self.log_batcher.kill()
+        if self.monitor:
+            self.monitor.kill()
+
     def close(
         self,
         timeout: Optional[float] = None,
@@ -927,19 +942,43 @@ class _Client(BaseClient):
         semantics as :py:meth:`Client.flush`.
         """
         if self.transport is not None:
+            if isinstance(self.transport, AsyncHttpTransport) and hasattr(
+                self.transport, "loop"
+            ):
+                logger.debug(
+                    "close() used with AsyncHttpTransport, aborting. Please use close_async() instead."
+                )
+                return
             self.flush(timeout=timeout, callback=callback)
-
-            self.session_flusher.kill()
-
-            if self.log_batcher is not None:
-                self.log_batcher.kill()
-
-            if self.monitor:
-                self.monitor.kill()
-
+            self._close_components()
             self.transport.kill()
             self.transport = None

+    async def close_async(
+        self,
+        timeout: Optional[float] = None,
+        callback: Optional[Callable[[int, float], None]] = None,
+    ) -> None:
+        """
+        Asynchronously close the client and shut down the transport. Arguments have the same
+        semantics as :py:meth:`Client.flush_async`.
+        """
+        if self.transport is not None:
+            if not (
+                isinstance(self.transport, AsyncHttpTransport)
+                and hasattr(self.transport, "loop")
+            ):
+                logger.debug(
+                    "close_async() used with non-async transport, aborting. Please use close() instead."
+                )
+                return
+            await self.flush_async(timeout=timeout, callback=callback)
+            self._close_components()
+            kill_task = self.transport.kill()  # type: ignore
+            if kill_task is not None:
+                await kill_task
+            self.transport = None
+
     def flush(
         self,
         timeout: Optional[float] = None,
@@ -953,15 +992,52 @@ class _Client(BaseClient):
         :param callback: Is invoked with the number of pending events and the configured timeout.
         """
         if self.transport is not None:
+            if isinstance(self.transport, AsyncHttpTransport) and hasattr(
+                self.transport, "loop"
+            ):
+                logger.debug(
+                    "flush() used with AsyncHttpTransport, aborting. Please use flush_async() instead."
+                )
+                return
             if timeout is None:
                 timeout = self.options["shutdown_timeout"]
-            self.session_flusher.flush()
-
-            if self.log_batcher is not None:
-                self.log_batcher.flush()
+            self._flush_components()

             self.transport.flush(timeout=timeout, callback=callback)

+    async def flush_async(
+        self,
+        timeout: Optional[float] = None,
+        callback: Optional[Callable[[int, float], None]] = None,
+    ) -> None:
+        """
+        Asynchronously wait for the current events to be sent.
+
+        :param timeout: Wait for at most `timeout` seconds. If no `timeout` is provided, the `shutdown_timeout` option value is used.
+
+        :param callback: Is invoked with the number of pending events and the configured timeout.
+        """
+        if self.transport is not None:
+            if not (
+                isinstance(self.transport, AsyncHttpTransport)
+                and hasattr(self.transport, "loop")
+            ):
+                logger.debug(
+                    "flush_async() used with non-async transport, aborting. Please use flush() instead."
+                )
+                return
+            if timeout is None:
+                timeout = self.options["shutdown_timeout"]
+            self._flush_components()
+            flush_task = self.transport.flush(timeout=timeout, callback=callback)  # type: ignore
+            if flush_task is not None:
+                await flush_task
+
+    def _flush_components(self) -> None:
+        self.session_flusher.flush()
+        if self.log_batcher is not None:
+            self.log_batcher.flush()
+
     def __enter__(self) -> _Client:
         return self

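The sync and async lifecycles are deliberately kept apart: `close()`/`flush()` abort with a debug log when the transport is an `AsyncHttpTransport`, and `close_async()`/`flush_async()` abort on a non-async transport. A sketch of the intended async shutdown path (init omitted; assumes an async transport was configured as in the earlier example):

    import sentry_sdk

    async def shutdown() -> None:
        client = sentry_sdk.get_client()
        # Drains pending events, kills the session flusher, log batcher and
        # monitor, then awaits the transport's kill task before dropping it.
        await client.close_async(timeout=5.0)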
sentry_sdk/consts.py CHANGED
@@ -78,6 +78,7 @@ if TYPE_CHECKING:
             "transport_compression_algo": Optional[CompressionAlgo],
             "transport_num_pools": Optional[int],
             "transport_http2": Optional[bool],
+            "transport_async": Optional[bool],
         },
         total=False,
     )
@@ -95,6 +96,17 @@ FALSE_VALUES = [
 ]


+class SPANTEMPLATE(str, Enum):
+    DEFAULT = "default"
+    AI_AGENT = "ai_agent"
+    AI_TOOL = "ai_tool"
+    AI_CHAT = "ai_chat"
+
+    def __str__(self):
+        # type: () -> str
+        return self.value
+
+
 class SPANDATA:
     """
     Additional information describing the type of the span.
@@ -103,6 +115,9 @@ class SPANDATA:

     AI_CITATIONS = "ai.citations"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     References or sources cited by the AI model in its response.
     Example: ["Smith et al. 2020", "Jones 2019"]
     """
@@ -115,65 +130,97 @@ class SPANDATA:

     AI_DOCUMENTS = "ai.documents"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Documents or content chunks used as context for the AI model.
     Example: ["doc1.txt", "doc2.pdf"]
     """

     AI_FINISH_REASON = "ai.finish_reason"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_RESPONSE_FINISH_REASONS instead.
+
     The reason why the model stopped generating.
     Example: "length"
     """

     AI_FREQUENCY_PENALTY = "ai.frequency_penalty"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_FREQUENCY_PENALTY instead.
+
     Used to reduce repetitiveness of generated tokens.
     Example: 0.5
     """

     AI_FUNCTION_CALL = "ai.function_call"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead.
+
     For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
     """

     AI_GENERATION_ID = "ai.generation_id"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_RESPONSE_ID instead.
+
     Unique identifier for the completion.
     Example: "gen_123abc"
     """

     AI_INPUT_MESSAGES = "ai.input_messages"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_MESSAGES instead.
+
     The input messages to an LLM call.
     Example: [{"role": "user", "message": "hello"}]
     """

     AI_LOGIT_BIAS = "ai.logit_bias"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     For an AI model call, the logit bias
     """

     AI_METADATA = "ai.metadata"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Extra metadata passed to an AI pipeline step.
     Example: {"executed_function": "add_integers"}
     """

     AI_MODEL_ID = "ai.model_id"
     """
-    The unique descriptor of the model being execugted
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_MODEL or GEN_AI_RESPONSE_MODEL instead.
+
+    The unique descriptor of the model being executed.
     Example: gpt-4
     """

     AI_PIPELINE_NAME = "ai.pipeline.name"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_PIPELINE_NAME instead.
+
     Name of the AI pipeline or chain being executed.
-    DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
     Example: "qa-pipeline"
     """

     AI_PREAMBLE = "ai.preamble"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     For an AI model call, the preamble parameter.
     Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
     Example: "You are now a clown."
@@ -181,6 +228,9 @@ class SPANDATA:

     AI_PRESENCE_PENALTY = "ai.presence_penalty"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_PRESENCE_PENALTY instead.
+
     Used to reduce repetitiveness of generated tokens.
     Example: 0.5
     """
@@ -193,89 +243,133 @@ class SPANDATA:

     AI_RAW_PROMPTING = "ai.raw_prompting"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Minimize pre-processing done to the prompt sent to the LLM.
     Example: true
     """

     AI_RESPONSE_FORMAT = "ai.response_format"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     For an AI model call, the format of the response
     """

     AI_RESPONSES = "ai.responses"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_RESPONSE_TEXT instead.
+
     The responses to an AI model call. Always as a list.
     Example: ["hello", "world"]
     """

     AI_SEARCH_QUERIES = "ai.search_queries"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Queries used to search for relevant context or documents.
     Example: ["climate change effects", "renewable energy"]
     """

     AI_SEARCH_REQUIRED = "ai.is_search_required"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Boolean indicating if the model needs to perform a search.
     Example: true
     """

     AI_SEARCH_RESULTS = "ai.search_results"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Results returned from search queries for context.
     Example: ["Result 1", "Result 2"]
     """

     AI_SEED = "ai.seed"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_SEED instead.
+
     The seed, ideally models given the same seed and same other parameters will produce the exact same output.
     Example: 123.45
     """

     AI_STREAMING = "ai.streaming"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_RESPONSE_STREAMING instead.
+
     Whether or not the AI model call's response was streamed back asynchronously
-    DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
     Example: true
     """

     AI_TAGS = "ai.tags"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Tags that describe an AI pipeline step.
     Example: {"executed_function": "add_integers"}
     """

     AI_TEMPERATURE = "ai.temperature"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_TEMPERATURE instead.
+
     For an AI model call, the temperature parameter. Temperature essentially means how random the output will be.
     Example: 0.5
     """

     AI_TEXTS = "ai.texts"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Raw text inputs provided to the model.
     Example: ["What is machine learning?"]
     """

     AI_TOP_K = "ai.top_k"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_TOP_K instead.
+
     For an AI model call, the top_k parameter. Top_k essentially controls how random the output will be.
     Example: 35
     """

     AI_TOP_P = "ai.top_p"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_TOP_P instead.
+
     For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
     Example: 0.5
     """

     AI_TOOL_CALLS = "ai.tool_calls"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead.
+
     For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
     """

     AI_TOOLS = "ai.tools"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_REQUEST_AVAILABLE_TOOLS instead.
+
     For an AI model call, the functions that are available
     """

@@ -287,6 +381,9 @@ class SPANDATA:

     AI_WARNINGS = "ai.warnings"
     """
+    .. deprecated::
+        This attribute is deprecated. Use GEN_AI_* attributes instead.
+
     Warning messages generated during model execution.
     Example: ["Token limit exceeded"]
     """
@@ -391,6 +488,18 @@ class SPANDATA:
     Example: "qa-pipeline"
     """

+    GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons"
+    """
+    The reason why the model stopped generating.
+    Example: "COMPLETE"
+    """
+
+    GEN_AI_RESPONSE_ID = "gen_ai.response.id"
+    """
+    Unique identifier for the completion.
+    Example: "gen_123abc"
+    """
+
     GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
     """
     Exact model identifier used to generate the response
@@ -451,12 +560,24 @@ class SPANDATA:
     Example: 0.1
     """

+    GEN_AI_REQUEST_SEED = "gen_ai.request.seed"
+    """
+    The seed, ideally models given the same seed and same other parameters will produce the exact same output.
+    Example: "1234567890"
+    """
+
     GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
     """
     The temperature parameter used to control randomness in the output.
     Example: 0.7
     """

+    GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k"
+    """
+    Limits the model to only consider the K most likely next tokens, where K is an integer (e.g., top_k=20 means only the 20 highest probability tokens are considered).
+    Example: 35
+    """
+
     GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
     """
     The top_p parameter used to control diversity via nucleus sampling.
@@ -683,6 +804,7 @@ class OP:
     GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
     GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
     GEN_AI_HANDOFF = "gen_ai.handoff"
+    GEN_AI_PIPELINE = "gen_ai.pipeline"
     GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
     GEN_AI_RESPONSES = "gen_ai.responses"
     GRAPHQL_EXECUTE = "graphql.execute"
@@ -712,11 +834,6 @@ class OP:
     HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
         "ai.chat_completions.create.huggingface_hub"
     )
-    LANGCHAIN_PIPELINE = "ai.pipeline.langchain"
-    LANGCHAIN_RUN = "ai.run.langchain"
-    LANGCHAIN_TOOL = "ai.tool.langchain"
-    LANGCHAIN_AGENT = "ai.agent.langchain"
-    LANGCHAIN_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.langchain"
     QUEUE_PROCESS = "queue.process"
     QUEUE_PUBLISH = "queue.publish"
     QUEUE_SUBMIT_ARQ = "queue.submit.arq"
@@ -1272,4 +1389,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options


-VERSION = "3.0.0a4"
+VERSION = "3.0.0a6"
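
Taken together, the deprecations point one way: the `SPANDATA.AI_*` keys give way to their `GEN_AI_*` equivalents. A hedged migration sketch using only attributes confirmed in this diff (span values are illustrative):

    import sentry_sdk
    from sentry_sdk.consts import SPANDATA

    with sentry_sdk.start_span(op="gen_ai.chat", name="chat gpt-4") as span:
        # Before (deprecated): span.set_attribute(SPANDATA.AI_MODEL_ID, "gpt-4")
        span.set_attribute(SPANDATA.GEN_AI_REQUEST_MODEL, "gpt-4")
        span.set_attribute(SPANDATA.GEN_AI_REQUEST_SEED, "1234567890")
        span.set_attribute(SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, "COMPLETE")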
sentry_sdk/crons/api.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 import uuid

 import sentry_sdk
+from sentry_sdk.utils import logger

 from typing import TYPE_CHECKING

@@ -53,4 +54,8 @@ def capture_checkin(

     sentry_sdk.capture_event(check_in_event)

+    logger.debug(
+        f"[Crons] Captured check-in ({check_in_event.get('check_in_id')}): {check_in_event.get('monitor_slug')} -> {check_in_event.get('status')}"
+    )
+
     return check_in_event["check_in_id"]
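
With `debug=True`, the new log line makes check-ins visible during development. A sketch of how the output would surface (placeholder DSN and monitor slug):

    import sentry_sdk
    from sentry_sdk.crons import capture_checkin
    from sentry_sdk.crons.consts import MonitorStatus

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        debug=True,
    )

    # Expected debug output, per the f-string above:
    # [Crons] Captured check-in (<check_in_id>): nightly-job -> in_progress
    check_in_id = capture_checkin(
        monitor_slug="nightly-job", status=MonitorStatus.IN_PROGRESS
    )
    capture_checkin(
        monitor_slug="nightly-job",
        check_in_id=check_in_id,
        status=MonitorStatus.OK,
    )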