sentry-sdk 2.33.0__py2.py3-none-any.whl → 2.33.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of sentry-sdk has been flagged as a potentially problematic release.

sentry_sdk/ai/monitoring.py CHANGED
@@ -32,7 +32,7 @@ def ai_track(description, **span_kwargs):
         def sync_wrapped(*args, **kwargs):
             # type: (Any, Any) -> Any
             curr_pipeline = _ai_pipeline_name.get()
-            op = span_kwargs.get("op", "ai.run" if curr_pipeline else "ai.pipeline")
+            op = span_kwargs.pop("op", "ai.run" if curr_pipeline else "ai.pipeline")
 
             with start_span(name=description, op=op, **span_kwargs) as span:
                 for k, v in kwargs.pop("sentry_tags", {}).items():
@@ -61,7 +61,7 @@ def ai_track(description, **span_kwargs):
         async def async_wrapped(*args, **kwargs):
             # type: (Any, Any) -> Any
             curr_pipeline = _ai_pipeline_name.get()
-            op = span_kwargs.get("op", "ai.run" if curr_pipeline else "ai.pipeline")
+            op = span_kwargs.pop("op", "ai.run" if curr_pipeline else "ai.pipeline")
 
             with start_span(name=description, op=op, **span_kwargs) as span:
                 for k, v in kwargs.pop("sentry_tags", {}).items():
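The only behavioral change in these two hunks is `span_kwargs.get(...)` becoming `span_kwargs.pop(...)`. With `get`, the `op` key stays in `span_kwargs`, so the subsequent `start_span(name=description, op=op, **span_kwargs)` call would receive `op` twice whenever a caller passed an explicit `op` to `@ai_track`. A minimal sketch of that failure mode, using a hypothetical stand-in for `start_span` rather than the SDK's real function:

def start_span(name, op, **extra):
    # stand-in for sentry_sdk.start_span, only to show the duplicate-kwarg issue
    return (name, op, extra)

span_kwargs = {"op": "ai.chat"}        # kwargs captured by the decorator
op = span_kwargs.get("op", "ai.run")   # old behavior: "op" stays in span_kwargs

try:
    start_span(name="my pipeline", op=op, **span_kwargs)
except TypeError as exc:
    print(exc)  # ... got multiple values for keyword argument 'op'

op = span_kwargs.pop("op", "ai.run")   # new behavior: "op" is removed first
start_span(name="my pipeline", op=op, **span_kwargs)  # works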
@@ -96,25 +96,40 @@ def ai_track(description, **span_kwargs):
 
 
 def record_token_usage(
-    span, prompt_tokens=None, completion_tokens=None, total_tokens=None
+    span,
+    input_tokens=None,
+    input_tokens_cached=None,
+    output_tokens=None,
+    output_tokens_reasoning=None,
+    total_tokens=None,
 ):
-    # type: (Span, Optional[int], Optional[int], Optional[int]) -> None
+    # type: (Span, Optional[int], Optional[int], Optional[int], Optional[int], Optional[int]) -> None
+
+    # TODO: move pipeline name elsewhere
     ai_pipeline_name = get_ai_pipeline_name()
     if ai_pipeline_name:
         span.set_data(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
 
-    if prompt_tokens is not None:
-        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens)
+    if input_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+
+    if input_tokens_cached is not None:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
+            input_tokens_cached,
+        )
+
+    if output_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
 
-    if completion_tokens is not None:
-        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens)
+    if output_tokens_reasoning is not None:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
+            output_tokens_reasoning,
+        )
 
-    if (
-        total_tokens is None
-        and prompt_tokens is not None
-        and completion_tokens is not None
-    ):
-        total_tokens = prompt_tokens + completion_tokens
+    if total_tokens is None and input_tokens is not None and output_tokens is not None:
+        total_tokens = input_tokens + output_tokens
 
     if total_tokens is not None:
         span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)
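After this hunk, `record_token_usage` takes each counter as a separate named parameter and gains two new optional counters: cached input tokens and reasoning output tokens. A sketch of how an integration might call it after the change; the span is whatever span the caller already has open, and the numbers are made up:

from sentry_sdk.ai.monitoring import record_token_usage

def record_usage_example(span):
    # Illustrative values; real integrations read them from the provider's
    # usage object (see the anthropic/openai hunks below).
    record_token_usage(
        span,
        input_tokens=1200,
        input_tokens_cached=800,       # new optional counter in this release
        output_tokens=350,
        output_tokens_reasoning=120,   # new optional counter in this release
        # total_tokens is derived as input_tokens + output_tokens when omitted
    )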
sentry_sdk/consts.py CHANGED
@@ -1181,4 +1181,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options
 
 
-VERSION = "2.33.0"
+VERSION = "2.33.1"
sentry_sdk/integrations/anthropic.py CHANGED
@@ -65,7 +65,13 @@ def _calculate_token_usage(result, span):
             output_tokens = usage.output_tokens
 
     total_tokens = input_tokens + output_tokens
-    record_token_usage(span, input_tokens, output_tokens, total_tokens)
+
+    record_token_usage(
+        span,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+        total_tokens=total_tokens,
+    )
 
 
 def _get_responses(content):
@@ -126,7 +132,12 @@ def _add_ai_data_to_span(
                 [{"type": "text", "text": complete_message}],
             )
         total_tokens = input_tokens + output_tokens
-        record_token_usage(span, input_tokens, output_tokens, total_tokens)
+        record_token_usage(
+            span,
+            input_tokens=input_tokens,
+            output_tokens=output_tokens,
+            total_tokens=total_tokens,
+        )
         span.set_data(SPANDATA.AI_STREAMING, True)
 
 
sentry_sdk/integrations/cohere.py CHANGED
@@ -116,14 +116,14 @@ def _wrap_chat(f, streaming):
         if hasattr(res.meta, "billed_units"):
             record_token_usage(
                 span,
-                prompt_tokens=res.meta.billed_units.input_tokens,
-                completion_tokens=res.meta.billed_units.output_tokens,
+                input_tokens=res.meta.billed_units.input_tokens,
+                output_tokens=res.meta.billed_units.output_tokens,
             )
         elif hasattr(res.meta, "tokens"):
             record_token_usage(
                 span,
-                prompt_tokens=res.meta.tokens.input_tokens,
-                completion_tokens=res.meta.tokens.output_tokens,
+                input_tokens=res.meta.tokens.input_tokens,
+                output_tokens=res.meta.tokens.output_tokens,
             )
 
         if hasattr(res.meta, "warnings"):
@@ -262,7 +262,7 @@ def _wrap_embed(f):
             ):
                 record_token_usage(
                     span,
-                    prompt_tokens=res.meta.billed_units.input_tokens,
+                    input_tokens=res.meta.billed_units.input_tokens,
                     total_tokens=res.meta.billed_units.input_tokens,
                 )
             return res
sentry_sdk/integrations/huggingface_hub.py CHANGED
@@ -111,7 +111,10 @@ def _wrap_text_generation(f):
                         [res.generated_text],
                     )
                 if res.details is not None and res.details.generated_tokens > 0:
-                    record_token_usage(span, total_tokens=res.details.generated_tokens)
+                    record_token_usage(
+                        span,
+                        total_tokens=res.details.generated_tokens,
+                    )
                 span.__exit__(None, None, None)
                 return res
 
@@ -145,7 +148,10 @@ def _wrap_text_generation(f):
                             span, SPANDATA.AI_RESPONSES, "".join(data_buf)
                         )
                 if tokens_used > 0:
-                    record_token_usage(span, total_tokens=tokens_used)
+                    record_token_usage(
+                        span,
+                        total_tokens=tokens_used,
+                    )
                 span.__exit__(None, None, None)
 
             return new_details_iterator()
sentry_sdk/integrations/langchain.py CHANGED
@@ -279,15 +279,15 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
             if token_usage:
                 record_token_usage(
                     span_data.span,
-                    token_usage.get("prompt_tokens"),
-                    token_usage.get("completion_tokens"),
-                    token_usage.get("total_tokens"),
+                    input_tokens=token_usage.get("prompt_tokens"),
+                    output_tokens=token_usage.get("completion_tokens"),
+                    total_tokens=token_usage.get("total_tokens"),
                 )
             else:
                 record_token_usage(
                     span_data.span,
-                    span_data.num_prompt_tokens,
-                    span_data.num_completion_tokens,
+                    input_tokens=span_data.num_prompt_tokens,
+                    output_tokens=span_data.num_completion_tokens,
                 )
 
             self._exit_span(span_data, run_id)
sentry_sdk/integrations/openai.py CHANGED
@@ -70,48 +70,73 @@ def _capture_exception(exc):
     sentry_sdk.capture_event(event, hint=hint)
 
 
-def _calculate_chat_completion_usage(
+def _get_usage(usage, names):
+    # type: (Any, List[str]) -> int
+    for name in names:
+        if hasattr(usage, name) and isinstance(getattr(usage, name), int):
+            return getattr(usage, name)
+    return 0
+
+
+def _calculate_token_usage(
     messages, response, span, streaming_message_responses, count_tokens
 ):
     # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
-    completion_tokens = 0  # type: Optional[int]
-    prompt_tokens = 0  # type: Optional[int]
+    input_tokens = 0  # type: Optional[int]
+    input_tokens_cached = 0  # type: Optional[int]
+    output_tokens = 0  # type: Optional[int]
+    output_tokens_reasoning = 0  # type: Optional[int]
     total_tokens = 0  # type: Optional[int]
+
     if hasattr(response, "usage"):
-        if hasattr(response.usage, "completion_tokens") and isinstance(
-            response.usage.completion_tokens, int
-        ):
-            completion_tokens = response.usage.completion_tokens
-        if hasattr(response.usage, "prompt_tokens") and isinstance(
-            response.usage.prompt_tokens, int
-        ):
-            prompt_tokens = response.usage.prompt_tokens
-        if hasattr(response.usage, "total_tokens") and isinstance(
-            response.usage.total_tokens, int
-        ):
-            total_tokens = response.usage.total_tokens
+        input_tokens = _get_usage(response.usage, ["input_tokens", "prompt_tokens"])
+        if hasattr(response.usage, "input_tokens_details"):
+            input_tokens_cached = _get_usage(
+                response.usage.input_tokens_details, ["cached_tokens"]
+            )
 
-    if prompt_tokens == 0:
+        output_tokens = _get_usage(
+            response.usage, ["output_tokens", "completion_tokens"]
+        )
+        if hasattr(response.usage, "output_tokens_details"):
+            output_tokens_reasoning = _get_usage(
+                response.usage.output_tokens_details, ["reasoning_tokens"]
+            )
+
+        total_tokens = _get_usage(response.usage, ["total_tokens"])
+
+    # Manually count tokens
+    # TODO: when implementing responses API, check for responses API
+    if input_tokens == 0:
         for message in messages:
             if "content" in message:
-                prompt_tokens += count_tokens(message["content"])
+                input_tokens += count_tokens(message["content"])
 
-    if completion_tokens == 0:
+    # TODO: when implementing responses API, check for responses API
+    if output_tokens == 0:
         if streaming_message_responses is not None:
             for message in streaming_message_responses:
-                completion_tokens += count_tokens(message)
+                output_tokens += count_tokens(message)
         elif hasattr(response, "choices"):
            for choice in response.choices:
                 if hasattr(choice, "message"):
-                    completion_tokens += count_tokens(choice.message)
-
-    if prompt_tokens == 0:
-        prompt_tokens = None
-    if completion_tokens == 0:
-        completion_tokens = None
-    if total_tokens == 0:
-        total_tokens = None
-    record_token_usage(span, prompt_tokens, completion_tokens, total_tokens)
+                    output_tokens += count_tokens(choice.message)
+
+    # Do not set token data if it is 0
+    input_tokens = input_tokens or None
+    input_tokens_cached = input_tokens_cached or None
+    output_tokens = output_tokens or None
+    output_tokens_reasoning = output_tokens_reasoning or None
+    total_tokens = total_tokens or None
+
+    record_token_usage(
+        span,
+        input_tokens=input_tokens,
+        input_tokens_cached=input_tokens_cached,
+        output_tokens=output_tokens,
+        output_tokens_reasoning=output_tokens_reasoning,
+        total_tokens=total_tokens,
+    )
 
 
 def _new_chat_completion_common(f, *args, **kwargs):
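The new `_get_usage` helper lets the same lookup handle both the Chat Completions-style usage fields (`prompt_tokens`/`completion_tokens`) and the newer `input_tokens`/`output_tokens` naming. A standalone sketch of that lookup order, restating the helper from the hunk above and exercising it with made-up usage objects:

from types import SimpleNamespace


def _get_usage(usage, names):
    # first attribute in `names` that exists and is an int wins; otherwise 0
    for name in names:
        if hasattr(usage, name) and isinstance(getattr(usage, name), int):
            return getattr(usage, name)
    return 0


chat_usage = SimpleNamespace(prompt_tokens=10, completion_tokens=4, total_tokens=14)
new_usage = SimpleNamespace(input_tokens=10, output_tokens=4, total_tokens=14)

assert _get_usage(chat_usage, ["input_tokens", "prompt_tokens"]) == 10
assert _get_usage(new_usage, ["input_tokens", "prompt_tokens"]) == 10
assert _get_usage(SimpleNamespace(), ["total_tokens"]) == 0  # missing -> falls back to 0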
@@ -158,9 +183,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
                     SPANDATA.AI_RESPONSES,
                     list(map(lambda x: x.message, res.choices)),
                 )
-            _calculate_chat_completion_usage(
-                messages, res, span, None, integration.count_tokens
-            )
+            _calculate_token_usage(messages, res, span, None, integration.count_tokens)
             span.__exit__(None, None, None)
         elif hasattr(res, "_iterator"):
             data_buf: list[list[str]] = []  # one for each choice
@@ -191,7 +214,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
                         set_data_normalized(
                             span, SPANDATA.AI_RESPONSES, all_responses
                         )
-                    _calculate_chat_completion_usage(
+                    _calculate_token_usage(
                         messages,
                         res,
                         span,
@@ -224,7 +247,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
                         set_data_normalized(
                             span, SPANDATA.AI_RESPONSES, all_responses
                         )
-                    _calculate_chat_completion_usage(
+                    _calculate_token_usage(
                         messages,
                         res,
                         span,
@@ -341,22 +364,26 @@ def _new_embeddings_create_common(f, *args, **kwargs):
 
         response = yield f, args, kwargs
 
-        prompt_tokens = 0
+        input_tokens = 0
         total_tokens = 0
         if hasattr(response, "usage"):
             if hasattr(response.usage, "prompt_tokens") and isinstance(
                 response.usage.prompt_tokens, int
             ):
-                prompt_tokens = response.usage.prompt_tokens
+                input_tokens = response.usage.prompt_tokens
             if hasattr(response.usage, "total_tokens") and isinstance(
                 response.usage.total_tokens, int
             ):
                 total_tokens = response.usage.total_tokens
 
-        if prompt_tokens == 0:
-            prompt_tokens = integration.count_tokens(kwargs["input"] or "")
+        if input_tokens == 0:
+            input_tokens = integration.count_tokens(kwargs["input"] or "")
 
-        record_token_usage(span, prompt_tokens, None, total_tokens or prompt_tokens)
+        record_token_usage(
+            span,
+            input_tokens=input_tokens,
+            total_tokens=total_tokens or input_tokens,
+        )
 
 
         return response
sentry_sdk/monitor.py CHANGED
@@ -118,7 +118,3 @@ class Monitor:
     def kill(self):
         # type: () -> None
         self._running = False
-
-    def __del__(self):
-        # type: () -> None
-        self.kill()
sentry_sdk/sessions.py CHANGED
@@ -271,7 +271,3 @@ class SessionFlusher:
    def kill(self):
        # type: (...) -> None
        self.__shutdown_requested.set()
-
-    def __del__(self):
-        # type: (...) -> None
-        self.kill()
sentry_sdk/transport.py CHANGED
@@ -158,13 +158,6 @@ class Transport(ABC):
         # type: (Self) -> bool
         return True
 
-    def __del__(self):
-        # type: (Self) -> None
-        try:
-            self.kill()
-        except Exception:
-            pass
-
 
 def _parse_rate_limits(header, now=None):
     # type: (str, Optional[datetime]) -> Iterable[Tuple[Optional[EventDataCategory], datetime]]
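These three hunks drop the `__del__` finalizers that used to call `kill()` when the monitor, session flusher, or transport object was garbage-collected, so teardown now goes through the SDK's normal lifecycle paths rather than finalizers. A hedged sketch of shutting down explicitly instead of relying on garbage collection; the DSN and timeouts are placeholders:

import sentry_sdk

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

sentry_sdk.capture_message("shutting down")

# Flush pending events with a bounded wait, then close the client, which
# also tears down the transport's background worker.
sentry_sdk.flush(timeout=2.0)
sentry_sdk.get_client().close(timeout=2.0)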
sentry_sdk/utils.py CHANGED
@@ -591,9 +591,14 @@ def serialize_frame(
     if tb_lineno is None:
         tb_lineno = frame.f_lineno
 
+    try:
+        os_abs_path = os.path.abspath(abs_path) if abs_path else None
+    except Exception:
+        os_abs_path = None
+
     rv = {
         "filename": filename_for_module(module, abs_path) or None,
-        "abs_path": os.path.abspath(abs_path) if abs_path else None,
+        "abs_path": os_abs_path,
         "function": function or "<unknown>",
         "module": module,
         "lineno": tb_lineno,
METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sentry-sdk
-Version: 2.33.0
+Version: 2.33.1
 Summary: Python client for Sentry (https://sentry.io)
 Home-page: https://github.com/getsentry/sentry-python
 Author: Sentry Team and Contributors
RECORD CHANGED
@@ -9,29 +9,29 @@ sentry_sdk/_werkzeug.py,sha256=m3GPf-jHd8v3eVOfBHaKw5f0uHoLkXrSO1EcY-8EisY,3734
 sentry_sdk/api.py,sha256=mdw2-KPGLrYwN7QPRbk2TL4gDfOV56fIO8fAdafMcFo,12192
 sentry_sdk/attachments.py,sha256=0Dylhm065O6hNFjB40fWCd5Hg4qWSXndmi1TPWglZkI,3109
 sentry_sdk/client.py,sha256=7G9qH7YsBhl2ga9BZgmW0ESuXl4Z8pQZz2M8GC3aIV4,38668
-sentry_sdk/consts.py,sha256=PE5l_ESiXr5Gy1z2IPuKyCQPvVjuxjDdrkuS6DFGhSE,45069
+sentry_sdk/consts.py,sha256=lCgJ_91AWBwfPSc_JmMVFBk9zjV-Sf-sv3c-jBX07EI,45069
 sentry_sdk/debug.py,sha256=ddBehQlAuQC1sg1XO-N4N3diZ0x0iT5RWJwFdrtcsjw,1019
 sentry_sdk/envelope.py,sha256=Mgcib0uLm_5tSVzOrznRLdK9B3CjQ6TEgM1ZIZIfjWo,10355
 sentry_sdk/feature_flags.py,sha256=99JRig6TBkrkBzVCKqYcmVgjsuA_Hk-ul7jFHGhJplc,2233
 sentry_sdk/hub.py,sha256=2QLvEtIYSYV04r8h7VBmQjookILaiBZxZBGTtQKNAWg,25675
 sentry_sdk/logger.py,sha256=u_8zS8gjQt7FjYqz_I91sCbdsmBe7IgRqWxMP3vrsq0,2399
 sentry_sdk/metrics.py,sha256=3IvBwbHlU-C-JdwDysTeJqOoVyYXsHZ7oEkkU0qTZb4,29913
-sentry_sdk/monitor.py,sha256=7LydPMKjVRR5eFY9rxgvJv0idExA3sSnrZk-1mHu6G4,3710
+sentry_sdk/monitor.py,sha256=52CG1m2e8okFDVoTpbqfm9zeeaLa0ciC_r9x2RiXuDg,3639
 sentry_sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sentry_sdk/scope.py,sha256=fl6Hm7BD-1HlzghOHkWY_zQY3FkakrNrqdjebfJ0LbY,63942
 sentry_sdk/scrubber.py,sha256=rENmQ35buugDl269bRZuIAtgr27B9SzisJYUF-691pc,6064
 sentry_sdk/serializer.py,sha256=iXiRwTuRj0gcKyHRO0GNTZB1Hmk0LMDiBt6Be7RpGt8,13087
 sentry_sdk/session.py,sha256=TqDVmRKKHUDSmZb4jQR-s8wDt7Fwb6QaG21hawUGWEs,5571
-sentry_sdk/sessions.py,sha256=gQxwVBVqGhLp4a6xwrUe3JdaCSVbitZRxTdqScOSjj4,9228
+sentry_sdk/sessions.py,sha256=UZ2jfrqhYvZzTxCDGc1MLD6P_aHLJnTFetSUROIaPaA,9154
 sentry_sdk/spotlight.py,sha256=93kdd8KxdLfcPaxFnFuqHgYAAL4FCfpK1hiiPoD7Ac4,8678
 sentry_sdk/tracing.py,sha256=dEyLZn0JSj5WMjVJEQUxRud5NewBRau9dkuDrrzJ_Xw,48114
 sentry_sdk/tracing_utils.py,sha256=J_eY_0XuyydslEmcFZcrv8dt2ItpW7uWwe6CoXxoK5Q,28820
-sentry_sdk/transport.py,sha256=LTwSKe9pPAoo5oaphIfLvIJZuGcmKwhjuJlhwhpGcrc,32604
+sentry_sdk/transport.py,sha256=A0uux7XnniDJuExLudLyyFDYnS5C6r7zozGbkveUM7E,32469
 sentry_sdk/types.py,sha256=NLbnRzww2K3_oGz2GzcC8TdX5L2DXYso1-H1uCv2Hwc,1222
-sentry_sdk/utils.py,sha256=jRoLuDOYyZXn2Ks7BP4WUOTkutJnofKBZoJ9s7Dha5k,59368
+sentry_sdk/utils.py,sha256=q0jJzpUVs71Ejkq-1ZAr-9EyKjYnakTkv4BmuQCaxV4,59461
 sentry_sdk/worker.py,sha256=VSMaigRMbInVyupSFpBC42bft2oIViea-0C_d9ThnIo,4464
 sentry_sdk/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sentry_sdk/ai/monitoring.py,sha256=hDaLjTcI_CMUpy_wyYD5WNts07I07BZDleItu0Djv_w,4508
+sentry_sdk/ai/monitoring.py,sha256=o9rRRTIODEy-0wbINQQwSKVpUnE2V3FHfsdIlwKEwpw,4936
 sentry_sdk/ai/utils.py,sha256=QCwhHoptrdXyYroJqzCKxqi0cmrlD9IDDWUcBk6yWZc,950
 sentry_sdk/crons/__init__.py,sha256=3Zt6g1-pZZ12uRKKsC8QLm3XgJ4K1VYxgVpNNUygOZY,221
 sentry_sdk/crons/api.py,sha256=s3x6SG-jqIdWS-Kj0sAxJv0nz2A3stdGE1UCtQyRUy4,1559
@@ -41,7 +41,7 @@ sentry_sdk/integrations/__init__.py,sha256=d0-uVMIrodezjlfK10IYZLXotZ8LtZzHSWGwy
 sentry_sdk/integrations/_asgi_common.py,sha256=Ypg7IctB3iPPY60ebVlzChzgT8GeGpZ0YH8VvJNDlEY,3187
 sentry_sdk/integrations/_wsgi_common.py,sha256=A1-X7l1pZCcrbUhRHkmdKiK_EemEZjn7xToJIvlEuFM,7558
 sentry_sdk/integrations/aiohttp.py,sha256=_rfDKx1arvVQwcC20vh7HG80p8XtgzqKB3iBuPYZy8A,12895
-sentry_sdk/integrations/anthropic.py,sha256=4iMGpFOw9rxQrRPwBU4F9aZaZ6aOU-Bh0X_CcptcjN4,9426
+sentry_sdk/integrations/anthropic.py,sha256=Jkf6adRz-SixvHuAqpv3gEssdso8TWp9bAK2xYD8Cys,9605
 sentry_sdk/integrations/argv.py,sha256=GIY7TBFETF8Z0fDzqTXEJldt5XXCDdFNZxpGxP7EPaU,911
 sentry_sdk/integrations/ariadne.py,sha256=C-zKlOrU7jvTWmQHZx0M0tAZNkPPo7Z5-5jXDD92LiU,5834
 sentry_sdk/integrations/arq.py,sha256=yDPdWJa3ZgnGLwFzavIylIafEVN0qqSSgL4kUHxQF70,7881
@@ -56,7 +56,7 @@ sentry_sdk/integrations/bottle.py,sha256=aC5OsitlsRUEWBlpkNjxvH0m6UEG3OfAJ9jFyPC
 sentry_sdk/integrations/chalice.py,sha256=A4K_9FmNUu131El0ctkTmjtyYd184I4hQTlidZcEC54,4699
 sentry_sdk/integrations/clickhouse_driver.py,sha256=-CN3MLtiOy3ryqjh2sSD-TUI_gvhG2DRrvKgoWszd3w,5247
 sentry_sdk/integrations/cloud_resource_context.py,sha256=_gFldMeVHs5pxP5sm8uP7ZKmm6s_5hw3UsnXek9Iw8A,7780
-sentry_sdk/integrations/cohere.py,sha256=tNXHwjlUYdkmDHS0m-Y73qaDwSHaXQnglZbbR7E2Fgw,9333
+sentry_sdk/integrations/cohere.py,sha256=iuDI1IVPE39rbsc3e9_qJS2bCjNg7F53apueCdhzr8Q,9322
 sentry_sdk/integrations/dedupe.py,sha256=usREWhtGDFyxVBlIVzyCYj_Qy7NJBJ84FK0B57z11LM,1418
 sentry_sdk/integrations/dramatiq.py,sha256=I09vKWnfiuhdRFCjYYjmE9LOBQvDTPS-KFqf3iHFSsM,5583
 sentry_sdk/integrations/excepthook.py,sha256=tfwpSQuo1b_OmJbNKPPRh90EUjD_OSE4DqqgYY9PVQI,2408
@@ -70,14 +70,14 @@ sentry_sdk/integrations/gql.py,sha256=ppC7fjpyQ6jWST-batRt5HtebxE_9IeHbmZ-CJ1TfU
 sentry_sdk/integrations/graphene.py,sha256=I6ZJ8Apd9dO9XPVvZY7I46-v1eXOW1C1rAkWwasF3gU,5042
 sentry_sdk/integrations/httpx.py,sha256=WwUulqzBLoGGqWUUdQg_MThwQUKzBXnA-m3g_1GOpCE,5866
 sentry_sdk/integrations/huey.py,sha256=wlyxjeWqqJp1X5S3neD5FiZjXcyznm1dl8_u1wIo76U,5443
-sentry_sdk/integrations/huggingface_hub.py,sha256=pbtcwBtB0Nz09nNVxKMDs_GYm9XGmQVj1xgSsFSLdLI,6551
-sentry_sdk/integrations/langchain.py,sha256=Zxc0Xf7Y5Nv7_r-awlfXtvloGwma2B2onm-ylrQWvLU,18993
+sentry_sdk/integrations/huggingface_hub.py,sha256=ypTn17T0vufQwi7ODXONFkB8fMjUrU5b4Q6JZ34bnA4,6717
+sentry_sdk/integrations/langchain.py,sha256=nRmr6sc1W0xOQfNDkPzAI5gOhEHZFy24FERVbeKDByE,19060
 sentry_sdk/integrations/launchdarkly.py,sha256=bvtExuj68xPXZFsQeWTDR-ZBqP087tPuVzP1bNAOZHc,1935
 sentry_sdk/integrations/litestar.py,sha256=ui52AfgyyAO4aQ9XSkqJZNcPduX0BccCYUkQA9nIJ_E,11891
 sentry_sdk/integrations/logging.py,sha256=-0o9HTFo5RpHkCpxfZvpiBj5VWpH4aIJmH-HNQzj3Ec,13643
 sentry_sdk/integrations/loguru.py,sha256=mEWYWsNHQLlWknU4M8RBgOf2-5B5cBr5aGd-ZH1Emq4,6193
 sentry_sdk/integrations/modules.py,sha256=vzLx3Erg77Vl4mnUvAgTg_3teAuWy7zylFpAidBI9I0,820
-sentry_sdk/integrations/openai.py,sha256=Lm3k9WL7FtEz0HjZxwhLRnVMex35qhdSsS_iF_4QtK8,15585
+sentry_sdk/integrations/openai.py,sha256=TiVo7zlBbbTHH6KVmxZsS_6Yeqq1mFBML18uzEZ3nEA,16434
 sentry_sdk/integrations/openfeature.py,sha256=NXRKnhg0knMKOx_TO_2Z4zSsh4Glgk3tStu-lI99XsE,1235
 sentry_sdk/integrations/pure_eval.py,sha256=OvT76XvllQ_J6ABu3jVNU6KD2QAxnXMtTZ7hqhXNhpY,4581
 sentry_sdk/integrations/pymongo.py,sha256=cPpMGEbXHlV6HTHgmIDL1F-x3w7ZMROXVb4eUhLs3bw,6380
@@ -158,9 +158,9 @@ sentry_sdk/profiler/__init__.py,sha256=3PI3bHk9RSkkOXZKN84DDedk_7M65EiqqaIGo-DYs
 sentry_sdk/profiler/continuous_profiler.py,sha256=s0DHkj3RZYRg9HnQQC0G44ku6DaFqRy30fZTMtTYvIs,22828
 sentry_sdk/profiler/transaction_profiler.py,sha256=4Gj6FHLnK1di3GmnI1cCc_DbNcBVMdBjZZFvPvm7C7k,27877
 sentry_sdk/profiler/utils.py,sha256=G5s4tYai9ATJqcHrQ3bOIxlK6jIaHzELrDtU5k3N4HI,6556
-sentry_sdk-2.33.0.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
-sentry_sdk-2.33.0.dist-info/METADATA,sha256=dMkhuet0J0rBMeXCztJyYwoQwKvKC5gwByORWmpygJg,10278
-sentry_sdk-2.33.0.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
-sentry_sdk-2.33.0.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
-sentry_sdk-2.33.0.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
-sentry_sdk-2.33.0.dist-info/RECORD,,
+sentry_sdk-2.33.1.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
+sentry_sdk-2.33.1.dist-info/METADATA,sha256=klU6hCfX8PKwiXAmPNHdRDa6nAh66KrRkFAN882VV10,10278
+sentry_sdk-2.33.1.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+sentry_sdk-2.33.1.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
+sentry_sdk-2.33.1.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
+sentry_sdk-2.33.1.dist-info/RECORD,,