sentry-sdk 3.0.0a1__py2.py3-none-any.whl → 3.0.0a2__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (34)
  1. sentry_sdk/_types.py +2 -2
  2. sentry_sdk/ai/monitoring.py +7 -6
  3. sentry_sdk/client.py +31 -6
  4. sentry_sdk/consts.py +83 -5
  5. sentry_sdk/integrations/_asgi_common.py +2 -3
  6. sentry_sdk/integrations/arq.py +2 -1
  7. sentry_sdk/integrations/celery/__init__.py +5 -5
  8. sentry_sdk/integrations/celery/beat.py +2 -2
  9. sentry_sdk/integrations/cohere.py +10 -10
  10. sentry_sdk/integrations/django/asgi.py +2 -2
  11. sentry_sdk/integrations/grpc/__init__.py +18 -1
  12. sentry_sdk/integrations/huggingface_hub.py +2 -2
  13. sentry_sdk/integrations/logging.py +26 -32
  14. sentry_sdk/integrations/loguru.py +124 -50
  15. sentry_sdk/integrations/openai.py +4 -4
  16. sentry_sdk/integrations/redis/_async_common.py +10 -3
  17. sentry_sdk/integrations/redis/_sync_common.py +6 -1
  18. sentry_sdk/integrations/redis/redis_cluster.py +11 -5
  19. sentry_sdk/integrations/redis/utils.py +3 -3
  20. sentry_sdk/integrations/tornado.py +1 -1
  21. sentry_sdk/logger.py +32 -5
  22. sentry_sdk/opentelemetry/contextvars_context.py +9 -1
  23. sentry_sdk/opentelemetry/span_processor.py +7 -1
  24. sentry_sdk/opentelemetry/tracing.py +27 -3
  25. sentry_sdk/opentelemetry/utils.py +8 -0
  26. sentry_sdk/scope.py +2 -11
  27. sentry_sdk/tracing.py +12 -12
  28. sentry_sdk/tracing_utils.py +4 -2
  29. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a2.dist-info}/METADATA +3 -3
  30. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a2.dist-info}/RECORD +34 -34
  31. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a2.dist-info}/WHEEL +1 -1
  32. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a2.dist-info}/entry_points.txt +0 -0
  33. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a2.dist-info}/licenses/LICENSE +0 -0
  34. {sentry_sdk-3.0.0a1.dist-info → sentry_sdk-3.0.0a2.dist-info}/top_level.txt +0 -0
sentry_sdk/_types.py CHANGED
@@ -129,7 +129,7 @@ if TYPE_CHECKING:
     "contexts": dict[str, dict[str, object]],
     "dist": str,
     "duration": Optional[float],
-    "environment": str,
+    "environment": Optional[str],
     "errors": list[dict[str, Any]],  # TODO: We can expand on this type
     "event_id": str,
     "exception": dict[
@@ -146,7 +146,7 @@ if TYPE_CHECKING:
     "monitor_slug": Optional[str],
     "platform": Literal["python"],
     "profile": object,  # Should be sentry_sdk.profiler.Profile, but we can't import that here due to circular imports
-    "release": str,
+    "release": Optional[str],
     "request": dict[str, object],
     "sdk": Mapping[str, object],
     "server_name": str,
sentry_sdk/ai/monitoring.py CHANGED
@@ -1,6 +1,7 @@
 import inspect
 from functools import wraps
 
+from sentry_sdk.consts import SPANDATA
 import sentry_sdk.utils
 from sentry_sdk import start_span
 from sentry_sdk.tracing import Span
@@ -41,7 +42,7 @@ def ai_track(description, **span_kwargs):
             for k, v in kwargs.pop("sentry_data", {}).items():
                 span.set_attribute(k, v)
             if curr_pipeline:
-                span.set_attribute("ai.pipeline.name", curr_pipeline)
+                span.set_attribute(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
             return f(*args, **kwargs)
         else:
             _ai_pipeline_name.set(description)
@@ -72,7 +73,7 @@ def ai_track(description, **span_kwargs):
             for k, v in kwargs.pop("sentry_data", {}).items():
                 span.set_attribute(k, v)
             if curr_pipeline:
-                span.set_attribute("ai.pipeline.name", curr_pipeline)
+                span.set_attribute(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
             return await f(*args, **kwargs)
         else:
             _ai_pipeline_name.set(description)
@@ -104,11 +105,11 @@ def record_token_usage(
     # type: (Span, Optional[int], Optional[int], Optional[int]) -> None
     ai_pipeline_name = get_ai_pipeline_name()
     if ai_pipeline_name:
-        span.set_attribute("ai.pipeline.name", ai_pipeline_name)
+        span.set_attribute(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
     if prompt_tokens is not None:
-        span.set_attribute("ai.prompt_tokens.used", prompt_tokens)
+        span.set_attribute(SPANDATA.AI_PROMPT_TOKENS_USED, prompt_tokens)
     if completion_tokens is not None:
-        span.set_attribute("ai.completion_tokens.used", completion_tokens)
+        span.set_attribute(SPANDATA.AI_COMPLETION_TOKENS_USED, completion_tokens)
     if (
         total_tokens is None
         and prompt_tokens is not None
@@ -116,4 +117,4 @@ def record_token_usage(
     ):
         total_tokens = prompt_tokens + completion_tokens
     if total_tokens is not None:
-        span.set_attribute("ai.total_tokens.used", total_tokens)
+        span.set_attribute(SPANDATA.AI_TOTAL_TOKENS_USED, total_tokens)
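
The hard-coded "ai.*" strings above are replaced by SPANDATA constants (defined in sentry_sdk/consts.py, see below). A hedged sketch of the token-accounting logic in record_token_usage, assuming only that span exposes a set_attribute() method:

    from sentry_sdk.consts import SPANDATA

    def record_tokens(span, prompt_tokens=None, completion_tokens=None, total_tokens=None):
        # Sketch of record_token_usage: set whichever counts are known,
        # deriving the total when only the two parts are supplied.
        if prompt_tokens is not None:
            span.set_attribute(SPANDATA.AI_PROMPT_TOKENS_USED, prompt_tokens)
        if completion_tokens is not None:
            span.set_attribute(SPANDATA.AI_COMPLETION_TOKENS_USED, completion_tokens)
        if total_tokens is None and prompt_tokens is not None and completion_tokens is not None:
            total_tokens = prompt_tokens + completion_tokens
        if total_tokens is not None:
            span.set_attribute(SPANDATA.AI_TOTAL_TOKENS_USED, total_tokens)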
sentry_sdk/client.py CHANGED
@@ -7,6 +7,7 @@ from datetime import datetime, timezone
 from importlib import import_module
 from typing import TYPE_CHECKING, List, Dict, cast, overload
 
+import sentry_sdk
 from sentry_sdk._compat import check_uwsgi_thread_support
 from sentry_sdk.utils import (
     AnnotatedValue,
@@ -107,7 +108,7 @@ def _get_options(*args, **kwargs):
         rv["environment"] = os.environ.get("SENTRY_ENVIRONMENT") or "production"
 
     if rv["debug"] is None:
-        rv["debug"] = env_to_bool(os.environ.get("SENTRY_DEBUG", "False"), strict=True)
+        rv["debug"] = env_to_bool(os.environ.get("SENTRY_DEBUG"), strict=True) or False
 
     if rv["server_name"] is None and hasattr(socket, "gethostname"):
         rv["server_name"] = socket.gethostname()
@@ -133,6 +134,11 @@ def _get_options(*args, **kwargs):
         )
         rv["socket_options"] = None
 
+    if rv["keep_alive"] is None:
+        rv["keep_alive"] = (
+            env_to_bool(os.environ.get("SENTRY_KEEP_ALIVE"), strict=True) or False
+        )
+
     return rv
 
 
@@ -185,8 +191,8 @@ class BaseClient:
         # type: (*Any, **Any) -> Optional[str]
         return None
 
-    def _capture_experimental_log(self, scope, log):
-        # type: (Scope, Log) -> None
+    def _capture_experimental_log(self, log):
+        # type: (Log) -> None
         pass
 
     def capture_session(self, *args, **kwargs):
@@ -400,6 +406,8 @@ class _Client(BaseClient):
 
             patch_readable_span()
             setup_sentry_tracing()
+
+            logger.debug("[Tracing] Finished setting up OpenTelemetry")
         finally:
             _client_init_debug.set(old_debug)
 
@@ -839,12 +847,14 @@ class _Client(BaseClient):
 
         return return_value
 
-    def _capture_experimental_log(self, current_scope, log):
-        # type: (Scope, Log) -> None
+    def _capture_experimental_log(self, log):
+        # type: (Log) -> None
         logs_enabled = self.options["_experiments"].get("enable_logs", False)
         if not logs_enabled:
            return
-        isolation_scope = current_scope.get_isolation_scope()
+
+        current_scope = sentry_sdk.get_current_scope()
+        isolation_scope = sentry_sdk.get_isolation_scope()
 
         log["attributes"]["sentry.sdk.name"] = SDK_INFO["name"]
         log["attributes"]["sentry.sdk.version"] = SDK_INFO["version"]
@@ -873,6 +883,21 @@ class _Client(BaseClient):
         elif propagation_context is not None:
             log["trace_id"] = propagation_context.trace_id
 
+        # The user, if present, is always set on the isolation scope.
+        if isolation_scope._user is not None:
+            for log_attribute, user_attribute in (
+                ("user.id", "id"),
+                ("user.name", "username"),
+                ("user.email", "email"),
+            ):
+                if (
+                    user_attribute in isolation_scope._user
+                    and log_attribute not in log["attributes"]
+                ):
+                    log["attributes"][log_attribute] = isolation_scope._user[
+                        user_attribute
+                    ]
+
         # If debug is enabled, log the log to the console
         debug = self.options.get("debug", False)
         if debug:
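
The new block copies user fields set via sentry_sdk.set_user() onto every captured log, without overwriting attributes the log already carries. A standalone sketch of that mapping (dict values are illustrative):

    # user data as stored on the isolation scope (illustrative values)
    user = {"id": 42, "username": "jane", "email": "jane@example.com"}
    attributes = {"user.email": "explicit@example.com"}  # pre-set values win

    for log_attribute, user_attribute in (
        ("user.id", "id"),
        ("user.name", "username"),
        ("user.email", "email"),
    ):
        if user_attribute in user and log_attribute not in attributes:
            attributes[log_attribute] = user[user_attribute]

    # attributes == {"user.email": "explicit@example.com",
    #                "user.id": 42, "user.name": "jane"}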
sentry_sdk/consts.py CHANGED
@@ -1,5 +1,4 @@
 import itertools
-
 from enum import Enum
 from typing import TYPE_CHECKING
 
@@ -47,6 +46,7 @@ if TYPE_CHECKING:
     Event,
     EventProcessor,
     Hint,
+    Log,
     ProfilerMode,
     TracesSampler,
     TransactionProcessor,
@@ -71,6 +71,7 @@ if TYPE_CHECKING:
     "transport_num_pools": Optional[int],
     "transport_http2": Optional[bool],
     "enable_logs": Optional[bool],
+    "before_send_log": Optional[Callable[[Log, Hint], Optional[Log]]],
     },
     total=False,
 )
@@ -174,7 +175,7 @@ class SPANDATA:
     For an AI model call, the format of the response
     """
 
-    AI_LOGIT_BIAS = "ai.response_format"
+    AI_LOGIT_BIAS = "ai.logit_bias"
     """
     For an AI model call, the logit bias
     """
@@ -191,7 +192,6 @@ class SPANDATA:
     Minimize pre-processing done to the prompt sent to the LLM.
     Example: true
     """
-
     AI_RESPONSES = "ai.responses"
     """
     The responses to an AI model call. Always as a list.
@@ -204,6 +204,84 @@ class SPANDATA:
     Example: 123.45
     """
 
+    AI_CITATIONS = "ai.citations"
+    """
+    References or sources cited by the AI model in its response.
+    Example: ["Smith et al. 2020", "Jones 2019"]
+    """
+
+    AI_DOCUMENTS = "ai.documents"
+    """
+    Documents or content chunks used as context for the AI model.
+    Example: ["doc1.txt", "doc2.pdf"]
+    """
+
+    AI_SEARCH_QUERIES = "ai.search_queries"
+    """
+    Queries used to search for relevant context or documents.
+    Example: ["climate change effects", "renewable energy"]
+    """
+
+    AI_SEARCH_RESULTS = "ai.search_results"
+    """
+    Results returned from search queries for context.
+    Example: ["Result 1", "Result 2"]
+    """
+
+    AI_GENERATION_ID = "ai.generation_id"
+    """
+    Unique identifier for the completion.
+    Example: "gen_123abc"
+    """
+
+    AI_SEARCH_REQUIRED = "ai.is_search_required"
+    """
+    Boolean indicating if the model needs to perform a search.
+    Example: true
+    """
+
+    AI_FINISH_REASON = "ai.finish_reason"
+    """
+    The reason why the model stopped generating.
+    Example: "length"
+    """
+
+    AI_PIPELINE_NAME = "ai.pipeline.name"
+    """
+    Name of the AI pipeline or chain being executed.
+    Example: "qa-pipeline"
+    """
+
+    AI_PROMPT_TOKENS_USED = "ai.prompt_tokens.used"
+    """
+    The number of input prompt tokens used by the model.
+    Example: 10
+    """
+
+    AI_COMPLETION_TOKENS_USED = "ai.completion_tokens.used"
+    """
+    The number of output completion tokens used by the model.
+    Example: 10
+    """
+
+    AI_TOTAL_TOKENS_USED = "ai.total_tokens.used"
+    """
+    The total number of tokens (input + output) used by the request to the model.
+    Example: 20
+    """
+
+    AI_TEXTS = "ai.texts"
+    """
+    Raw text inputs provided to the model.
+    Example: ["What is machine learning?"]
+    """
+
+    AI_WARNINGS = "ai.warnings"
+    """
+    Warning messages generated during model execution.
+    Example: ["Token limit exceeded"]
+    """
+
     DB_NAME = "db.name"
     """
     The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).
@@ -545,7 +623,7 @@ class ClientConstructor:
         ignore_errors=[],  # type: Sequence[Union[type, str]]  # noqa: B006
         max_request_body_size="medium",  # type: str
         socket_options=None,  # type: Optional[List[Tuple[int, int, int | bytes]]]
-        keep_alive=False,  # type: bool
+        keep_alive=None,  # type: Optional[bool]
         before_send=None,  # type: Optional[EventProcessor]
         before_breadcrumb=None,  # type: Optional[BreadcrumbProcessor]
         debug=None,  # type: Optional[bool]
@@ -985,4 +1063,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options
 
 
-VERSION = "3.0.0a1"
+VERSION = "3.0.0a2"
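
These constants give the AI integrations (cohere, huggingface_hub, openai, ai.monitoring) a single source of truth for attribute keys. A hedged sketch of setting them on a span, assuming the 3.0 span API used in the monitoring.py hunks above:

    import sentry_sdk
    from sentry_sdk.consts import SPANDATA

    with sentry_sdk.start_span(op="ai.run", name="example") as span:
        span.set_attribute(SPANDATA.AI_PIPELINE_NAME, "qa-pipeline")
        span.set_attribute(SPANDATA.AI_FINISH_REASON, "length")
        span.set_attribute(SPANDATA.AI_TOTAL_TOKENS_USED, 20)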
sentry_sdk/integrations/_asgi_common.py CHANGED
@@ -3,7 +3,7 @@ import urllib
 from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.integrations._wsgi_common import _filter_headers
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, cast
 
 if TYPE_CHECKING:
     from typing import Any
@@ -37,8 +37,7 @@ def _get_url(asgi_scope, default_scheme=None, host=None):
     """
     Extract URL from the ASGI scope, without also including the querystring.
     """
-    scheme = asgi_scope.get("scheme", default_scheme)
-
+    scheme = cast(str, asgi_scope.get("scheme", default_scheme))
     server = asgi_scope.get("server", None)
     path = asgi_scope.get("root_path", "") + asgi_scope.get("path", "")
sentry_sdk/integrations/arq.py CHANGED
@@ -225,7 +225,8 @@ def patch_create_worker():
         ]
         if hasattr(settings_cls, "cron_jobs"):
             settings_cls.cron_jobs = [
-                _get_arq_cron_job(cron_job) for cron_job in settings_cls.cron_jobs
+                _get_arq_cron_job(cron_job)
+                for cron_job in (settings_cls.cron_jobs or [])
             ]
 
     if "functions" in kwargs:
sentry_sdk/integrations/celery/__init__.py CHANGED
@@ -8,7 +8,7 @@ from sentry_sdk.consts import OP, SPANSTATUS, SPANDATA, BAGGAGE_HEADER_NAME
 from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
 from sentry_sdk.integrations.celery.beat import (
     _patch_beat_apply_entry,
-    _patch_redbeat_maybe_due,
+    _patch_redbeat_apply_async,
     _setup_celery_beat_signals,
 )
 from sentry_sdk.integrations.celery.utils import _now_seconds_since_epoch
@@ -72,7 +72,7 @@ class CeleryIntegration(Integration):
         self.exclude_beat_tasks = exclude_beat_tasks
 
         _patch_beat_apply_entry()
-        _patch_redbeat_maybe_due()
+        _patch_redbeat_apply_async()
         _setup_celery_beat_signals(monitor_beat_tasks)
 
     @staticmethod
@@ -100,9 +100,9 @@ class CeleryIntegration(Integration):
     def _set_status(status):
         # type: (str) -> None
         with capture_internal_exceptions():
-            scope = sentry_sdk.get_current_scope()
-            if scope.span is not None:
-                scope.span.set_status(status)
+            span = sentry_sdk.get_current_span()
+            if span is not None:
+                span.set_status(status)
 
 
 def _capture_exception(task, exc_info):
sentry_sdk/integrations/celery/beat.py CHANGED
@@ -202,12 +202,12 @@ def _patch_beat_apply_entry():
     Scheduler.apply_entry = _wrap_beat_scheduler(Scheduler.apply_entry)
 
 
-def _patch_redbeat_maybe_due():
+def _patch_redbeat_apply_async():
     # type: () -> None
     if RedBeatScheduler is None:
         return
 
-    RedBeatScheduler.maybe_due = _wrap_beat_scheduler(RedBeatScheduler.maybe_due)
+    RedBeatScheduler.apply_async = _wrap_beat_scheduler(RedBeatScheduler.apply_async)
 
 
 def _setup_celery_beat_signals(monitor_beat_tasks):
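
The patch target moves from RedBeatScheduler.maybe_due to RedBeatScheduler.apply_async; the wrapping itself follows the usual monkeypatch shape. A hedged sketch of that shape (_wrap_beat_scheduler's real body lives in beat.py and adds crons check-in monitoring, which is not shown in this diff):

    from functools import wraps

    def _wrap_beat_scheduler(original):
        # Sketch: return a wrapper that runs monitoring logic around
        # the original scheduler method.
        @wraps(original)
        def sentry_patched(*args, **kwargs):
            # check-in / monitoring logic would run here
            return original(*args, **kwargs)

        return sentry_patched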
sentry_sdk/integrations/cohere.py CHANGED
@@ -52,17 +52,17 @@ COLLECTED_PII_CHAT_PARAMS = {
 }
 
 COLLECTED_CHAT_RESP_ATTRS = {
-    "generation_id": "ai.generation_id",
-    "is_search_required": "ai.is_search_required",
-    "finish_reason": "ai.finish_reason",
+    "generation_id": SPANDATA.AI_GENERATION_ID,
+    "is_search_required": SPANDATA.AI_SEARCH_REQUIRED,
+    "finish_reason": SPANDATA.AI_FINISH_REASON,
 }
 
 COLLECTED_PII_CHAT_RESP_ATTRS = {
-    "citations": "ai.citations",
-    "documents": "ai.documents",
-    "search_queries": "ai.search_queries",
-    "search_results": "ai.search_results",
-    "tool_calls": "ai.tool_calls",
+    "citations": SPANDATA.AI_CITATIONS,
+    "documents": SPANDATA.AI_DOCUMENTS,
+    "search_queries": SPANDATA.AI_SEARCH_QUERIES,
+    "search_results": SPANDATA.AI_SEARCH_RESULTS,
+    "tool_calls": SPANDATA.AI_TOOL_CALLS,
 }
 
 
@@ -127,7 +127,7 @@ def _wrap_chat(f, streaming):
             )
 
         if hasattr(res.meta, "warnings"):
-            set_data_normalized(span, "ai.warnings", res.meta.warnings)
+            set_data_normalized(span, SPANDATA.AI_WARNINGS, res.meta.warnings)
 
     @wraps(f)
     def new_chat(*args, **kwargs):
@@ -240,7 +240,7 @@ def _wrap_embed(f):
             should_send_default_pii() and integration.include_prompts
         ):
             if isinstance(kwargs["texts"], str):
-                set_data_normalized(span, "ai.texts", [kwargs["texts"]])
+                set_data_normalized(span, SPANDATA.AI_TEXTS, [kwargs["texts"]])
             elif (
                 isinstance(kwargs["texts"], list)
                 and len(kwargs["texts"]) > 0
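
The attribute maps now pair Cohere response fields with SPANDATA keys rather than raw strings. A hedged sketch of how such a map is typically consumed (the actual loop in cohere.py is not part of this hunk):

    # res is a Cohere response object, span an active Sentry span
    for res_attr, span_key in COLLECTED_CHAT_RESP_ATTRS.items():
        if hasattr(res, res_attr):
            set_data_normalized(span, span_key, getattr(res, res_attr))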
sentry_sdk/integrations/django/asgi.py CHANGED
@@ -241,9 +241,9 @@ def _asgi_middleware_mixin_factory(_check_middleware_span):
             middleware_span = _check_middleware_span(old_method=f)
 
             if middleware_span is None:
-                return await f(*args, **kwargs)
+                return await f(*args, **kwargs)  # type: ignore
 
             with middleware_span:
-                return await f(*args, **kwargs)
+                return await f(*args, **kwargs)  # type: ignore
 
     return SentryASGIMixin
sentry_sdk/integrations/grpc/__init__.py CHANGED
@@ -6,6 +6,7 @@ from grpc.aio import Channel as AsyncChannel
 from grpc.aio import Server as AsyncServer
 
 from sentry_sdk.integrations import Integration
+from sentry_sdk.utils import parse_version
 
 from .client import ClientInterceptor
 from .server import ServerInterceptor
@@ -41,6 +42,8 @@ else:
 
 P = ParamSpec("P")
 
+GRPC_VERSION = parse_version(grpc.__version__)
+
 
 def _wrap_channel_sync(func: Callable[P, Channel]) -> Callable[P, Channel]:
     "Wrapper for synchronous secure and insecure channel."
@@ -127,7 +130,21 @@ def _wrap_async_server(func: Callable[P, AsyncServer]) -> Callable[P, AsyncServer]:
         **kwargs: P.kwargs,
     ) -> Server:
         server_interceptor = AsyncServerInterceptor()
-        interceptors = (server_interceptor, *(interceptors or []))
+        interceptors = [
+            server_interceptor,
+            *(interceptors or []),
+        ]  # type: Sequence[grpc.ServerInterceptor]
+
+        try:
+            # We prefer interceptors as a list because of compatibility with
+            # opentelemetry https://github.com/getsentry/sentry-python/issues/4389
+            # However, prior to grpc 1.42.0, only tuples were accepted, so we
+            # have no choice there.
+            if GRPC_VERSION is not None and GRPC_VERSION < (1, 42, 0):
+                interceptors = tuple(interceptors)
+        except Exception:
+            pass
+
         return func(*args, interceptors=interceptors, **kwargs)  # type: ignore
 
     return patched_aio_server  # type: ignore
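
The gate keeps list-typed interceptors (which opentelemetry's grpc instrumentation can extend) on grpc >= 1.42.0, falling back to the tuple form that older grpc requires. A standalone sketch of the same check; my_interceptor is a placeholder:

    import grpc
    from sentry_sdk.utils import parse_version

    # parse_version returns a tuple of ints (or None if unparsable),
    # so plain tuple comparison works.
    GRPC_VERSION = parse_version(grpc.__version__)

    interceptors = [my_interceptor]
    if GRPC_VERSION is not None and GRPC_VERSION < (1, 42, 0):
        interceptors = tuple(interceptors)  # older grpc only accepts tuples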
sentry_sdk/integrations/huggingface_hub.py CHANGED
@@ -98,7 +98,7 @@ def _wrap_text_generation(f):
         if should_send_default_pii() and integration.include_prompts:
             set_data_normalized(
                 span,
-                "ai.responses",
+                SPANDATA.AI_RESPONSES,
                 [res],
             )
             span.__exit__(None, None, None)
@@ -108,7 +108,7 @@ def _wrap_text_generation(f):
         if should_send_default_pii() and integration.include_prompts:
             set_data_normalized(
                 span,
-                "ai.responses",
+                SPANDATA.AI_RESPONSES,
                 [res.generated_text],
             )
         if res.details is not None and res.details.generated_tokens > 0:
sentry_sdk/integrations/logging.py CHANGED
@@ -5,6 +5,7 @@ from fnmatch import fnmatch
 
 import sentry_sdk
 from sentry_sdk.client import BaseClient
+from sentry_sdk.logger import _log_level_to_otel
 from sentry_sdk.utils import (
     safe_repr,
     to_string,
@@ -14,7 +15,7 @@ from sentry_sdk.utils import (
 )
 from sentry_sdk.integrations import Integration
 
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
     from collections.abc import MutableMapping
@@ -36,6 +37,16 @@ LOGGING_TO_EVENT_LEVEL = {
     logging.CRITICAL: "fatal",  # CRITICAL is same as FATAL
 }
 
+# Map logging level numbers to corresponding OTel level numbers
+SEVERITY_TO_OTEL_SEVERITY = {
+    logging.CRITICAL: 21,  # fatal
+    logging.ERROR: 17,  # error
+    logging.WARNING: 13,  # warn
+    logging.INFO: 9,  # info
+    logging.DEBUG: 5,  # debug
+}
+
+
 # Capturing events from those loggers causes recursion errors. We cannot allow
 # the user to unconditionally create events from those loggers under any
 # circumstances.
@@ -124,7 +135,10 @@ class LoggingIntegration(Integration):
         # the integration. Otherwise we have a high chance of getting
         # into a recursion error when the integration is resolved
         # (this also is slower).
-        if ignored_loggers is not None and record.name not in ignored_loggers:
+        if (
+            ignored_loggers is not None
+            and record.name.strip() not in ignored_loggers
+        ):
             integration = sentry_sdk.get_client().get_integration(
                 LoggingIntegration
             )
@@ -169,7 +183,7 @@ class _BaseHandler(logging.Handler):
         # type: (LogRecord) -> bool
         """Prevents ignored loggers from recording"""
         for logger in _IGNORED_LOGGERS:
-            if fnmatch(record.name, logger):
+            if fnmatch(record.name.strip(), logger):
                 return False
         return True
 
@@ -317,21 +331,6 @@ class BreadcrumbHandler(_BaseHandler):
         }
 
 
-def _python_level_to_otel(record_level):
-    # type: (int) -> Tuple[int, str]
-    for py_level, otel_severity_number, otel_severity_text in [
-        (50, 21, "fatal"),
-        (40, 17, "error"),
-        (30, 13, "warn"),
-        (20, 9, "info"),
-        (10, 5, "debug"),
-        (5, 1, "trace"),
-    ]:
-        if record_level >= py_level:
-            return otel_severity_number, otel_severity_text
-    return 0, "default"
-
-
 class SentryLogsHandler(_BaseHandler):
     """
     A logging handler that records Sentry logs for each Python log record.
@@ -353,28 +352,24 @@ class SentryLogsHandler(_BaseHandler):
         if not client.options["_experiments"].get("enable_logs", False):
             return
 
-        SentryLogsHandler._capture_log_from_record(client, record)
+        self._capture_log_from_record(client, record)
 
-    @staticmethod
-    def _capture_log_from_record(client, record):
+    def _capture_log_from_record(self, client, record):
         # type: (BaseClient, LogRecord) -> None
-        scope = sentry_sdk.get_current_scope()
-        otel_severity_number, otel_severity_text = _python_level_to_otel(record.levelno)
+        otel_severity_number, otel_severity_text = _log_level_to_otel(
+            record.levelno, SEVERITY_TO_OTEL_SEVERITY
+        )
         project_root = client.options["project_root"]
-        attrs = {
-            "sentry.origin": "auto.logger.log",
-        }  # type: dict[str, str | bool | float | int]
+        attrs = self._extra_from_record(record)  # type: Any
+        attrs["sentry.origin"] = "auto.logger.log"
         if isinstance(record.msg, str):
             attrs["sentry.message.template"] = record.msg
         if record.args is not None:
             if isinstance(record.args, tuple):
                 for i, arg in enumerate(record.args):
-                    attrs[f"sentry.message.parameters.{i}"] = (
+                    attrs[f"sentry.message.parameter.{i}"] = (
                         arg
-                        if isinstance(arg, str)
-                        or isinstance(arg, float)
-                        or isinstance(arg, int)
-                        or isinstance(arg, bool)
+                        if isinstance(arg, (str, float, int, bool))
                         else safe_repr(arg)
                     )
         if record.lineno:
@@ -401,7 +396,6 @@ class SentryLogsHandler(_BaseHandler):
 
         # noinspection PyProtectedMember
        client._capture_experimental_log(
-            scope,
             {
                 "severity_text": otel_severity_text,
                 "severity_number": otel_severity_number,