sentry-sdk 2.34.1__py2.py3-none-any.whl → 2.35.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentry-sdk might be problematic. Click here for more details.

Files changed (33)
  1. sentry_sdk/__init__.py +1 -0
  2. sentry_sdk/ai/utils.py +9 -9
  3. sentry_sdk/api.py +80 -0
  4. sentry_sdk/client.py +12 -7
  5. sentry_sdk/consts.py +135 -10
  6. sentry_sdk/crons/api.py +5 -0
  7. sentry_sdk/integrations/anthropic.py +138 -75
  8. sentry_sdk/integrations/asgi.py +11 -25
  9. sentry_sdk/integrations/clickhouse_driver.py +33 -13
  10. sentry_sdk/integrations/django/asgi.py +1 -1
  11. sentry_sdk/integrations/fastapi.py +1 -7
  12. sentry_sdk/integrations/gnu_backtrace.py +6 -3
  13. sentry_sdk/integrations/langchain.py +466 -193
  14. sentry_sdk/integrations/litestar.py +1 -1
  15. sentry_sdk/integrations/logging.py +2 -1
  16. sentry_sdk/integrations/loguru.py +2 -1
  17. sentry_sdk/integrations/openai.py +8 -2
  18. sentry_sdk/integrations/openai_agents/patches/agent_run.py +0 -3
  19. sentry_sdk/integrations/openai_agents/patches/runner.py +18 -15
  20. sentry_sdk/integrations/openfeature.py +2 -4
  21. sentry_sdk/integrations/quart.py +1 -1
  22. sentry_sdk/integrations/starlette.py +1 -5
  23. sentry_sdk/integrations/starlite.py +1 -1
  24. sentry_sdk/scope.py +11 -11
  25. sentry_sdk/tracing.py +118 -25
  26. sentry_sdk/tracing_utils.py +321 -39
  27. sentry_sdk/utils.py +22 -1
  28. {sentry_sdk-2.34.1.dist-info → sentry_sdk-2.35.1.dist-info}/METADATA +1 -1
  29. {sentry_sdk-2.34.1.dist-info → sentry_sdk-2.35.1.dist-info}/RECORD +33 -33
  30. {sentry_sdk-2.34.1.dist-info → sentry_sdk-2.35.1.dist-info}/WHEEL +0 -0
  31. {sentry_sdk-2.34.1.dist-info → sentry_sdk-2.35.1.dist-info}/entry_points.txt +0 -0
  32. {sentry_sdk-2.34.1.dist-info → sentry_sdk-2.35.1.dist-info}/licenses/LICENSE +0 -0
  33. {sentry_sdk-2.34.1.dist-info → sentry_sdk-2.35.1.dist-info}/top_level.txt +0 -0
@@ -1,8 +1,10 @@
1
1
  from functools import wraps
2
+ import json
2
3
  from typing import TYPE_CHECKING
3
4
 
4
5
  import sentry_sdk
5
6
  from sentry_sdk.ai.monitoring import record_token_usage
7
+ from sentry_sdk.ai.utils import set_data_normalized
6
8
  from sentry_sdk.consts import OP, SPANDATA
7
9
  from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
8
10
  from sentry_sdk.scope import should_send_default_pii
@@ -10,9 +12,15 @@ from sentry_sdk.utils import (
10
12
  capture_internal_exceptions,
11
13
  event_from_exception,
12
14
  package_version,
15
+ safe_serialize,
13
16
  )
14
17
 
15
18
  try:
19
+ try:
20
+ from anthropic import NOT_GIVEN
21
+ except ImportError:
22
+ NOT_GIVEN = None
23
+
16
24
  from anthropic.resources import AsyncMessages, Messages
17
25
 
18
26
  if TYPE_CHECKING:
@@ -53,8 +61,11 @@ def _capture_exception(exc):
53
61
  sentry_sdk.capture_event(event, hint=hint)
54
62
 
55
63
 
56
- def _calculate_token_usage(result, span):
57
- # type: (Messages, Span) -> None
64
+ def _get_token_usage(result):
65
+ # type: (Messages) -> tuple[int, int]
66
+ """
67
+ Get token usage from the Anthropic response.
68
+ """
58
69
  input_tokens = 0
59
70
  output_tokens = 0
60
71
  if hasattr(result, "usage"):
@@ -64,37 +75,13 @@ def _calculate_token_usage(result, span):
64
75
  if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int):
65
76
  output_tokens = usage.output_tokens
66
77
 
67
- total_tokens = input_tokens + output_tokens
78
+ return input_tokens, output_tokens
68
79
 
69
- record_token_usage(
70
- span,
71
- input_tokens=input_tokens,
72
- output_tokens=output_tokens,
73
- total_tokens=total_tokens,
74
- )
75
80
 
76
-
77
- def _get_responses(content):
78
- # type: (list[Any]) -> list[dict[str, Any]]
81
+ def _collect_ai_data(event, model, input_tokens, output_tokens, content_blocks):
82
+ # type: (MessageStreamEvent, str | None, int, int, list[str]) -> tuple[str | None, int, int, list[str]]
79
83
  """
80
- Get JSON of a Anthropic responses.
81
- """
82
- responses = []
83
- for item in content:
84
- if hasattr(item, "text"):
85
- responses.append(
86
- {
87
- "type": item.type,
88
- "text": item.text,
89
- }
90
- )
91
- return responses
92
-
93
-
94
- def _collect_ai_data(event, input_tokens, output_tokens, content_blocks):
95
- # type: (MessageStreamEvent, int, int, list[str]) -> tuple[int, int, list[str]]
96
- """
97
- Count token usage and collect content blocks from the AI streaming response.
84
+ Collect model information, token usage, and collect content blocks from the AI streaming response.
98
85
  """
99
86
  with capture_internal_exceptions():
100
87
  if hasattr(event, "type"):
@@ -102,6 +89,7 @@ def _collect_ai_data(event, input_tokens, output_tokens, content_blocks):
102
89
  usage = event.message.usage
103
90
  input_tokens += usage.input_tokens
104
91
  output_tokens += usage.output_tokens
92
+ model = event.message.model or model
105
93
  elif event.type == "content_block_start":
106
94
  pass
107
95
  elif event.type == "content_block_delta":
@@ -114,31 +102,80 @@ def _collect_ai_data(event, input_tokens, output_tokens, content_blocks):
114
102
  elif event.type == "message_delta":
115
103
  output_tokens += event.usage.output_tokens
116
104
 
117
- return input_tokens, output_tokens, content_blocks
105
+ return model, input_tokens, output_tokens, content_blocks
118
106
 
119
107
 
120
- def _add_ai_data_to_span(
121
- span, integration, input_tokens, output_tokens, content_blocks
122
- ):
123
- # type: (Span, AnthropicIntegration, int, int, list[str]) -> None
108
+ def _set_input_data(span, kwargs, integration):
109
+ # type: (Span, dict[str, Any], AnthropicIntegration) -> None
124
110
  """
125
- Add token usage and content blocks from the AI streaming response to the span.
111
+ Set input data for the span based on the provided keyword arguments for the anthropic message creation.
126
112
  """
127
- with capture_internal_exceptions():
128
- if should_send_default_pii() and integration.include_prompts:
129
- complete_message = "".join(content_blocks)
130
- span.set_data(
131
- SPANDATA.AI_RESPONSES,
132
- [{"type": "text", "text": complete_message}],
133
- )
134
- total_tokens = input_tokens + output_tokens
135
- record_token_usage(
113
+ messages = kwargs.get("messages")
114
+ if (
115
+ messages is not None
116
+ and len(messages) > 0
117
+ and should_send_default_pii()
118
+ and integration.include_prompts
119
+ ):
120
+ set_data_normalized(
121
+ span, SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(messages)
122
+ )
123
+
124
+ set_data_normalized(
125
+ span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False)
126
+ )
127
+
128
+ kwargs_keys_to_attributes = {
129
+ "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
130
+ "model": SPANDATA.GEN_AI_REQUEST_MODEL,
131
+ "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
132
+ "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
133
+ "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
134
+ }
135
+ for key, attribute in kwargs_keys_to_attributes.items():
136
+ value = kwargs.get(key)
137
+ if value is not NOT_GIVEN and value is not None:
138
+ set_data_normalized(span, attribute, value)
139
+
140
+ # Input attributes: Tools
141
+ tools = kwargs.get("tools")
142
+ if tools is not NOT_GIVEN and tools is not None and len(tools) > 0:
143
+ set_data_normalized(
144
+ span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
145
+ )
146
+
147
+
148
+ def _set_output_data(
149
+ span,
150
+ integration,
151
+ model,
152
+ input_tokens,
153
+ output_tokens,
154
+ content_blocks,
155
+ finish_span=False,
156
+ ):
157
+ # type: (Span, AnthropicIntegration, str | None, int | None, int | None, list[Any], bool) -> None
158
+ """
159
+ Set output data for the span based on the AI response."""
160
+ span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
161
+ if should_send_default_pii() and integration.include_prompts:
162
+ set_data_normalized(
136
163
  span,
137
- input_tokens=input_tokens,
138
- output_tokens=output_tokens,
139
- total_tokens=total_tokens,
164
+ SPANDATA.GEN_AI_RESPONSE_TEXT,
165
+ json.dumps(content_blocks),
166
+ unpack=False,
140
167
  )
141
- span.set_data(SPANDATA.AI_STREAMING, True)
168
+
169
+ record_token_usage(
170
+ span,
171
+ input_tokens=input_tokens,
172
+ output_tokens=output_tokens,
173
+ )
174
+
175
+ # TODO: GEN_AI_RESPONSE_TOOL_CALLS ?
176
+
177
+ if finish_span:
178
+ span.__exit__(None, None, None)
142
179
 
143
180
 
144
181
  def _sentry_patched_create_common(f, *args, **kwargs):
@@ -155,31 +192,41 @@ def _sentry_patched_create_common(f, *args, **kwargs):
155
192
  except TypeError:
156
193
  return f(*args, **kwargs)
157
194
 
195
+ model = kwargs.get("model", "")
196
+
158
197
  span = sentry_sdk.start_span(
159
- op=OP.ANTHROPIC_MESSAGES_CREATE,
160
- description="Anthropic messages create",
198
+ op=OP.GEN_AI_CHAT,
199
+ name=f"chat {model}".strip(),
161
200
  origin=AnthropicIntegration.origin,
162
201
  )
163
202
  span.__enter__()
164
203
 
165
- result = yield f, args, kwargs
204
+ _set_input_data(span, kwargs, integration)
166
205
 
167
- # add data to span and finish it
168
- messages = list(kwargs["messages"])
169
- model = kwargs.get("model")
206
+ result = yield f, args, kwargs
170
207
 
171
208
  with capture_internal_exceptions():
172
- span.set_data(SPANDATA.AI_MODEL_ID, model)
173
- span.set_data(SPANDATA.AI_STREAMING, False)
174
-
175
- if should_send_default_pii() and integration.include_prompts:
176
- span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)
177
-
178
209
  if hasattr(result, "content"):
179
- if should_send_default_pii() and integration.include_prompts:
180
- span.set_data(SPANDATA.AI_RESPONSES, _get_responses(result.content))
181
- _calculate_token_usage(result, span)
182
- span.__exit__(None, None, None)
210
+ input_tokens, output_tokens = _get_token_usage(result)
211
+
212
+ content_blocks = []
213
+ for content_block in result.content:
214
+ if hasattr(content_block, "to_dict"):
215
+ content_blocks.append(content_block.to_dict())
216
+ elif hasattr(content_block, "model_dump"):
217
+ content_blocks.append(content_block.model_dump())
218
+ elif hasattr(content_block, "text"):
219
+ content_blocks.append({"type": "text", "text": content_block.text})
220
+
221
+ _set_output_data(
222
+ span=span,
223
+ integration=integration,
224
+ model=getattr(result, "model", None),
225
+ input_tokens=input_tokens,
226
+ output_tokens=output_tokens,
227
+ content_blocks=content_blocks,
228
+ finish_span=True,
229
+ )
183
230
 
184
231
  # Streaming response
185
232
  elif hasattr(result, "_iterator"):
@@ -187,37 +234,53 @@ def _sentry_patched_create_common(f, *args, **kwargs):
187
234
 
188
235
  def new_iterator():
189
236
  # type: () -> Iterator[MessageStreamEvent]
237
+ model = None
190
238
  input_tokens = 0
191
239
  output_tokens = 0
192
240
  content_blocks = [] # type: list[str]
193
241
 
194
242
  for event in old_iterator:
195
- input_tokens, output_tokens, content_blocks = _collect_ai_data(
196
- event, input_tokens, output_tokens, content_blocks
243
+ model, input_tokens, output_tokens, content_blocks = (
244
+ _collect_ai_data(
245
+ event, model, input_tokens, output_tokens, content_blocks
246
+ )
197
247
  )
198
248
  yield event
199
249
 
200
- _add_ai_data_to_span(
201
- span, integration, input_tokens, output_tokens, content_blocks
250
+ _set_output_data(
251
+ span=span,
252
+ integration=integration,
253
+ model=model,
254
+ input_tokens=input_tokens,
255
+ output_tokens=output_tokens,
256
+ content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
257
+ finish_span=True,
202
258
  )
203
- span.__exit__(None, None, None)
204
259
 
205
260
  async def new_iterator_async():
206
261
  # type: () -> AsyncIterator[MessageStreamEvent]
262
+ model = None
207
263
  input_tokens = 0
208
264
  output_tokens = 0
209
265
  content_blocks = [] # type: list[str]
210
266
 
211
267
  async for event in old_iterator:
212
- input_tokens, output_tokens, content_blocks = _collect_ai_data(
213
- event, input_tokens, output_tokens, content_blocks
268
+ model, input_tokens, output_tokens, content_blocks = (
269
+ _collect_ai_data(
270
+ event, model, input_tokens, output_tokens, content_blocks
271
+ )
214
272
  )
215
273
  yield event
216
274
 
217
- _add_ai_data_to_span(
218
- span, integration, input_tokens, output_tokens, content_blocks
275
+ _set_output_data(
276
+ span=span,
277
+ integration=integration,
278
+ model=model,
279
+ input_tokens=input_tokens,
280
+ output_tokens=output_tokens,
281
+ content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
282
+ finish_span=True,
219
283
  )
220
- span.__exit__(None, None, None)
221
284
 
222
285
  if str(type(result._iterator)) == "<class 'async_generator'>":
223
286
  result._iterator = new_iterator_async()
@@ -12,7 +12,6 @@ from functools import partial
12
12
  import sentry_sdk
13
13
  from sentry_sdk.api import continue_trace
14
14
  from sentry_sdk.consts import OP
15
-
16
15
  from sentry_sdk.integrations._asgi_common import (
17
16
  _get_headers,
18
17
  _get_request_data,
@@ -42,7 +41,6 @@ from typing import TYPE_CHECKING
42
41
 
43
42
  if TYPE_CHECKING:
44
43
  from typing import Any
45
- from typing import Callable
46
44
  from typing import Dict
47
45
  from typing import Optional
48
46
  from typing import Tuple
@@ -102,6 +100,7 @@ class SentryAsgiMiddleware:
102
100
  mechanism_type="asgi", # type: str
103
101
  span_origin="manual", # type: str
104
102
  http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE, # type: Tuple[str, ...]
103
+ asgi_version=None, # type: Optional[int]
105
104
  ):
106
105
  # type: (...) -> None
107
106
  """
@@ -140,10 +139,16 @@ class SentryAsgiMiddleware:
140
139
  self.app = app
141
140
  self.http_methods_to_capture = http_methods_to_capture
142
141
 
143
- if _looks_like_asgi3(app):
144
- self.__call__ = self._run_asgi3 # type: Callable[..., Any]
145
- else:
146
- self.__call__ = self._run_asgi2
142
+ if asgi_version is None:
143
+ if _looks_like_asgi3(app):
144
+ asgi_version = 3
145
+ else:
146
+ asgi_version = 2
147
+
148
+ if asgi_version == 3:
149
+ self.__call__ = self._run_asgi3
150
+ elif asgi_version == 2:
151
+ self.__call__ = self._run_asgi2 # type: ignore
147
152
 
148
153
  def _capture_lifespan_exception(self, exc):
149
154
  # type: (Exception) -> None
@@ -217,10 +222,6 @@ class SentryAsgiMiddleware:
217
222
  source=transaction_source,
218
223
  origin=self.span_origin,
219
224
  )
220
- logger.debug(
221
- "[ASGI] Created transaction (continuing trace): %s",
222
- transaction,
223
- )
224
225
  else:
225
226
  transaction = Transaction(
226
227
  op=OP.HTTP_SERVER,
@@ -228,17 +229,9 @@ class SentryAsgiMiddleware:
228
229
  source=transaction_source,
229
230
  origin=self.span_origin,
230
231
  )
231
- logger.debug(
232
- "[ASGI] Created transaction (new): %s", transaction
233
- )
234
232
 
235
233
  if transaction:
236
234
  transaction.set_tag("asgi.type", ty)
237
- logger.debug(
238
- "[ASGI] Set transaction name and source on transaction: '%s' / '%s'",
239
- transaction.name,
240
- transaction.source,
241
- )
242
235
 
243
236
  with (
244
237
  sentry_sdk.start_transaction(
@@ -248,7 +241,6 @@ class SentryAsgiMiddleware:
248
241
  if transaction is not None
249
242
  else nullcontext()
250
243
  ):
251
- logger.debug("[ASGI] Started transaction: %s", transaction)
252
244
  try:
253
245
 
254
246
  async def _sentry_wrapped_send(event):
@@ -303,12 +295,6 @@ class SentryAsgiMiddleware:
303
295
  event["transaction"] = name
304
296
  event["transaction_info"] = {"source": source}
305
297
 
306
- logger.debug(
307
- "[ASGI] Set transaction name and source in event_processor: '%s' / '%s'",
308
- event["transaction"],
309
- event["transaction_info"]["source"],
310
- )
311
-
312
298
  return event
313
299
 
314
300
  # Helper functions.
@@ -11,7 +11,8 @@ from typing import TYPE_CHECKING, TypeVar
11
11
  # without introducing a hard dependency on `typing_extensions`
12
12
  # from: https://stackoverflow.com/a/71944042/300572
13
13
  if TYPE_CHECKING:
14
- from typing import ParamSpec, Callable
14
+ from collections.abc import Iterator
15
+ from typing import Any, ParamSpec, Callable
15
16
  else:
16
17
  # Fake ParamSpec
17
18
  class ParamSpec:
@@ -49,9 +50,7 @@ class ClickhouseDriverIntegration(Integration):
49
50
  )
50
51
 
51
52
  # If the query contains parameters then the send_data function is used to send those parameters to clickhouse
52
- clickhouse_driver.client.Client.send_data = _wrap_send_data(
53
- clickhouse_driver.client.Client.send_data
54
- )
53
+ _wrap_send_data()
55
54
 
56
55
  # Every query ends either with the Client's `receive_end_of_query` (no result expected)
57
56
  # or its `receive_result` (result expected)
@@ -128,23 +127,44 @@ def _wrap_end(f: Callable[P, T]) -> Callable[P, T]:
128
127
  return _inner_end
129
128
 
130
129
 
131
- def _wrap_send_data(f: Callable[P, T]) -> Callable[P, T]:
132
- def _inner_send_data(*args: P.args, **kwargs: P.kwargs) -> T:
133
- instance = args[0] # type: clickhouse_driver.client.Client
134
- data = args[2]
135
- span = getattr(instance.connection, "_sentry_span", None)
130
+ def _wrap_send_data() -> None:
131
+ original_send_data = clickhouse_driver.client.Client.send_data
132
+
133
+ def _inner_send_data( # type: ignore[no-untyped-def] # clickhouse-driver does not type send_data
134
+ self, sample_block, data, types_check=False, columnar=False, *args, **kwargs
135
+ ):
136
+ span = getattr(self.connection, "_sentry_span", None)
136
137
 
137
138
  if span is not None:
138
- _set_db_data(span, instance.connection)
139
+ _set_db_data(span, self.connection)
139
140
 
140
141
  if should_send_default_pii():
141
142
  db_params = span._data.get("db.params", [])
142
- db_params.extend(data)
143
+
144
+ if isinstance(data, (list, tuple)):
145
+ db_params.extend(data)
146
+
147
+ else: # data is a generic iterator
148
+ orig_data = data
149
+
150
+ # Wrap the generator to add items to db.params as they are yielded.
151
+ # This allows us to send the params to Sentry without needing to allocate
152
+ # memory for the entire generator at once.
153
+ def wrapped_generator() -> "Iterator[Any]":
154
+ for item in orig_data:
155
+ db_params.append(item)
156
+ yield item
157
+
158
+ # Replace the original iterator with the wrapped one.
159
+ data = wrapped_generator()
160
+
143
161
  span.set_data("db.params", db_params)
144
162
 
145
- return f(*args, **kwargs)
163
+ return original_send_data(
164
+ self, sample_block, data, types_check, columnar, *args, **kwargs
165
+ )
146
166
 
147
- return _inner_send_data
167
+ clickhouse_driver.client.Client.send_data = _inner_send_data
148
168
 
149
169
 
150
170
  def _set_db_data(
@@ -155,7 +155,7 @@ def patch_channels_asgi_handler_impl(cls):
155
155
  http_methods_to_capture=integration.http_methods_to_capture,
156
156
  )
157
157
 
158
- return await middleware(self.scope)(receive, send)
158
+ return await middleware(self.scope)(receive, send) # type: ignore
159
159
 
160
160
  cls.__call__ = sentry_patched_asgi_handler
161
161
 
@@ -6,10 +6,7 @@ import sentry_sdk
6
6
  from sentry_sdk.integrations import DidNotEnable
7
7
  from sentry_sdk.scope import should_send_default_pii
8
8
  from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource
9
- from sentry_sdk.utils import (
10
- transaction_from_function,
11
- logger,
12
- )
9
+ from sentry_sdk.utils import transaction_from_function
13
10
 
14
11
  from typing import TYPE_CHECKING
15
12
 
@@ -66,9 +63,6 @@ def _set_transaction_name_and_source(scope, transaction_style, request):
66
63
  source = SOURCE_FOR_STYLE[transaction_style]
67
64
 
68
65
  scope.set_transaction_name(name, source=source)
69
- logger.debug(
70
- "[FastAPI] Set transaction name and source on scope: %s / %s", name, source
71
- )
72
66
 
73
67
 
74
68
  def patch_get_request_handler():
@@ -11,13 +11,16 @@ if TYPE_CHECKING:
11
11
  from typing import Any
12
12
  from sentry_sdk._types import Event
13
13
 
14
-
15
- FUNCTION_RE = r"[^@]+?)\s+@\s+0x[0-9a-fA-F]+"
14
+ # function is everything between index at @
15
+ # and then we match on the @ plus the hex val
16
+ FUNCTION_RE = r"[^@]+?"
17
+ HEX_ADDRESS = r"\s+@\s+0x[0-9a-fA-F]+"
16
18
 
17
19
  FRAME_RE = r"""
18
- ^(?P<index>\d+)\.\s+(?P<function>{FUNCTION_RE}\s+in\s+(?P<package>.+)$
20
+ ^(?P<index>\d+)\.\s+(?P<function>{FUNCTION_RE}){HEX_ADDRESS}(?:\s+in\s+(?P<package>.+))?$
19
21
  """.format(
20
22
  FUNCTION_RE=FUNCTION_RE,
23
+ HEX_ADDRESS=HEX_ADDRESS,
21
24
  )
22
25
 
23
26
  FRAME_RE = re.compile(FRAME_RE, re.MULTILINE | re.VERBOSE)