matrix-synapse 1.139.0rc2__cp39-abi3-musllinux_1_2_aarch64.whl → 1.140.0rc1__cp39-abi3-musllinux_1_2_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of matrix-synapse might be problematic.

Files changed (159)
  1. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
  2. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +158 -155
  3. synapse/_scripts/generate_workers_map.py +6 -1
  4. synapse/_scripts/synapse_port_db.py +0 -2
  5. synapse/_scripts/update_synapse_database.py +1 -6
  6. synapse/api/auth/base.py +1 -3
  7. synapse/api/auth/mas.py +6 -8
  8. synapse/api/auth/msc3861_delegated.py +6 -8
  9. synapse/api/errors.py +3 -0
  10. synapse/app/_base.py +101 -39
  11. synapse/app/admin_cmd.py +2 -4
  12. synapse/app/appservice.py +1 -1
  13. synapse/app/client_reader.py +1 -1
  14. synapse/app/event_creator.py +1 -1
  15. synapse/app/federation_reader.py +1 -1
  16. synapse/app/federation_sender.py +1 -1
  17. synapse/app/frontend_proxy.py +1 -1
  18. synapse/app/generic_worker.py +17 -11
  19. synapse/app/homeserver.py +85 -47
  20. synapse/app/media_repository.py +1 -1
  21. synapse/app/phone_stats_home.py +16 -14
  22. synapse/app/pusher.py +1 -1
  23. synapse/app/synchrotron.py +1 -1
  24. synapse/app/user_dir.py +1 -1
  25. synapse/appservice/__init__.py +29 -2
  26. synapse/appservice/scheduler.py +8 -8
  27. synapse/config/_base.py +32 -14
  28. synapse/config/_base.pyi +5 -3
  29. synapse/config/experimental.py +3 -0
  30. synapse/config/homeserver.py +27 -1
  31. synapse/config/logger.py +3 -4
  32. synapse/config/matrixrtc.py +67 -0
  33. synapse/crypto/keyring.py +18 -4
  34. synapse/events/auto_accept_invites.py +0 -1
  35. synapse/federation/federation_client.py +39 -0
  36. synapse/federation/federation_server.py +1 -1
  37. synapse/federation/send_queue.py +3 -0
  38. synapse/federation/sender/__init__.py +24 -8
  39. synapse/federation/sender/per_destination_queue.py +31 -8
  40. synapse/federation/sender/transaction_manager.py +12 -0
  41. synapse/federation/transport/client.py +29 -0
  42. synapse/handlers/account_validity.py +2 -4
  43. synapse/handlers/appservice.py +5 -7
  44. synapse/handlers/deactivate_account.py +2 -3
  45. synapse/handlers/delayed_events.py +10 -13
  46. synapse/handlers/device.py +14 -14
  47. synapse/handlers/e2e_keys.py +16 -11
  48. synapse/handlers/federation.py +7 -11
  49. synapse/handlers/federation_event.py +5 -6
  50. synapse/handlers/message.py +16 -10
  51. synapse/handlers/pagination.py +3 -7
  52. synapse/handlers/presence.py +21 -25
  53. synapse/handlers/profile.py +1 -1
  54. synapse/handlers/read_marker.py +3 -1
  55. synapse/handlers/register.py +8 -1
  56. synapse/handlers/room.py +13 -4
  57. synapse/handlers/room_member.py +11 -7
  58. synapse/handlers/room_policy.py +96 -2
  59. synapse/handlers/sso.py +1 -1
  60. synapse/handlers/stats.py +5 -3
  61. synapse/handlers/sync.py +20 -13
  62. synapse/handlers/typing.py +5 -10
  63. synapse/handlers/user_directory.py +12 -11
  64. synapse/handlers/worker_lock.py +19 -15
  65. synapse/http/client.py +18 -13
  66. synapse/http/federation/matrix_federation_agent.py +6 -1
  67. synapse/http/federation/well_known_resolver.py +3 -1
  68. synapse/http/matrixfederationclient.py +50 -11
  69. synapse/http/proxy.py +2 -2
  70. synapse/http/server.py +36 -2
  71. synapse/http/site.py +109 -17
  72. synapse/logging/context.py +201 -110
  73. synapse/logging/opentracing.py +30 -6
  74. synapse/logging/scopecontextmanager.py +161 -0
  75. synapse/media/_base.py +2 -1
  76. synapse/media/media_repository.py +20 -6
  77. synapse/media/url_previewer.py +5 -6
  78. synapse/metrics/_gc.py +3 -1
  79. synapse/metrics/background_process_metrics.py +128 -24
  80. synapse/metrics/common_usage_metrics.py +3 -5
  81. synapse/module_api/__init__.py +42 -5
  82. synapse/notifier.py +10 -3
  83. synapse/push/emailpusher.py +5 -4
  84. synapse/push/httppusher.py +6 -6
  85. synapse/push/pusherpool.py +3 -8
  86. synapse/replication/http/devices.py +0 -41
  87. synapse/replication/tcp/client.py +8 -5
  88. synapse/replication/tcp/handler.py +2 -3
  89. synapse/replication/tcp/protocol.py +14 -7
  90. synapse/replication/tcp/redis.py +16 -11
  91. synapse/replication/tcp/resource.py +5 -4
  92. synapse/replication/tcp/streams/__init__.py +2 -0
  93. synapse/res/providers.json +6 -5
  94. synapse/rest/__init__.py +2 -0
  95. synapse/rest/admin/__init__.py +4 -0
  96. synapse/rest/admin/events.py +69 -0
  97. synapse/rest/admin/media.py +70 -2
  98. synapse/rest/client/keys.py +147 -3
  99. synapse/rest/client/matrixrtc.py +52 -0
  100. synapse/rest/client/push_rule.py +1 -1
  101. synapse/rest/client/room.py +2 -3
  102. synapse/rest/client/sync.py +1 -3
  103. synapse/rest/client/transactions.py +1 -1
  104. synapse/server.py +271 -38
  105. synapse/server_notices/server_notices_manager.py +1 -0
  106. synapse/state/__init__.py +4 -1
  107. synapse/storage/_base.py +1 -1
  108. synapse/storage/background_updates.py +8 -3
  109. synapse/storage/controllers/persist_events.py +4 -3
  110. synapse/storage/controllers/purge_events.py +2 -3
  111. synapse/storage/controllers/state.py +5 -5
  112. synapse/storage/database.py +12 -7
  113. synapse/storage/databases/main/__init__.py +7 -2
  114. synapse/storage/databases/main/cache.py +4 -3
  115. synapse/storage/databases/main/censor_events.py +1 -1
  116. synapse/storage/databases/main/client_ips.py +9 -8
  117. synapse/storage/databases/main/deviceinbox.py +7 -6
  118. synapse/storage/databases/main/devices.py +4 -4
  119. synapse/storage/databases/main/end_to_end_keys.py +6 -3
  120. synapse/storage/databases/main/event_federation.py +7 -6
  121. synapse/storage/databases/main/event_push_actions.py +13 -13
  122. synapse/storage/databases/main/events_bg_updates.py +1 -1
  123. synapse/storage/databases/main/events_worker.py +6 -8
  124. synapse/storage/databases/main/lock.py +17 -13
  125. synapse/storage/databases/main/media_repository.py +2 -2
  126. synapse/storage/databases/main/metrics.py +6 -6
  127. synapse/storage/databases/main/monthly_active_users.py +3 -4
  128. synapse/storage/databases/main/receipts.py +1 -1
  129. synapse/storage/databases/main/registration.py +18 -19
  130. synapse/storage/databases/main/roommember.py +1 -1
  131. synapse/storage/databases/main/session.py +3 -3
  132. synapse/storage/databases/main/sliding_sync.py +2 -2
  133. synapse/storage/databases/main/transactions.py +3 -3
  134. synapse/storage/databases/state/store.py +2 -0
  135. synapse/synapse_rust/http_client.pyi +4 -0
  136. synapse/synapse_rust.abi3.so +0 -0
  137. synapse/util/async_helpers.py +36 -24
  138. synapse/util/batching_queue.py +16 -6
  139. synapse/util/caches/__init__.py +1 -1
  140. synapse/util/caches/deferred_cache.py +4 -0
  141. synapse/util/caches/descriptors.py +14 -2
  142. synapse/util/caches/dictionary_cache.py +6 -1
  143. synapse/util/caches/expiringcache.py +16 -5
  144. synapse/util/caches/lrucache.py +14 -26
  145. synapse/util/caches/response_cache.py +11 -1
  146. synapse/util/clock.py +215 -39
  147. synapse/util/constants.py +2 -0
  148. synapse/util/daemonize.py +5 -1
  149. synapse/util/distributor.py +9 -5
  150. synapse/util/metrics.py +35 -6
  151. synapse/util/ratelimitutils.py +4 -1
  152. synapse/util/retryutils.py +7 -4
  153. synapse/util/task_scheduler.py +11 -14
  154. synapse/logging/filter.py +0 -38
  155. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
  156. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
  157. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
  158. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
  159. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/logging/context.py

@@ -33,7 +33,6 @@ See doc/log_contexts.rst for details on how this works.
  import logging
  import threading
  import typing
- import warnings
  from types import TracebackType
  from typing import (
  TYPE_CHECKING,
@@ -55,11 +54,29 @@ from typing_extensions import ParamSpec
  from twisted.internet import defer, threads
  from twisted.python.threadpool import ThreadPool

+ from synapse.logging.loggers import ExplicitlyConfiguredLogger
+ from synapse.util.stringutils import random_string
+
  if TYPE_CHECKING:
+ from synapse.logging.scopecontextmanager import _LogContextScope
  from synapse.types import ISynapseReactor

  logger = logging.getLogger(__name__)

+ original_logger_class = logging.getLoggerClass()
+ logging.setLoggerClass(ExplicitlyConfiguredLogger)
+ logcontext_debug_logger = logging.getLogger("synapse.logging.context.debug")
+ """
+ A logger for debugging when the logcontext switches.
+
+ Because this is very noisy and probably something only developers want to see when
+ debugging logcontext problems, we want people to explictly opt-in before seeing anything
+ in the logs. Requires explicitly setting `synapse.logging.context.debug` in the logging
+ configuration and does not inherit the log level from the parent logger.
+ """
+ # Restore the original logger class
+ logging.setLoggerClass(original_logger_class)
+
  try:
  import resource
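The debug logger added above is deliberately opt-in. A minimal sketch (not taken from the Synapse docs) of enabling it with the standard library `dictConfig` schema that Synapse's YAML log config already follows:

```python
# Minimal sketch, assuming the standard dictConfig schema; in a real deployment
# the equivalent entry would go under `loggers:` in the homeserver's YAML log config.
import logging.config

logging.config.dictConfig(
    {
        "version": 1,
        "incremental": True,  # only adjust levels, keep existing handlers/loggers
        "loggers": {
            # Must be configured by its exact name: per the docstring above, the
            # level is not inherited from the parent "synapse.logging.context" logger.
            "synapse.logging.context.debug": {"level": "DEBUG"},
        },
    }
)
```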

@@ -238,13 +255,22 @@ class _Sentinel:
  we should always know which server the logs are coming from.
  """

- __slots__ = ["previous_context", "finished", "request", "tag"]
+ __slots__ = [
+ "previous_context",
+ "finished",
+ "scope",
+ "server_name",
+ "request",
+ "tag",
+ ]

  def __init__(self) -> None:
  # Minimal set for compatibility with LoggingContext
  self.previous_context = None
  self.finished = False
+ self.server_name = "unknown_server_from_sentinel_context"
  self.request = None
+ self.scope = None
  self.tag = None

  def __str__(self) -> str:
@@ -282,14 +308,19 @@ class LoggingContext:
  child to the parent

  Args:
- name: Name for the context for logging. If this is omitted, it is
- inherited from the parent context.
+ name: Name for the context for logging.
+ server_name: The name of the server this context is associated with
+ (`config.server.server_name` or `hs.hostname`)
  parent_context (LoggingContext|None): The parent of the new context
+ request: Synapse Request Context object. Useful to associate all the logs
+ happening to a given request.
+
  """

  __slots__ = [
  "previous_context",
  "name",
+ "server_name",
  "parent_context",
  "_resource_usage",
  "usage_start",
@@ -297,11 +328,14 @@
  "finished",
  "request",
  "tag",
+ "scope",
  ]

  def __init__(
  self,
- name: Optional[str] = None,
+ *,
+ name: str,
+ server_name: str,
  parent_context: "Optional[LoggingContext]" = None,
  request: Optional[ContextRequest] = None,
  ) -> None:
@@ -314,9 +348,12 @@
  # if the context is not currently active.
  self.usage_start: Optional[resource.struct_rusage] = None

+ self.name = name
+ self.server_name = server_name
  self.main_thread = get_thread_id()
  self.request = None
  self.tag = ""
+ self.scope: Optional["_LogContextScope"] = None

  # keep track of whether we have hit the __exit__ block for this context
  # (suggesting that the the thing that created the context thinks it should
@@ -325,69 +362,24 @@

  self.parent_context = parent_context

+ # Inherit some fields from the parent context
  if self.parent_context is not None:
- # we track the current request_id
+ # which request this corresponds to
  self.request = self.parent_context.request

+ # we also track the current scope:
+ self.scope = self.parent_context.scope
+
  if request is not None:
  # the request param overrides the request from the parent context
  self.request = request

- # if we don't have a `name`, but do have a parent context, use its name.
- if self.parent_context and name is None:
- name = str(self.parent_context)
- if name is None:
- raise ValueError(
- "LoggingContext must be given either a name or a parent context"
- )
- self.name = name
-
  def __str__(self) -> str:
  return self.name

- @classmethod
- def current_context(cls) -> LoggingContextOrSentinel:
- """Get the current logging context from thread local storage
-
- This exists for backwards compatibility. ``current_context()`` should be
- called directly.
-
- Returns:
- The current logging context
- """
- warnings.warn(
- "synapse.logging.context.LoggingContext.current_context() is deprecated "
- "in favor of synapse.logging.context.current_context().",
- DeprecationWarning,
- stacklevel=2,
- )
- return current_context()
-
- @classmethod
- def set_current_context(
- cls, context: LoggingContextOrSentinel
- ) -> LoggingContextOrSentinel:
- """Set the current logging context in thread local storage
-
- This exists for backwards compatibility. ``set_current_context()`` should be
- called directly.
-
- Args:
- context: The context to activate.
-
- Returns:
- The context that was previously active
- """
- warnings.warn(
- "synapse.logging.context.LoggingContext.set_current_context() is deprecated "
- "in favor of synapse.logging.context.set_current_context().",
- DeprecationWarning,
- stacklevel=2,
- )
- return set_current_context(context)
-
  def __enter__(self) -> "LoggingContext":
  """Enters this logging context into thread local storage"""
+ logcontext_debug_logger.debug("LoggingContext(%s).__enter__", self.name)
  old_context = set_current_context(self)
  if self.previous_context != old_context:
  logcontext_error(
@@ -410,6 +402,9 @@
  Returns:
  None to avoid suppressing any exceptions that were thrown.
  """
+ logcontext_debug_logger.debug(
+ "LoggingContext(%s).__exit__ --> %s", self.name, self.previous_context
+ )
  current = set_current_context(self.previous_context)
  if current is not self:
  if current is SENTINEL_CONTEXT:
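`name` and `server_name` are now required keyword-only arguments on `LoggingContext`. A contrived usage sketch of the new constructor (both values below are placeholders):

```python
# Contrived sketch: the constructor now takes keyword-only `name` and `server_name`.
from synapse.logging.context import LoggingContext

with LoggingContext(name="request-handler", server_name="example.com"):
    # Records logged in here are stamped with this context's name (as `request`)
    # and with `server_name` by LoggingContextFilter (see the filter changes below).
    ...
```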
@@ -588,7 +583,26 @@ class LoggingContextFilter(logging.Filter):
  record.
  """

- def __init__(self, request: str = ""):
+ def __init__(
+ self,
+ # `request` is here for backwards compatibility since we previously recommended
+ # people manually configure `LoggingContextFilter` like the following.
+ #
+ # ```yaml
+ # filters:
+ # context:
+ # (): synapse.logging.context.LoggingContextFilter
+ # request: ""
+ # ```
+ #
+ # TODO: Since we now configure `LoggingContextFilter` automatically since #8051
+ # (2020-08-11), we could consider removing this useless parameter. This would
+ # require people to remove their own manual configuration of
+ # `LoggingContextFilter` as it would cause `TypeError: Filter.__init__() got an
+ # unexpected keyword argument 'request'` -> `ValueError: Unable to configure
+ # filter 'context'`
+ request: str = "",
+ ):
  self._default_request = request

  def filter(self, record: logging.LogRecord) -> Literal[True]:
@@ -598,11 +612,13 @@
  """
  context = current_context()
  record.request = self._default_request
+ record.server_name = "unknown_server_from_no_context"

  # context should never be None, but if it somehow ends up being, then
  # we end up in a death spiral of infinite loops, so let's check, for
  # robustness' sake.
  if context is not None:
+ record.server_name = context.server_name
  # Logging is interested in the request ID. Note that for backwards
  # compatibility this is stored as the "request" on the record.
  record.request = str(context)
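The filter now stamps `server_name` onto every record alongside `request`, so a formatter can surface both fields. A hypothetical format string (only safe for handlers whose records pass through this filter):

```python
# Hypothetical formatter using the fields set by LoggingContextFilter:
# %(request)s carries the logcontext name, %(server_name)s is the new field.
import logging

formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(server_name)s - %(request)s - %(levelname)s - %(message)s"
)
```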
@@ -637,14 +653,21 @@ class PreserveLoggingContext:
  reactor back to the code).
  """

- __slots__ = ["_old_context", "_new_context"]
+ __slots__ = ["_old_context", "_new_context", "_instance_id"]

  def __init__(
  self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT
  ) -> None:
  self._new_context = new_context
+ self._instance_id = random_string(5)

  def __enter__(self) -> None:
+ logcontext_debug_logger.debug(
+ "PreserveLoggingContext(%s).__enter__ %s --> %s",
+ self._instance_id,
+ current_context(),
+ self._new_context,
+ )
  self._old_context = set_current_context(self._new_context)

  def __exit__(
@@ -653,6 +676,12 @@
  value: Optional[BaseException],
  traceback: Optional[TracebackType],
  ) -> None:
+ logcontext_debug_logger.debug(
+ "PreserveLoggingContext(%s).__exit %s --> %s",
+ self._instance_id,
+ current_context(),
+ self._old_context,
+ )
  context = set_current_context(self._old_context)

  if context != self._new_context:
@@ -728,12 +757,15 @@ def nested_logging_context(suffix: str) -> LoggingContext:
  "Starting nested logging context from sentinel context: metrics will be lost"
  )
  parent_context = None
+ server_name = "unknown_server_from_sentinel_context"
  else:
  assert isinstance(curr_context, LoggingContext)
  parent_context = curr_context
+ server_name = parent_context.server_name
  prefix = str(curr_context)
  return LoggingContext(
- prefix + "-" + suffix,
+ name=prefix + "-" + suffix,
+ server_name=server_name,
  parent_context=parent_context,
  )
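`nested_logging_context` now forwards the parent's `server_name` into the derived context. A small usage sketch (names are illustrative):

```python
# Illustrative sketch: the nested context is named "main-subtask" and inherits
# `server_name` from the currently active context.
from synapse.logging.context import LoggingContext, nested_logging_context

with LoggingContext(name="main", server_name="example.com"):
    with nested_logging_context("subtask"):
        ...  # work logged here is attributed to the derived context
```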

@@ -802,13 +834,15 @@
  deferred returned by the function completes.

  To explain how the log contexts work here:
- - When `run_in_background` is called, the current context is stored ("original"),
- we kick off the background task in the current context, and we restore that
- original context before returning
- - When the background task finishes, we don't want to leak our context into the
- reactor which would erroneously get attached to the next operation picked up by
- the event loop. We add a callback to the deferred which will clear the logging
- context after it finishes and yields control back to the reactor.
+ - When `run_in_background` is called, the calling logcontext is stored
+ ("original"), we kick off the background task in the current context, and we
+ restore that original context before returning.
+ - For a completed deferred, that's the end of the story.
+ - For an incomplete deferred, when the background task finishes, we don't want to
+ leak our context into the reactor which would erroneously get attached to the
+ next operation picked up by the event loop. We add a callback to the deferred
+ which will clear the logging context after it finishes and yields control back to
+ the reactor.

  Useful for wrapping functions that return a deferred or coroutine, which you don't
  yield or await on (for instance because you want to pass it to
@@ -827,7 +861,11 @@
  Note that the returned Deferred does not follow the synapse logcontext
  rules.
  """
+ instance_id = random_string(5)
  calling_context = current_context()
+ logcontext_debug_logger.debug(
+ "run_in_background(%s): called with logcontext=%s", instance_id, calling_context
+ )
  try:
  # (kick off the task in the current context)
  res = f(*args, **kwargs)
@@ -857,22 +895,46 @@

  # The deferred has already completed
  if d.called and not d.paused:
- # The function should have maintained the logcontext, so we can
- # optimise out the messing about
+ # If the function messes with logcontexts, we can assume it follows the Synapse
+ # logcontext rules (Rules for functions returning awaitables: "If the awaitable
+ # is already complete, the function returns with the same logcontext it started
+ # with."). If it function doesn't touch logcontexts at all, we can also assume
+ # the logcontext is unchanged.
+ #
+ # Either way, the function should have maintained the calling logcontext, so we
+ # can avoid messing with it further. Additionally, if the deferred has already
+ # completed, then it would be a mistake to then add a deferred callback (below)
+ # to reset the logcontext to the sentinel logcontext as that would run
+ # immediately (remember our goal is to maintain the calling logcontext when we
+ # return).
+ logcontext_debug_logger.debug(
+ "run_in_background(%s): deferred already completed and the function should have maintained the logcontext %s",
+ instance_id,
+ calling_context,
+ )
  return d

- # The function may have reset the context before returning, so we need to restore it
- # now.
+ # Since the function we called may follow the Synapse logcontext rules (Rules for
+ # functions returning awaitables: "If the awaitable is incomplete, the function
+ # clears the logcontext before returning"), the function may have reset the
+ # logcontext before returning, so we need to restore the calling logcontext now
+ # before we return ourselves.
  #
  # Our goal is to have the caller logcontext unchanged after firing off the
  # background task and returning.
+ logcontext_debug_logger.debug(
+ "run_in_background(%s): restoring calling logcontext %s",
+ instance_id,
+ calling_context,
+ )
  set_current_context(calling_context)

- # The original logcontext will be restored when the deferred completes, but
- # there is nothing waiting for it, so it will get leaked into the reactor (which
- # would then get picked up by the next thing the reactor does). We therefore
- # need to reset the logcontext here (set the `sentinel` logcontext) before
- # yielding control back to the reactor.
+ # If the function we called is playing nice and following the Synapse logcontext
+ # rules, it will restore original calling logcontext when the deferred completes;
+ # but there is nothing waiting for it, so it will get leaked into the reactor (which
+ # would then get picked up by the next thing the reactor does). We therefore need to
+ # reset the logcontext here (set the `sentinel` logcontext) before yielding control
+ # back to the reactor.
  #
  # (If this feels asymmetric, consider it this way: we are
  # effectively forking a new thread of execution. We are
@@ -880,7 +942,23 @@
  # which is supposed to have a single entry and exit point. But
  # by spawning off another deferred, we are effectively
  # adding a new exit point.)
- d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
+ if logcontext_debug_logger.isEnabledFor(logging.DEBUG):
+
+ def _log_set_context_cb(
+ result: ResultT, context: LoggingContextOrSentinel
+ ) -> ResultT:
+ logcontext_debug_logger.debug(
+ "run_in_background(%s): resetting logcontext to %s",
+ instance_id,
+ context,
+ )
+ set_current_context(context)
+ return result
+
+ d.addBoth(_log_set_context_cb, SENTINEL_CONTEXT)
+ else:
+ d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
+
  return d
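A sketch of the calling pattern these comments describe (the names here are hypothetical): the background task is fired off without being awaited, and the caller's logcontext is unchanged when `run_in_background` returns.

```python
# Hypothetical caller: kick off background work without awaiting it.
from synapse.logging.context import run_in_background

async def _update_cache(key: str) -> None:
    ...  # some awaitable work

def on_new_event(key: str) -> None:
    # The returned Deferred is deliberately not awaited; the calling logcontext
    # is restored before run_in_background returns, and the sentinel context is
    # set when the task later yields control back to the reactor.
    run_in_background(_update_cache, key)
```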


@@ -894,10 +972,9 @@
  Useful for wrapping coroutines that you don't yield or await on (for
  instance because you want to pass it to deferred.gatherResults()).

- This is a special case of `run_in_background` where we can accept a
- coroutine directly rather than a function. We can do this because coroutines
- do not run until called, and so calling an async function without awaiting
- cannot change the log contexts.
+ This is a special case of `run_in_background` where we can accept a coroutine
+ directly rather than a function. We can do this because coroutines do not continue
+ running once they have yielded.

  This is an ergonomic helper so we can do this:
  ```python
@@ -908,33 +985,7 @@
  run_in_background(lambda: func1(arg1))
  ```
  """
- calling_context = current_context()
-
- # Wrap the coroutine in a deferred, which will have the side effect of executing the
- # coroutine in the background.
- d = defer.ensureDeferred(coroutine)
-
- # The function may have reset the context before returning, so we need to restore it
- # now.
- #
- # Our goal is to have the caller logcontext unchanged after firing off the
- # background task and returning.
- set_current_context(calling_context)
-
- # The original logcontext will be restored when the deferred completes, but
- # there is nothing waiting for it, so it will get leaked into the reactor (which
- # would then get picked up by the next thing the reactor does). We therefore
- # need to reset the logcontext here (set the `sentinel` logcontext) before
- # yielding control back to the reactor.
- #
- # (If this feels asymmetric, consider it this way: we are
- # effectively forking a new thread of execution. We are
- # probably currently within a ``with LoggingContext()`` block,
- # which is supposed to have a single entry and exit point. But
- # by spawning off another deferred, we are effectively
- # adding a new exit point.)
- d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
- return d
+ return run_in_background(lambda: coroutine)


  T = TypeVar("T")
@@ -963,10 +1014,21 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]
  restores the old context once the awaitable completes (execution passes from the
  reactor back to the code).
  """
+ instance_id = random_string(5)
+ logcontext_debug_logger.debug(
+ "make_deferred_yieldable(%s): called with logcontext=%s",
+ instance_id,
+ current_context(),
+ )
+
  # The deferred has already completed
  if deferred.called and not deferred.paused:
  # it looks like this deferred is ready to run any callbacks we give it
  # immediately. We may as well optimise out the logcontext faffery.
+ logcontext_debug_logger.debug(
+ "make_deferred_yieldable(%s): deferred already completed and the function should have maintained the logcontext",
+ instance_id,
+ )
  return deferred

  # Our goal is to have the caller logcontext unchanged after they yield/await the
@@ -978,8 +1040,31 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]
  # does) while the deferred runs in the reactor event loop, we reset the logcontext
  # and add a callback to the deferred to restore it so the caller's logcontext is
  # active when the deferred completes.
- prev_context = set_current_context(SENTINEL_CONTEXT)
- deferred.addBoth(_set_context_cb, prev_context)
+
+ logcontext_debug_logger.debug(
+ "make_deferred_yieldable(%s): resetting logcontext to %s",
+ instance_id,
+ SENTINEL_CONTEXT,
+ )
+ calling_context = set_current_context(SENTINEL_CONTEXT)
+
+ if logcontext_debug_logger.isEnabledFor(logging.DEBUG):
+
+ def _log_set_context_cb(
+ result: ResultT, context: LoggingContextOrSentinel
+ ) -> ResultT:
+ logcontext_debug_logger.debug(
+ "make_deferred_yieldable(%s): restoring calling logcontext to %s",
+ instance_id,
+ context,
+ )
+ set_current_context(context)
+ return result
+
+ deferred.addBoth(_log_set_context_cb, calling_context)
+ else:
+ deferred.addBoth(_set_context_cb, calling_context)
+
  return deferred
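A sketch of the consumer side (the `agent` argument is a hypothetical Twisted `IAgent`): wrapping a logcontext-unaware Deferred so that awaiting it follows the logcontext rules.

```python
# Hypothetical example: agent.request returns a plain Twisted Deferred that does
# not follow the Synapse logcontext rules, so wrap it before awaiting.
from synapse.logging.context import make_deferred_yieldable

async def fetch_remote(agent, uri: bytes):
    response = await make_deferred_yieldable(agent.request(b"GET", uri))
    return response
```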


@@ -1069,12 +1154,18 @@
  "Calling defer_to_threadpool from sentinel context: metrics will be lost"
  )
  parent_context = None
+ server_name = "unknown_server_from_sentinel_context"
  else:
  assert isinstance(curr_context, LoggingContext)
  parent_context = curr_context
+ server_name = parent_context.server_name

  def g() -> R:
- with LoggingContext(str(curr_context), parent_context=parent_context):
+ with LoggingContext(
+ name=str(curr_context),
+ server_name=server_name,
+ parent_context=parent_context,
+ ):
  return f(*args, **kwargs)

  return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
synapse/logging/opentracing.py

@@ -251,17 +251,18 @@ class _DummyTagNames:
  try:
  import opentracing
  import opentracing.tags
- from opentracing.scope_managers.contextvars import ContextVarsScopeManager

  tags = opentracing.tags
  except ImportError:
  opentracing = None # type: ignore[assignment]
  tags = _DummyTagNames # type: ignore[assignment]
- ContextVarsScopeManager = None # type: ignore
  try:
  from jaeger_client import Config as JaegerConfig
+
+ from synapse.logging.scopecontextmanager import LogContextScopeManager
  except ImportError:
  JaegerConfig = None # type: ignore
+ LogContextScopeManager = None # type: ignore


  try:
@@ -483,7 +484,7 @@ def init_tracer(hs: "HomeServer") -> None:
  config = JaegerConfig(
  config=jaeger_config,
  service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
- scope_manager=ContextVarsScopeManager(),
+ scope_manager=LogContextScopeManager(),
  metrics_factory=PrometheusMetricsFactory(),
  )

@@ -576,7 +577,9 @@
  operation_name: str,
  contexts: Collection,
  child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
+ tags: Optional[Dict[str, str]] = None,
  start_time: Optional[float] = None,
+ ignore_active_span: bool = False,
  *,
  inherit_force_tracing: bool = False,
  tracer: Optional["opentracing.Tracer"] = None,
@@ -591,9 +594,16 @@
  span will be the parent. (If there is no currently active span, the first
  span in `contexts` will be the parent.)

+ tags: an optional dictionary of span tags. The caller gives up ownership of that
+ dictionary, because the :class:`Tracer` may use it as-is to avoid extra data
+ copying.
+
  start_time: optional override for the start time of the created span. Seconds
  since the epoch.

+ ignore_active_span: an explicit flag that ignores the current active
+ scope and creates a root span.
+
  inherit_force_tracing: if set, and any of the previous contexts have had tracing
  forced, the new span will also have tracing forced.
  tracer: override the opentracing tracer. By default the global tracer is used.
@@ -606,7 +616,9 @@
  operation_name,
  child_of=child_of,
  references=references,
+ tags=tags,
  start_time=start_time,
+ ignore_active_span=ignore_active_span,
  tracer=tracer,
  )

@@ -672,9 +684,21 @@ def start_active_span_from_edu(

  # Opentracing setters for tags, logs, etc
  @only_if_tracing
- def active_span() -> Optional["opentracing.Span"]:
- """Get the currently active span, if any"""
- return opentracing.tracer.active_span
+ def active_span(
+ *,
+ tracer: Optional["opentracing.Tracer"] = None,
+ ) -> Optional["opentracing.Span"]:
+ """
+ Get the currently active span, if any
+
+ Args:
+ tracer: override the opentracing tracer. By default the global tracer is used.
+ """
+ if tracer is None:
+ # use the global tracer by default
+ tracer = opentracing.tracer
+
+ return tracer.active_span


  @ensure_active_span("set a tag")
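A small sketch of the extended helper: the default path (no `tracer` argument) is unchanged, and when tracing is disabled the `@only_if_tracing` decorator makes the call return `None`.

```python
# Sketch only: read the currently active span and tag it if tracing is enabled.
from synapse.logging.opentracing import active_span

span = active_span()  # or active_span(tracer=some_other_tracer)
if span is not None:
    span.set_tag("example.tag", "example-value")  # plain opentracing Span API
```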