matrix-synapse 1.139.2__cp39-abi3-macosx_11_0_arm64.whl → 1.140.0rc1__cp39-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of matrix-synapse might be problematic.

Files changed (158)
  1. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
  2. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +157 -154
  3. synapse/_scripts/generate_workers_map.py +6 -1
  4. synapse/_scripts/synapse_port_db.py +0 -2
  5. synapse/_scripts/update_synapse_database.py +1 -6
  6. synapse/api/auth/base.py +1 -3
  7. synapse/api/auth/mas.py +6 -8
  8. synapse/api/auth/msc3861_delegated.py +6 -8
  9. synapse/api/errors.py +3 -0
  10. synapse/app/_base.py +101 -39
  11. synapse/app/admin_cmd.py +2 -4
  12. synapse/app/appservice.py +1 -1
  13. synapse/app/client_reader.py +1 -1
  14. synapse/app/event_creator.py +1 -1
  15. synapse/app/federation_reader.py +1 -1
  16. synapse/app/federation_sender.py +1 -1
  17. synapse/app/frontend_proxy.py +1 -1
  18. synapse/app/generic_worker.py +17 -11
  19. synapse/app/homeserver.py +85 -47
  20. synapse/app/media_repository.py +1 -1
  21. synapse/app/phone_stats_home.py +16 -14
  22. synapse/app/pusher.py +1 -1
  23. synapse/app/synchrotron.py +1 -1
  24. synapse/app/user_dir.py +1 -1
  25. synapse/appservice/__init__.py +29 -2
  26. synapse/appservice/scheduler.py +8 -8
  27. synapse/config/_base.py +32 -14
  28. synapse/config/_base.pyi +5 -3
  29. synapse/config/experimental.py +3 -0
  30. synapse/config/homeserver.py +27 -1
  31. synapse/config/logger.py +3 -4
  32. synapse/config/matrixrtc.py +67 -0
  33. synapse/crypto/keyring.py +18 -4
  34. synapse/events/auto_accept_invites.py +0 -1
  35. synapse/federation/federation_client.py +39 -0
  36. synapse/federation/federation_server.py +1 -1
  37. synapse/federation/send_queue.py +3 -0
  38. synapse/federation/sender/__init__.py +24 -8
  39. synapse/federation/sender/per_destination_queue.py +31 -8
  40. synapse/federation/sender/transaction_manager.py +12 -0
  41. synapse/federation/transport/client.py +29 -0
  42. synapse/handlers/account_validity.py +2 -4
  43. synapse/handlers/appservice.py +5 -7
  44. synapse/handlers/deactivate_account.py +2 -3
  45. synapse/handlers/delayed_events.py +10 -13
  46. synapse/handlers/device.py +14 -14
  47. synapse/handlers/e2e_keys.py +4 -3
  48. synapse/handlers/federation.py +7 -11
  49. synapse/handlers/federation_event.py +5 -6
  50. synapse/handlers/message.py +16 -10
  51. synapse/handlers/pagination.py +3 -7
  52. synapse/handlers/presence.py +21 -25
  53. synapse/handlers/profile.py +1 -1
  54. synapse/handlers/read_marker.py +3 -1
  55. synapse/handlers/register.py +8 -1
  56. synapse/handlers/room.py +13 -4
  57. synapse/handlers/room_member.py +11 -7
  58. synapse/handlers/room_policy.py +96 -2
  59. synapse/handlers/sso.py +1 -1
  60. synapse/handlers/stats.py +5 -3
  61. synapse/handlers/sync.py +20 -13
  62. synapse/handlers/typing.py +5 -10
  63. synapse/handlers/user_directory.py +12 -11
  64. synapse/handlers/worker_lock.py +19 -15
  65. synapse/http/client.py +18 -13
  66. synapse/http/federation/matrix_federation_agent.py +6 -1
  67. synapse/http/federation/well_known_resolver.py +3 -1
  68. synapse/http/matrixfederationclient.py +50 -11
  69. synapse/http/proxy.py +2 -2
  70. synapse/http/server.py +36 -2
  71. synapse/http/site.py +109 -17
  72. synapse/logging/context.py +165 -63
  73. synapse/logging/opentracing.py +30 -6
  74. synapse/logging/scopecontextmanager.py +161 -0
  75. synapse/media/_base.py +2 -1
  76. synapse/media/media_repository.py +20 -6
  77. synapse/media/url_previewer.py +5 -6
  78. synapse/metrics/_gc.py +3 -1
  79. synapse/metrics/background_process_metrics.py +128 -24
  80. synapse/metrics/common_usage_metrics.py +3 -5
  81. synapse/module_api/__init__.py +42 -5
  82. synapse/notifier.py +10 -3
  83. synapse/push/emailpusher.py +5 -4
  84. synapse/push/httppusher.py +6 -6
  85. synapse/push/pusherpool.py +3 -8
  86. synapse/replication/http/devices.py +0 -41
  87. synapse/replication/tcp/client.py +8 -5
  88. synapse/replication/tcp/handler.py +2 -3
  89. synapse/replication/tcp/protocol.py +14 -7
  90. synapse/replication/tcp/redis.py +16 -11
  91. synapse/replication/tcp/resource.py +5 -4
  92. synapse/replication/tcp/streams/__init__.py +2 -0
  93. synapse/res/providers.json +6 -5
  94. synapse/rest/__init__.py +2 -0
  95. synapse/rest/admin/__init__.py +4 -0
  96. synapse/rest/admin/events.py +69 -0
  97. synapse/rest/admin/media.py +70 -2
  98. synapse/rest/client/matrixrtc.py +52 -0
  99. synapse/rest/client/push_rule.py +1 -1
  100. synapse/rest/client/room.py +2 -3
  101. synapse/rest/client/sync.py +1 -0
  102. synapse/rest/client/transactions.py +1 -1
  103. synapse/server.py +271 -38
  104. synapse/server_notices/server_notices_manager.py +1 -0
  105. synapse/state/__init__.py +4 -1
  106. synapse/storage/_base.py +1 -1
  107. synapse/storage/background_updates.py +8 -3
  108. synapse/storage/controllers/persist_events.py +4 -3
  109. synapse/storage/controllers/purge_events.py +2 -3
  110. synapse/storage/controllers/state.py +5 -5
  111. synapse/storage/database.py +12 -7
  112. synapse/storage/databases/main/__init__.py +7 -2
  113. synapse/storage/databases/main/cache.py +4 -3
  114. synapse/storage/databases/main/censor_events.py +1 -1
  115. synapse/storage/databases/main/client_ips.py +9 -8
  116. synapse/storage/databases/main/deviceinbox.py +7 -6
  117. synapse/storage/databases/main/devices.py +4 -4
  118. synapse/storage/databases/main/end_to_end_keys.py +6 -3
  119. synapse/storage/databases/main/event_federation.py +7 -6
  120. synapse/storage/databases/main/event_push_actions.py +13 -13
  121. synapse/storage/databases/main/events_bg_updates.py +1 -1
  122. synapse/storage/databases/main/events_worker.py +6 -8
  123. synapse/storage/databases/main/lock.py +17 -13
  124. synapse/storage/databases/main/media_repository.py +2 -2
  125. synapse/storage/databases/main/metrics.py +6 -6
  126. synapse/storage/databases/main/monthly_active_users.py +3 -4
  127. synapse/storage/databases/main/receipts.py +1 -1
  128. synapse/storage/databases/main/registration.py +18 -19
  129. synapse/storage/databases/main/roommember.py +1 -1
  130. synapse/storage/databases/main/session.py +3 -3
  131. synapse/storage/databases/main/sliding_sync.py +2 -2
  132. synapse/storage/databases/main/transactions.py +3 -3
  133. synapse/storage/databases/state/store.py +2 -0
  134. synapse/synapse_rust/http_client.pyi +4 -0
  135. synapse/synapse_rust.abi3.so +0 -0
  136. synapse/util/async_helpers.py +36 -24
  137. synapse/util/batching_queue.py +16 -6
  138. synapse/util/caches/__init__.py +1 -1
  139. synapse/util/caches/deferred_cache.py +4 -0
  140. synapse/util/caches/descriptors.py +14 -2
  141. synapse/util/caches/dictionary_cache.py +6 -1
  142. synapse/util/caches/expiringcache.py +16 -5
  143. synapse/util/caches/lrucache.py +14 -26
  144. synapse/util/caches/response_cache.py +11 -1
  145. synapse/util/clock.py +215 -39
  146. synapse/util/constants.py +2 -0
  147. synapse/util/daemonize.py +5 -1
  148. synapse/util/distributor.py +9 -5
  149. synapse/util/metrics.py +35 -6
  150. synapse/util/ratelimitutils.py +4 -1
  151. synapse/util/retryutils.py +7 -4
  152. synapse/util/task_scheduler.py +11 -14
  153. synapse/logging/filter.py +0 -38
  154. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
  155. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
  156. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
  157. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
  158. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/media/media_repository.py CHANGED
@@ -67,7 +67,6 @@ from synapse.media.media_storage import (
 from synapse.media.storage_provider import StorageProviderWrapper
 from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
 from synapse.media.url_previewer import UrlPreviewer
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
 from synapse.types import UserID
 from synapse.util.async_helpers import Linearizer
@@ -108,7 +107,7 @@ class MediaRepository:
         self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
         self.thumbnail_requirements = hs.config.media.thumbnail_requirements
 
-        self.remote_media_linearizer = Linearizer(name="media_remote")
+        self.remote_media_linearizer = Linearizer(name="media_remote", clock=self.clock)
 
         self.recently_accessed_remotes: Set[Tuple[str, str]] = set()
         self.recently_accessed_locals: Set[str] = set()
@@ -187,16 +186,14 @@ class MediaRepository:
         self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository
 
     def _start_update_recently_accessed(self) -> Deferred:
-        return run_as_background_process(
+        return self.hs.run_as_background_process(
             "update_recently_accessed_media",
-            self.server_name,
             self._update_recently_accessed,
         )
 
     def _start_apply_media_retention_rules(self) -> Deferred:
-        return run_as_background_process(
+        return self.hs.run_as_background_process(
             "apply_media_retention_rules",
-            self.server_name,
             self._apply_media_retention_rules,
         )
 
@@ -423,6 +420,23 @@ class MediaRepository:
             send_cors=True,
         )
 
+    async def get_cached_remote_media_info(
+        self, origin: str, media_id: str
+    ) -> Optional[RemoteMedia]:
+        """
+        Get cached remote media info for a given origin/media ID combo. If the requested
+        media is not found locally, it will not be requested over federation and the
+        call will return None.
+
+        Args:
+            origin: The origin of the remote media
+            media_id: The media ID of the requested content
+
+        Returns:
+            The info for the cached remote media or None if it was not found
+        """
+        return await self.store.get_cached_remote_media(origin, media_id)
+
     async def get_local_media_info(
         self, request: SynapseRequest, media_id: str, max_timeout_ms: int
     ) -> Optional[LocalMedia]:
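
The new `get_cached_remote_media_info` method is a cache-only lookup: it consults the local store and never reaches out over federation. A minimal usage sketch, assuming a running `HomeServer` (`hs`) and the `media_type`/`media_length` attributes on `RemoteMedia`:

    async def describe_cached_media(hs, origin: str, media_id: str) -> str:
        media_repo = hs.get_media_repository()
        info = await media_repo.get_cached_remote_media_info(origin, media_id)
        if info is None:
            # Not cached locally; notably, no federation request was made.
            return f"mxc://{origin}/{media_id} is not in the local cache"
        return f"mxc://{origin}/{media_id}: {info.media_type}, {info.media_length} bytes"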
synapse/media/url_previewer.py CHANGED
@@ -44,7 +44,6 @@ from synapse.media._base import FileInfo, get_filename_from_headers
 from synapse.media.media_storage import MediaStorage, SHA256TransparentIOWriter
 from synapse.media.oembed import OEmbedProvider
 from synapse.media.preview_html import decode_body, parse_html_to_open_graph
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import JsonDict, UserID
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -167,6 +166,7 @@ class UrlPreviewer:
         media_storage: MediaStorage,
     ):
         self.clock = hs.get_clock()
+        self.hs = hs
         self.filepaths = media_repo.filepaths
         self.max_spider_size = hs.config.media.max_spider_size
         self.server_name = hs.hostname
@@ -201,15 +201,14 @@ class UrlPreviewer:
         self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache(
             cache_name="url_previews",
             server_name=self.server_name,
+            hs=self.hs,
             clock=self.clock,
             # don't spider URLs more often than once an hour
             expiry_ms=ONE_HOUR,
         )
 
         if self._worker_run_media_background_jobs:
-            self._cleaner_loop = self.clock.looping_call(
-                self._start_expire_url_cache_data, 10 * 1000
-            )
+            self.clock.looping_call(self._start_expire_url_cache_data, 10 * 1000)
 
     async def preview(self, url: str, user: UserID, ts: int) -> bytes:
         # the in-memory cache:
@@ -739,8 +738,8 @@ class UrlPreviewer:
         return open_graph_result, oembed_response.author_name, expiration_ms
 
     def _start_expire_url_cache_data(self) -> Deferred:
-        return run_as_background_process(
-            "expire_url_cache_data", self.server_name, self._expire_url_cache_data
+        return self.hs.run_as_background_process(
+            "expire_url_cache_data", self._expire_url_cache_data
        )
 
     async def _expire_url_cache_data(self) -> None:
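
The change above is an instance of a migration that repeats throughout this release: the free function `run_as_background_process(desc, server_name, func)` becomes the method `HomeServer.run_as_background_process(desc, func)`. A before/after sketch of a typical call site (the shutdown-tracking rationale is taken from the comments elsewhere in this diff):

    # Before (1.139.x): module-level helper; the caller passes server_name.
    return run_as_background_process(
        "expire_url_cache_data", self.server_name, self._expire_url_cache_data
    )

    # After (1.140.0rc1): method on the HomeServer, which already knows its
    # server_name and tracks the process so it can be cancelled on shutdown.
    return self.hs.run_as_background_process(
        "expire_url_cache_data", self._expire_url_cache_data
    )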
synapse/metrics/_gc.py CHANGED
@@ -138,7 +138,9 @@ def install_gc_manager() -> None:
             gc_time.labels(i).observe(end - start)
             gc_unreachable.labels(i).set(unreachable)
 
-    gc_task = task.LoopingCall(_maybe_gc)
+    # We can ignore the lint here since this looping call does not hold a `HomeServer`
+    # reference so can be cleaned up by other means on shutdown.
+    gc_task = task.LoopingCall(_maybe_gc)  # type: ignore[prefer-synapse-clock-looping-call]
     gc_task.start(0.1)
synapse/metrics/background_process_metrics.py CHANGED
@@ -20,7 +20,7 @@
 
 import logging
 import threading
-from contextlib import nullcontext
+from contextlib import contextmanager, nullcontext
 from functools import wraps
 from types import TracebackType
 from typing import (
@@ -28,7 +28,9 @@ from typing import (
     Any,
     Awaitable,
     Callable,
+    ContextManager,
     Dict,
+    Generator,
     Iterable,
     Optional,
     Protocol,
@@ -49,7 +51,12 @@ from synapse.logging.context import (
     LoggingContext,
     PreserveLoggingContext,
 )
-from synapse.logging.opentracing import SynapseTags, start_active_span
+from synapse.logging.opentracing import (
+    SynapseTags,
+    active_span,
+    start_active_span,
+    start_active_span_follows_from,
+)
 from synapse.metrics import SERVER_NAME_LABEL
 from synapse.metrics._types import Collector
@@ -59,6 +66,13 @@ if TYPE_CHECKING:
     # Old versions don't have `LiteralString`
     from typing_extensions import LiteralString
 
+    from synapse.server import HomeServer
+
+try:
+    import opentracing
+except ImportError:
+    opentracing = None  # type: ignore[assignment]
+
 
 logger = logging.getLogger(__name__)
@@ -216,6 +230,7 @@ def run_as_background_process(
     func: Callable[..., Awaitable[Optional[R]]],
     *args: Any,
     bg_start_span: bool = True,
+    test_only_tracer: Optional["opentracing.Tracer"] = None,
     **kwargs: Any,
 ) -> "defer.Deferred[Optional[R]]":
     """Run the given function in its own logcontext, with resource metrics
@@ -241,6 +256,8 @@ def run_as_background_process(
         bg_start_span: Whether to start an opentracing span. Defaults to True.
             Should only be disabled for processes that will not log to or tag
             a span.
+        test_only_tracer: Set the OpenTracing tracer to use. This is only useful
+            for tests.
         args: positional args for func
         kwargs: keyword args for func
@@ -250,6 +267,12 @@ def run_as_background_process(
         rules.
     """
 
+    # Since we track the tracing scope in the `LoggingContext`, before we move to the
+    # sentinel logcontext (or a new `LoggingContext`), grab the currently active
+    # tracing span (if any) so that we can create a cross-link to the background
+    # process trace.
+    original_active_tracing_span = active_span(tracer=test_only_tracer)
+
     async def run() -> Optional[R]:
         with _bg_metrics_lock:
             count = _background_process_counts.get(desc, 0)
@@ -264,15 +287,101 @@ def run_as_background_process(
 
         with BackgroundProcessLoggingContext(
             name=desc, server_name=server_name, instance_id=count
-        ) as context:
+        ) as logging_context:
             try:
                 if bg_start_span:
-                    ctx = start_active_span(
-                        f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
-                    )
+                    # If there is already an active span (e.g. because this background
+                    # process was started as part of handling a request for example),
+                    # because this is a long-running background task that may serve a
+                    # broader purpose than the request that kicked it off, we don't want
+                    # it to be a direct child of the currently active trace connected to
+                    # the request. We only want a loose reference to jump between the
+                    # traces.
+                    #
+                    # For example, when making a `/messages` request, when approaching a
+                    # gap, we may kick off a background process to fetch missing events
+                    # from federation. The `/messages` request trace shouldn't include
+                    # the entire time taken and details around fetching the missing
+                    # events since the request doesn't rely on the result, it was just
+                    # part of the heuristic to initiate things.
+                    #
+                    # We don't care about the value from the context manager as it's not
+                    # used (so we just use `Any` for the type). Ideally, we'd be able to
+                    # mark this as unused like an `assert_never` of sorts.
+                    tracing_scope: ContextManager[Any]
+                    if original_active_tracing_span is not None:
+                        # With the OpenTracing client that we're using, it's impossible to
+                        # create a disconnected root span while also providing `references`
+                        # so we first create a bare root span, then create a child span that
+                        # includes the references that we want.
+                        root_tracing_scope = start_active_span(
+                            f"bgproc.{desc}",
+                            tags={SynapseTags.REQUEST_ID: str(logging_context)},
+                            # Create a root span for the background process (disconnected
+                            # from other spans)
+                            ignore_active_span=True,
+                            tracer=test_only_tracer,
+                        )
+
+                        # Also add a span in the original request trace that cross-links
+                        # to the background process trace. We immediately finish the span
+                        # as this is just a marker to follow where the real work is being
+                        # done.
+                        #
+                        # In OpenTracing, `FOLLOWS_FROM` indicates a parent-child
+                        # relationship whereas we just want a cross-link to the
+                        # downstream trace. This is a bit hacky, but the closest we
+                        # can get to in OpenTracing land. If we ever migrate to
+                        # OpenTelemetry, we should use a normal `Link` for this.
+                        with start_active_span_follows_from(
+                            f"start_bgproc.{desc}",
+                            child_of=original_active_tracing_span,
+                            ignore_active_span=True,
+                            # Create the `FOLLOWS_FROM` reference to the background
+                            # process span so there is a loose coupling between the two
+                            # traces and it's easy to jump between.
+                            contexts=[root_tracing_scope.span.context],
+                            tracer=test_only_tracer,
+                        ):
+                            pass
+
+                        # Then start the tracing scope that we're going to use for
+                        # the duration of the background process within the root
+                        # span we just created.
+                        child_tracing_scope = start_active_span_follows_from(
+                            f"bgproc_child.{desc}",
+                            child_of=root_tracing_scope.span,
+                            ignore_active_span=True,
+                            tags={SynapseTags.REQUEST_ID: str(logging_context)},
+                            # Create the `FOLLOWS_FROM` reference to the request's
+                            # span so there is a loose coupling between the two
+                            # traces and it's easy to jump between.
+                            contexts=[original_active_tracing_span.context],
+                            tracer=test_only_tracer,
+                        )
+
+                        # For easy usage down below, we create a context manager that
+                        # combines both scopes.
+                        @contextmanager
+                        def combined_context_manager() -> Generator[None, None, None]:
+                            with root_tracing_scope, child_tracing_scope:
+                                yield
+
+                        tracing_scope = combined_context_manager()
+
+                    else:
+                        # Otherwise, when there is no active span, we will be creating
+                        # a disconnected root span already and we don't have to
+                        # worry about cross-linking to anything.
+                        tracing_scope = start_active_span(
+                            f"bgproc.{desc}",
+                            tags={SynapseTags.REQUEST_ID: str(logging_context)},
+                            tracer=test_only_tracer,
+                        )
                 else:
-                    ctx = nullcontext()  # type: ignore[assignment]
-                with ctx:
+                    tracing_scope = nullcontext()
+
+                with tracing_scope:
                     return await func(*args, **kwargs)
             except Exception:
                 logger.exception(
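
The comment block above describes the intended trace topology. For illustration, a standalone sketch of the same cross-linking idea using the raw opentracing API (Synapse wraps this in `start_active_span_follows_from`; the helper name below is invented, and a Jaeger-style tracer that records both `child_of` and `references` is assumed):

    import opentracing

    def start_cross_linked_bgproc(tracer: opentracing.Tracer, desc: str,
                                  request_span: opentracing.Span):
        # 1. Disconnected root span for the background process.
        root = tracer.start_active_span(f"bgproc.{desc}", ignore_active_span=True)

        # 2. Marker span inside the request trace that points at the background
        #    trace; finished immediately, it exists only as a cross-link.
        with tracer.start_active_span(
            f"start_bgproc.{desc}",
            child_of=request_span,
            references=[opentracing.follows_from(root.span.context)],
            ignore_active_span=True,
        ):
            pass

        # 3. The scope the background work actually runs under, pointing back
        #    at the request span with another FOLLOWS_FROM reference.
        child = tracer.start_active_span(
            f"bgproc_child.{desc}",
            child_of=root.span,
            references=[opentracing.follows_from(request_span.context)],
            ignore_active_span=True,
        )
        return root, child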
@@ -308,11 +417,11 @@ def run_as_background_process(
 P = ParamSpec("P")
 
 
-class HasServerName(Protocol):
-    server_name: str
+class HasHomeServer(Protocol):
+    hs: "HomeServer"
     """
-    The homeserver name that this cache is associated with (used to label the metric)
-    (`hs.hostname`).
+    The homeserver that this cache is associated with (used to label the metric and
+    track background processes for clean shutdown).
     """
@@ -342,27 +451,22 @@ def wrap_as_background_process(
     """
 
     def wrapper(
-        func: Callable[Concatenate[HasServerName, P], Awaitable[Optional[R]]],
+        func: Callable[Concatenate[HasHomeServer, P], Awaitable[Optional[R]]],
     ) -> Callable[P, "defer.Deferred[Optional[R]]"]:
         @wraps(func)
         def wrapped_func(
-            self: HasServerName, *args: P.args, **kwargs: P.kwargs
+            self: HasHomeServer, *args: P.args, **kwargs: P.kwargs
         ) -> "defer.Deferred[Optional[R]]":
-            assert self.server_name is not None, (
-                "The `server_name` attribute must be set on the object where `@wrap_as_background_process` decorator is used."
+            assert self.hs is not None, (
+                "The `hs` attribute must be set on the object where `@wrap_as_background_process` decorator is used."
             )
 
-            return run_as_background_process(
+            return self.hs.run_as_background_process(
                 desc,
-                self.server_name,
                 func,
                 self,
                 *args,
-                # type-ignore: mypy is confusing kwargs with the bg_start_span kwarg.
-                # Argument 4 to "run_as_background_process" has incompatible type
-                # "**P.kwargs"; expected "bool"
-                # See https://github.com/python/mypy/issues/8862
-                **kwargs,  # type: ignore[arg-type]
+                **kwargs,
             )
 
     # There are some shenanigans here, because we're decorating a method but
@@ -401,7 +505,7 @@ class BackgroundProcessLoggingContext(LoggingContext):
         """
         if instance_id is None:
             instance_id = id(self)
-        super().__init__("%s-%s" % (name, instance_id))
+        super().__init__(name="%s-%s" % (name, instance_id), server_name=server_name)
         self._proc: Optional[_BackgroundProcess] = _BackgroundProcess(
             desc=name, server_name=server_name, ctx=self
         )
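
With the protocol change above, any class using `@wrap_as_background_process` must now expose the `HomeServer` itself rather than just a `server_name`. A minimal sketch of the new calling convention (the class and task names are invented):

    from synapse.metrics.background_process_metrics import wrap_as_background_process

    class MediaRetentionScheduler:
        def __init__(self, hs):
            self.hs = hs  # required: the decorator asserts this attribute is set

        @wrap_as_background_process("prune_old_media")
        async def prune_old_media(self) -> None:
            # Runs via self.hs.run_as_background_process(...), in its own
            # logcontext, with metrics labelled "prune_old_media" and the
            # process tracked for clean shutdown.
            ...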
synapse/metrics/common_usage_metrics.py CHANGED
@@ -23,7 +23,6 @@ from typing import TYPE_CHECKING
 import attr
 
 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -52,6 +51,7 @@ class CommonUsageMetricsManager:
         self.server_name = hs.hostname
         self._store = hs.get_datastores().main
         self._clock = hs.get_clock()
+        self._hs = hs
 
     async def get_metrics(self) -> CommonUsageMetrics:
         """Get the CommonUsageMetrics object. If no collection has happened yet, do it
@@ -64,16 +64,14 @@ class CommonUsageMetricsManager:
 
     async def setup(self) -> None:
         """Keep the gauges for common usage metrics up to date."""
-        run_as_background_process(
+        self._hs.run_as_background_process(
             desc="common_usage_metrics_update_gauges",
-            server_name=self.server_name,
             func=self._update_gauges,
         )
         self._clock.looping_call(
-            run_as_background_process,
+            self._hs.run_as_background_process,
             5 * 60 * 1000,
             desc="common_usage_metrics_update_gauges",
-            server_name=self.server_name,
             func=self._update_gauges,
         )
synapse/module_api/__init__.py CHANGED
@@ -43,6 +43,7 @@ from typing_extensions import Concatenate, ParamSpec
 
 from twisted.internet import defer
 from twisted.internet.interfaces import IDelayedCall
+from twisted.python.threadpool import ThreadPool
 from twisted.web.resource import Resource
 
 from synapse.api import errors
@@ -79,6 +80,7 @@ from synapse.http.servlet import parse_json_object_from_request
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import (
     defer_to_thread,
+    defer_to_threadpool,
     make_deferred_yieldable,
     run_in_background,
 )
@@ -275,7 +277,15 @@
     # function instead.
     stub_server_name = "synapse_module_running_from_unknown_server"
 
-    return _run_as_background_process(
+    # Ignore the linter error here. Since this is leveraging the
+    # `run_as_background_process` function directly and we don't want to break the
+    # module api, we need to keep the function signature the same. This means we don't
+    # have access to the running `HomeServer` and cannot track this background process
+    # for cleanup during shutdown.
+    # This is not an issue during runtime and is only potentially problematic if the
+    # application cares about being able to garbage collect `HomeServer` instances
+    # during runtime.
+    return _run_as_background_process(  # type: ignore[untracked-background-process]
         desc,
         stub_server_name,
         func,
@@ -1402,7 +1412,7 @@ class ModuleApi:
 
         if self._hs.config.worker.run_background_tasks or run_on_all_instances:
             self._clock.looping_call(
-                self.run_as_background_process,
+                self._hs.run_as_background_process,
                 msec,
                 desc,
                 lambda: maybe_awaitable(f(*args, **kwargs)),
@@ -1460,7 +1470,7 @@ class ModuleApi:
         return self._clock.call_later(
             # convert ms to seconds as needed by call_later.
             msec * 0.001,
-            self.run_as_background_process,
+            self._hs.run_as_background_process,
             desc,
             lambda: maybe_awaitable(f(*args, **kwargs)),
         )
@@ -1701,8 +1711,8 @@ class ModuleApi:
         Note that the returned Deferred does not follow the synapse logcontext
         rules.
         """
-        return _run_as_background_process(
-            desc, self.server_name, func, *args, bg_start_span=bg_start_span, **kwargs
+        return self._hs.run_as_background_process(
+            desc, func, *args, bg_start_span=bg_start_span, **kwargs
         )
 
     async def defer_to_thread(
@@ -1725,6 +1735,33 @@ class ModuleApi:
         """
         return await defer_to_thread(self._hs.get_reactor(), f, *args, **kwargs)
 
+    async def defer_to_threadpool(
+        self,
+        threadpool: ThreadPool,
+        f: Callable[P, T],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> T:
+        """Runs the given function in a separate thread from the given thread pool.
+
+        Allows specifying a custom thread pool instead of using the default Synapse
+        one. To use the default Synapse threadpool, use `defer_to_thread` instead.
+
+        Added in Synapse v1.140.0.
+
+        Args:
+            threadpool: The thread pool to use.
+            f: The function to run.
+            args: The function's arguments.
+            kwargs: The function's keyword arguments.
+
+        Returns:
+            The return value of the function once ran in a thread.
+        """
+        return await defer_to_threadpool(
+            self._hs.get_reactor(), threadpool, f, *args, **kwargs
+        )
+
     async def check_username(self, username: str) -> None:
         """Checks if the provided username uses the grammar defined in the Matrix
         specification, and is already being used by an existing user.
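
The new `defer_to_threadpool` lets a module run blocking work on its own pool instead of Synapse's shared one. A hedged usage sketch (the module class and blocking function are invented; only `defer_to_threadpool` itself comes from this diff):

    from twisted.python.threadpool import ThreadPool

    def _blocking_work(data: bytes) -> bytes:
        return data[::-1]  # stand-in for CPU-bound or blocking-IO work

    class ImageTranscoderModule:
        def __init__(self, config: dict, api):
            self._api = api
            # Dedicated pool so long blocking jobs cannot starve Synapse's
            # default threadpool.
            self._pool = ThreadPool(minthreads=0, maxthreads=4, name="transcoder")
            self._pool.start()

        async def transcode(self, data: bytes) -> bytes:
            # Blocks a pool thread, not the reactor; the coroutine suspends
            # until the result is ready.
            return await self._api.defer_to_threadpool(
                self._pool, _blocking_work, data
            )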
synapse/notifier.py CHANGED
@@ -676,9 +676,16 @@ class Notifier:
                 # is a new token.
                 listener = user_stream.new_listener(prev_token)
                 listener = timeout_deferred(
-                    listener,
-                    (end_time - now) / 1000.0,
-                    self.hs.get_reactor(),
+                    deferred=listener,
+                    timeout=(end_time - now) / 1000.0,
+                    # We don't track these calls since they are constantly being
+                    # overridden by new calls to /sync and they don't hold the
+                    # `HomeServer` in memory on shutdown. It is safe to let them
+                    # time out of their own accord after shutting down since it
+                    # won't delay shutdown and there won't be any adverse
+                    # behaviour.
+                    cancel_on_shutdown=False,
+                    clock=self.hs.get_clock(),
                 )
 
                 log_kv(
synapse/push/emailpusher.py CHANGED
@@ -25,7 +25,6 @@ from typing import TYPE_CHECKING, Dict, List, Optional
 from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 from twisted.internet.interfaces import IDelayedCall
 
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push import Pusher, PusherConfig, PusherConfigException, ThrottleParams
 from synapse.push.mailer import Mailer
 from synapse.push.push_types import EmailReason
@@ -118,7 +117,7 @@ class EmailPusher(Pusher):
         if self._is_processing:
             return
 
-        run_as_background_process("emailpush.process", self.server_name, self._process)
+        self.hs.run_as_background_process("emailpush.process", self._process)
 
     def _pause_processing(self) -> None:
         """Used by tests to temporarily pause processing of events.
@@ -228,8 +227,10 @@ class EmailPusher(Pusher):
             self.timed_call = None
 
         if soonest_due_at is not None:
-            self.timed_call = self.hs.get_reactor().callLater(
-                self.seconds_until(soonest_due_at), self.on_timer
+            delay = self.seconds_until(soonest_due_at)
+            self.timed_call = self.hs.get_clock().call_later(
+                delay,
+                self.on_timer,
             )
 
     async def save_last_stream_ordering_and_success(
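
Both pusher files make the same timer substitution: `reactor.callLater` becomes `Clock.call_later`. The delay is still in seconds, but the delayed call now routes through Synapse's `Clock`, which (per the shutdown-tracking theme of this release) can cancel it cleanly when the `HomeServer` is torn down. Side by side:

    # Before: raw reactor timer, invisible to Synapse's shutdown machinery.
    self.timed_call = self.hs.get_reactor().callLater(delay, self.on_timer)

    # After: same semantics, but the Clock keeps a handle on the IDelayedCall.
    self.timed_call = self.hs.get_clock().call_later(delay, self.on_timer)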
synapse/push/httppusher.py CHANGED
@@ -32,7 +32,6 @@ from synapse.api.constants import EventTypes
 from synapse.events import EventBase
 from synapse.logging import opentracing
 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push import Pusher, PusherConfig, PusherConfigException
 from synapse.storage.databases.main.event_push_actions import HttpPushAction
 from synapse.types import JsonDict, JsonMapping
@@ -182,8 +181,8 @@ class HttpPusher(Pusher):
 
         # We could check the receipts are actually m.read receipts here,
         # but currently that's the only type of receipt anyway...
-        run_as_background_process(
-            "http_pusher.on_new_receipts", self.server_name, self._update_badge
+        self.hs.run_as_background_process(
+            "http_pusher.on_new_receipts", self._update_badge
         )
 
     async def _update_badge(self) -> None:
@@ -219,7 +218,7 @@ class HttpPusher(Pusher):
         if self.failing_since and self.timed_call and self.timed_call.active():
             return
 
-        run_as_background_process("httppush.process", self.server_name, self._process)
+        self.hs.run_as_background_process("httppush.process", self._process)
 
     async def _process(self) -> None:
         # we should never get here if we are already processing
@@ -336,8 +335,9 @@ class HttpPusher(Pusher):
             )
         else:
             logger.info("Push failed: delaying for %ds", self.backoff_delay)
-            self.timed_call = self.hs.get_reactor().callLater(
-                self.backoff_delay, self.on_timer
+            self.timed_call = self.hs.get_clock().call_later(
+                self.backoff_delay,
+                self.on_timer,
             )
             self.backoff_delay = min(
                 self.backoff_delay * 2, self.MAX_BACKOFF_SEC
synapse/push/pusherpool.py CHANGED
@@ -27,7 +27,6 @@ from prometheus_client import Gauge
 from synapse.api.errors import Codes, SynapseError
 from synapse.metrics import SERVER_NAME_LABEL
 from synapse.metrics.background_process_metrics import (
-    run_as_background_process,
     wrap_as_background_process,
 )
 from synapse.push import Pusher, PusherConfig, PusherConfigException
@@ -70,10 +69,8 @@ class PusherPool:
     """
 
     def __init__(self, hs: "HomeServer"):
-        self.hs = hs
-        self.server_name = (
-            hs.hostname
-        )  # nb must be called this for @wrap_as_background_process
+        self.hs = hs  # nb must be called this for @wrap_as_background_process
+        self.server_name = hs.hostname
         self.pusher_factory = PusherFactory(hs)
         self.store = self.hs.get_datastores().main
         self.clock = self.hs.get_clock()
@@ -112,9 +109,7 @@ class PusherPool:
         if not self._should_start_pushers:
             logger.info("Not starting pushers because they are disabled in the config")
             return
-        run_as_background_process(
-            "start_pushers", self.server_name, self._start_pushers
-        )
+        self.hs.run_as_background_process("start_pushers", self._start_pushers)
 
     async def add_or_update_pusher(
         self,
synapse/replication/http/devices.py CHANGED
@@ -185,46 +185,6 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint):
         return 200, multi_user_devices
 
 
-# FIXME(2025-07-22): Remove this on the next release, this will only get used
-# during rollout to Synapse 1.135 and can be removed after that release.
-class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
-    """Unused endpoint, kept for backwards compatibility during rollout."""
-
-    NAME = "upload_keys_for_user"
-    PATH_ARGS = ()
-    CACHE = False
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
-
-        self.e2e_keys_handler = hs.get_e2e_keys_handler()
-        self.store = hs.get_datastores().main
-        self.clock = hs.get_clock()
-
-    @staticmethod
-    async def _serialize_payload(  # type: ignore[override]
-        user_id: str, device_id: str, keys: JsonDict
-    ) -> JsonDict:
-        return {
-            "user_id": user_id,
-            "device_id": device_id,
-            "keys": keys,
-        }
-
-    async def _handle_request(  # type: ignore[override]
-        self, request: Request, content: JsonDict
-    ) -> Tuple[int, JsonDict]:
-        user_id = content["user_id"]
-        device_id = content["device_id"]
-        keys = content["keys"]
-
-        results = await self.e2e_keys_handler.upload_keys_for_user(
-            user_id, device_id, keys
-        )
-
-        return 200, results
-
-
 class ReplicationHandleNewDeviceUpdateRestServlet(ReplicationEndpoint):
     """Wake up a device writer to send local device list changes as federation outbound pokes.
@@ -291,5 +251,4 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ReplicationNotifyUserSignatureUpdateRestServlet(hs).register(http_server)
     ReplicationMultiUserDevicesResyncRestServlet(hs).register(http_server)
     ReplicationHandleNewDeviceUpdateRestServlet(hs).register(http_server)
-    ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
     ReplicationDeviceHandleRoomUnPartialStated(hs).register(http_server)