matrix-synapse 1.139.2__cp39-abi3-macosx_11_0_arm64.whl → 1.140.0rc1__cp39-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of matrix-synapse might be problematic. Click here for more details.

Files changed (158) hide show
  1. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
  2. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +157 -154
  3. synapse/_scripts/generate_workers_map.py +6 -1
  4. synapse/_scripts/synapse_port_db.py +0 -2
  5. synapse/_scripts/update_synapse_database.py +1 -6
  6. synapse/api/auth/base.py +1 -3
  7. synapse/api/auth/mas.py +6 -8
  8. synapse/api/auth/msc3861_delegated.py +6 -8
  9. synapse/api/errors.py +3 -0
  10. synapse/app/_base.py +101 -39
  11. synapse/app/admin_cmd.py +2 -4
  12. synapse/app/appservice.py +1 -1
  13. synapse/app/client_reader.py +1 -1
  14. synapse/app/event_creator.py +1 -1
  15. synapse/app/federation_reader.py +1 -1
  16. synapse/app/federation_sender.py +1 -1
  17. synapse/app/frontend_proxy.py +1 -1
  18. synapse/app/generic_worker.py +17 -11
  19. synapse/app/homeserver.py +85 -47
  20. synapse/app/media_repository.py +1 -1
  21. synapse/app/phone_stats_home.py +16 -14
  22. synapse/app/pusher.py +1 -1
  23. synapse/app/synchrotron.py +1 -1
  24. synapse/app/user_dir.py +1 -1
  25. synapse/appservice/__init__.py +29 -2
  26. synapse/appservice/scheduler.py +8 -8
  27. synapse/config/_base.py +32 -14
  28. synapse/config/_base.pyi +5 -3
  29. synapse/config/experimental.py +3 -0
  30. synapse/config/homeserver.py +27 -1
  31. synapse/config/logger.py +3 -4
  32. synapse/config/matrixrtc.py +67 -0
  33. synapse/crypto/keyring.py +18 -4
  34. synapse/events/auto_accept_invites.py +0 -1
  35. synapse/federation/federation_client.py +39 -0
  36. synapse/federation/federation_server.py +1 -1
  37. synapse/federation/send_queue.py +3 -0
  38. synapse/federation/sender/__init__.py +24 -8
  39. synapse/federation/sender/per_destination_queue.py +31 -8
  40. synapse/federation/sender/transaction_manager.py +12 -0
  41. synapse/federation/transport/client.py +29 -0
  42. synapse/handlers/account_validity.py +2 -4
  43. synapse/handlers/appservice.py +5 -7
  44. synapse/handlers/deactivate_account.py +2 -3
  45. synapse/handlers/delayed_events.py +10 -13
  46. synapse/handlers/device.py +14 -14
  47. synapse/handlers/e2e_keys.py +4 -3
  48. synapse/handlers/federation.py +7 -11
  49. synapse/handlers/federation_event.py +5 -6
  50. synapse/handlers/message.py +16 -10
  51. synapse/handlers/pagination.py +3 -7
  52. synapse/handlers/presence.py +21 -25
  53. synapse/handlers/profile.py +1 -1
  54. synapse/handlers/read_marker.py +3 -1
  55. synapse/handlers/register.py +8 -1
  56. synapse/handlers/room.py +13 -4
  57. synapse/handlers/room_member.py +11 -7
  58. synapse/handlers/room_policy.py +96 -2
  59. synapse/handlers/sso.py +1 -1
  60. synapse/handlers/stats.py +5 -3
  61. synapse/handlers/sync.py +20 -13
  62. synapse/handlers/typing.py +5 -10
  63. synapse/handlers/user_directory.py +12 -11
  64. synapse/handlers/worker_lock.py +19 -15
  65. synapse/http/client.py +18 -13
  66. synapse/http/federation/matrix_federation_agent.py +6 -1
  67. synapse/http/federation/well_known_resolver.py +3 -1
  68. synapse/http/matrixfederationclient.py +50 -11
  69. synapse/http/proxy.py +2 -2
  70. synapse/http/server.py +36 -2
  71. synapse/http/site.py +109 -17
  72. synapse/logging/context.py +165 -63
  73. synapse/logging/opentracing.py +30 -6
  74. synapse/logging/scopecontextmanager.py +161 -0
  75. synapse/media/_base.py +2 -1
  76. synapse/media/media_repository.py +20 -6
  77. synapse/media/url_previewer.py +5 -6
  78. synapse/metrics/_gc.py +3 -1
  79. synapse/metrics/background_process_metrics.py +128 -24
  80. synapse/metrics/common_usage_metrics.py +3 -5
  81. synapse/module_api/__init__.py +42 -5
  82. synapse/notifier.py +10 -3
  83. synapse/push/emailpusher.py +5 -4
  84. synapse/push/httppusher.py +6 -6
  85. synapse/push/pusherpool.py +3 -8
  86. synapse/replication/http/devices.py +0 -41
  87. synapse/replication/tcp/client.py +8 -5
  88. synapse/replication/tcp/handler.py +2 -3
  89. synapse/replication/tcp/protocol.py +14 -7
  90. synapse/replication/tcp/redis.py +16 -11
  91. synapse/replication/tcp/resource.py +5 -4
  92. synapse/replication/tcp/streams/__init__.py +2 -0
  93. synapse/res/providers.json +6 -5
  94. synapse/rest/__init__.py +2 -0
  95. synapse/rest/admin/__init__.py +4 -0
  96. synapse/rest/admin/events.py +69 -0
  97. synapse/rest/admin/media.py +70 -2
  98. synapse/rest/client/matrixrtc.py +52 -0
  99. synapse/rest/client/push_rule.py +1 -1
  100. synapse/rest/client/room.py +2 -3
  101. synapse/rest/client/sync.py +1 -0
  102. synapse/rest/client/transactions.py +1 -1
  103. synapse/server.py +271 -38
  104. synapse/server_notices/server_notices_manager.py +1 -0
  105. synapse/state/__init__.py +4 -1
  106. synapse/storage/_base.py +1 -1
  107. synapse/storage/background_updates.py +8 -3
  108. synapse/storage/controllers/persist_events.py +4 -3
  109. synapse/storage/controllers/purge_events.py +2 -3
  110. synapse/storage/controllers/state.py +5 -5
  111. synapse/storage/database.py +12 -7
  112. synapse/storage/databases/main/__init__.py +7 -2
  113. synapse/storage/databases/main/cache.py +4 -3
  114. synapse/storage/databases/main/censor_events.py +1 -1
  115. synapse/storage/databases/main/client_ips.py +9 -8
  116. synapse/storage/databases/main/deviceinbox.py +7 -6
  117. synapse/storage/databases/main/devices.py +4 -4
  118. synapse/storage/databases/main/end_to_end_keys.py +6 -3
  119. synapse/storage/databases/main/event_federation.py +7 -6
  120. synapse/storage/databases/main/event_push_actions.py +13 -13
  121. synapse/storage/databases/main/events_bg_updates.py +1 -1
  122. synapse/storage/databases/main/events_worker.py +6 -8
  123. synapse/storage/databases/main/lock.py +17 -13
  124. synapse/storage/databases/main/media_repository.py +2 -2
  125. synapse/storage/databases/main/metrics.py +6 -6
  126. synapse/storage/databases/main/monthly_active_users.py +3 -4
  127. synapse/storage/databases/main/receipts.py +1 -1
  128. synapse/storage/databases/main/registration.py +18 -19
  129. synapse/storage/databases/main/roommember.py +1 -1
  130. synapse/storage/databases/main/session.py +3 -3
  131. synapse/storage/databases/main/sliding_sync.py +2 -2
  132. synapse/storage/databases/main/transactions.py +3 -3
  133. synapse/storage/databases/state/store.py +2 -0
  134. synapse/synapse_rust/http_client.pyi +4 -0
  135. synapse/synapse_rust.abi3.so +0 -0
  136. synapse/util/async_helpers.py +36 -24
  137. synapse/util/batching_queue.py +16 -6
  138. synapse/util/caches/__init__.py +1 -1
  139. synapse/util/caches/deferred_cache.py +4 -0
  140. synapse/util/caches/descriptors.py +14 -2
  141. synapse/util/caches/dictionary_cache.py +6 -1
  142. synapse/util/caches/expiringcache.py +16 -5
  143. synapse/util/caches/lrucache.py +14 -26
  144. synapse/util/caches/response_cache.py +11 -1
  145. synapse/util/clock.py +215 -39
  146. synapse/util/constants.py +2 -0
  147. synapse/util/daemonize.py +5 -1
  148. synapse/util/distributor.py +9 -5
  149. synapse/util/metrics.py +35 -6
  150. synapse/util/ratelimitutils.py +4 -1
  151. synapse/util/retryutils.py +7 -4
  152. synapse/util/task_scheduler.py +11 -14
  153. synapse/logging/filter.py +0 -38
  154. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
  155. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
  156. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
  157. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
  158. {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/server.py CHANGED
@@ -28,10 +28,27 @@
28
28
  import abc
29
29
  import functools
30
30
  import logging
31
- from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type, TypeVar, cast
31
+ from threading import Thread
32
+ from typing import (
33
+ TYPE_CHECKING,
34
+ Any,
35
+ Awaitable,
36
+ Callable,
37
+ Dict,
38
+ List,
39
+ Optional,
40
+ Tuple,
41
+ Type,
42
+ TypeVar,
43
+ cast,
44
+ )
45
+ from wsgiref.simple_server import WSGIServer
32
46
 
47
+ from attr import dataclass
33
48
  from typing_extensions import TypeAlias
34
49
 
50
+ from twisted.internet import defer
51
+ from twisted.internet.base import _SystemEventID
35
52
  from twisted.internet.interfaces import IOpenSSLContextFactory
36
53
  from twisted.internet.tcp import Port
37
54
  from twisted.python.threadpool import ThreadPool
@@ -44,6 +61,7 @@ from synapse.api.auth.mas import MasDelegatedAuth
44
61
  from synapse.api.auth_blocking import AuthBlocking
45
62
  from synapse.api.filtering import Filtering
46
63
  from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter
64
+ from synapse.app._base import unregister_sighups
47
65
  from synapse.appservice.api import ApplicationServiceApi
48
66
  from synapse.appservice.scheduler import ApplicationServiceScheduler
49
67
  from synapse.config.homeserver import HomeServerConfig
@@ -133,6 +151,7 @@ from synapse.metrics import (
133
151
  all_later_gauges_to_clean_up_on_shutdown,
134
152
  register_threadpool,
135
153
  )
154
+ from synapse.metrics.background_process_metrics import run_as_background_process
136
155
  from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
137
156
  from synapse.module_api import ModuleApi
138
157
  from synapse.module_api.callbacks import ModuleApiCallbacks
@@ -156,6 +175,8 @@ from synapse.storage.controllers import StorageControllers
156
175
  from synapse.streams.events import EventSources
157
176
  from synapse.synapse_rust.rendezvous import RendezvousHandler
158
177
  from synapse.types import DomainSpecificString, ISynapseReactor
178
+ from synapse.util import SYNAPSE_VERSION
179
+ from synapse.util.caches import CACHE_METRIC_REGISTRY
159
180
  from synapse.util.clock import Clock
160
181
  from synapse.util.distributor import Distributor
161
182
  from synapse.util.macaroons import MacaroonGenerator
@@ -166,7 +187,9 @@ from synapse.util.task_scheduler import TaskScheduler
166
187
  logger = logging.getLogger(__name__)
167
188
 
168
189
  if TYPE_CHECKING:
190
+ # Old Python versions don't have `LiteralString`
169
191
  from txredisapi import ConnectionHandler
192
+ from typing_extensions import LiteralString
170
193
 
171
194
  from synapse.handlers.jwt import JwtHandler
172
195
  from synapse.handlers.oidc import OidcHandler
@@ -196,6 +219,7 @@ if TYPE_CHECKING:
196
219
 
197
220
  T: TypeAlias = object
198
221
  F = TypeVar("F", bound=Callable[["HomeServer"], T])
222
+ R = TypeVar("R")
199
223
 
200
224
 
201
225
  def cache_in_self(builder: F) -> F:
@@ -219,7 +243,8 @@ def cache_in_self(builder: F) -> F:
219
243
  @functools.wraps(builder)
220
244
  def _get(self: "HomeServer") -> T:
221
245
  try:
222
- return getattr(self, depname)
246
+ dep = getattr(self, depname)
247
+ return dep
223
248
  except AttributeError:
224
249
  pass
225
250
 
@@ -239,6 +264,22 @@ def cache_in_self(builder: F) -> F:
239
264
  return cast(F, _get)
240
265
 
241
266
 
267
+ @dataclass
268
+ class ShutdownInfo:
269
+ """Information for callable functions called at time of shutdown.
270
+
271
+ Attributes:
272
+ func: the object to call before shutdown.
273
+ trigger_id: an ID returned when registering this event trigger.
274
+ args: the arguments to call the function with.
275
+ kwargs: the keyword arguments to call the function with.
276
+ """
277
+
278
+ func: Callable[..., Any]
279
+ trigger_id: _SystemEventID
280
+ kwargs: Dict[str, object]
281
+
282
+
242
283
  class HomeServer(metaclass=abc.ABCMeta):
243
284
  """A basic homeserver object without lazy component builders.
244
285
 
@@ -282,13 +323,13 @@ class HomeServer(metaclass=abc.ABCMeta):
282
323
  hostname: str,
283
324
  config: HomeServerConfig,
284
325
  reactor: Optional[ISynapseReactor] = None,
285
- version_string: str = "Synapse",
286
326
  ):
287
327
  """
288
328
  Args:
289
329
  hostname : The hostname for the server.
290
330
  config: The full config for the homeserver.
291
331
  """
332
+
292
333
  if not reactor:
293
334
  from twisted.internet import reactor as _reactor
294
335
 
@@ -300,12 +341,13 @@ class HomeServer(metaclass=abc.ABCMeta):
300
341
  self.signing_key = config.key.signing_key[0]
301
342
  self.config = config
302
343
  self._listening_services: List[Port] = []
344
+ self._metrics_listeners: List[Tuple[WSGIServer, Thread]] = []
303
345
  self.start_time: Optional[int] = None
304
346
 
305
347
  self._instance_id = random_string(5)
306
348
  self._instance_name = config.worker.instance_name
307
349
 
308
- self.version_string = version_string
350
+ self.version_string = f"Synapse/{SYNAPSE_VERSION}"
309
351
 
310
352
  self.datastores: Optional[Databases] = None
311
353
 
@@ -315,6 +357,211 @@ class HomeServer(metaclass=abc.ABCMeta):
315
357
  # This attribute is set by the free function `refresh_certificate`.
316
358
  self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None
317
359
 
360
+ self._is_shutdown = False
361
+ self._async_shutdown_handlers: List[ShutdownInfo] = []
362
+ self._sync_shutdown_handlers: List[ShutdownInfo] = []
363
+ self._background_processes: set[defer.Deferred[Optional[Any]]] = set()
364
+
365
+ def run_as_background_process(
366
+ self,
367
+ desc: "LiteralString",
368
+ func: Callable[..., Awaitable[Optional[R]]],
369
+ *args: Any,
370
+ **kwargs: Any,
371
+ ) -> "defer.Deferred[Optional[R]]":
372
+ """Run the given function in its own logcontext, with resource metrics
373
+
374
+ This should be used to wrap processes which are fired off to run in the
375
+ background, instead of being associated with a particular request.
376
+
377
+ It returns a Deferred which completes when the function completes, but it doesn't
378
+ follow the synapse logcontext rules, which makes it appropriate for passing to
379
+ clock.looping_call and friends (or for firing-and-forgetting in the middle of a
380
+ normal synapse async function).
381
+
382
+ Because the returned Deferred does not follow the synapse logcontext rules, awaiting
383
+ the result of this function will result in the log context being cleared (bad). In
384
+ order to properly await the result of this function and maintain the current log
385
+ context, use `make_deferred_yieldable`.
386
+
387
+ Args:
388
+ desc: a description for this background process type
389
+ server_name: The homeserver name that this background process is being run for
390
+ (this should be `hs.hostname`).
391
+ func: a function, which may return a Deferred or a coroutine
392
+ bg_start_span: Whether to start an opentracing span. Defaults to True.
393
+ Should only be disabled for processes that will not log to or tag
394
+ a span.
395
+ args: positional args for func
396
+ kwargs: keyword args for func
397
+
398
+ Returns:
399
+ Deferred which returns the result of func, or `None` if func raises.
400
+ Note that the returned Deferred does not follow the synapse logcontext
401
+ rules.
402
+ """
403
+ if self._is_shutdown:
404
+ raise Exception(
405
+ f"Cannot start background process. HomeServer has been shutdown {len(self._background_processes)} {len(self.get_clock()._looping_calls)} {len(self.get_clock()._call_id_to_delayed_call)}"
406
+ )
407
+
408
+ # Ignore linter error as this is the one location this should be called.
409
+ deferred = run_as_background_process(desc, self.hostname, func, *args, **kwargs) # type: ignore[untracked-background-process]
410
+ self._background_processes.add(deferred)
411
+
412
+ def on_done(res: R) -> R:
413
+ try:
414
+ self._background_processes.remove(deferred)
415
+ except KeyError:
416
+ # If the background process isn't being tracked anymore we can just move on.
417
+ pass
418
+ return res
419
+
420
+ deferred.addBoth(on_done)
421
+ return deferred
422
+
423
+ async def shutdown(self) -> None:
424
+ """
425
+ Cleanly stops all aspects of the HomeServer and removes any references that
426
+ have been handed out in order to allow the HomeServer object to be garbage
427
+ collected.
428
+
429
+ You must ensure the HomeServer object is not frozen in the garbage collector
430
+ in order for it to be cleaned up. By default, Synapse freezes the HomeServer
431
+ object in the garbage collector.
432
+ """
433
+
434
+ self._is_shutdown = True
435
+
436
+ logger.info(
437
+ "Received shutdown request for %s (%s).",
438
+ self.hostname,
439
+ self.get_instance_id(),
440
+ )
441
+
442
+ # Unregister sighups first. If a shutdown was requested we shouldn't be responding
443
+ # to things like config changes. So it would be best to stop listening to these first.
444
+ unregister_sighups(self._instance_id)
445
+
446
+ # TODO: It would be desirable to be able to report an error if the HomeServer
447
+ # object is frozen in the garbage collector as that would prevent it from being
448
+ # collected after being shutdown.
449
+ # In theory the following should work, but it doesn't seem to make a difference
450
+ # when I test it locally.
451
+ #
452
+ # if gc.is_tracked(self):
453
+ # logger.error("HomeServer object is tracked by garbage collection so cannot be fully cleaned up")
454
+
455
+ for listener in self._listening_services:
456
+ # During unit tests, an incomplete `twisted.pair.testing._FakePort` is used
457
+ # for listeners so check listener type here to ensure shutdown procedure is
458
+ # only applied to actual `Port` instances.
459
+ if type(listener) is Port:
460
+ port_shutdown = listener.stopListening()
461
+ if port_shutdown is not None:
462
+ await port_shutdown
463
+ self._listening_services.clear()
464
+
465
+ for server, thread in self._metrics_listeners:
466
+ server.shutdown()
467
+ thread.join()
468
+ self._metrics_listeners.clear()
469
+
470
+ # TODO: Cleanup replication pieces
471
+
472
+ self.get_keyring().shutdown()
473
+
474
+ # Cleanup metrics associated with the homeserver
475
+ for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values():
476
+ later_gauge.unregister_hooks_for_homeserver_instance_id(
477
+ self.get_instance_id()
478
+ )
479
+
480
+ CACHE_METRIC_REGISTRY.unregister_hooks_for_homeserver(
481
+ self.config.server.server_name
482
+ )
483
+
484
+ for db in self.get_datastores().databases:
485
+ db.stop_background_updates()
486
+
487
+ if self.should_send_federation():
488
+ try:
489
+ self.get_federation_sender().shutdown()
490
+ except Exception:
491
+ pass
492
+
493
+ for shutdown_handler in self._async_shutdown_handlers:
494
+ try:
495
+ self.get_reactor().removeSystemEventTrigger(shutdown_handler.trigger_id)
496
+ defer.ensureDeferred(shutdown_handler.func(**shutdown_handler.kwargs))
497
+ except Exception as e:
498
+ logger.error("Error calling shutdown async handler: %s", e)
499
+ self._async_shutdown_handlers.clear()
500
+
501
+ for shutdown_handler in self._sync_shutdown_handlers:
502
+ try:
503
+ self.get_reactor().removeSystemEventTrigger(shutdown_handler.trigger_id)
504
+ shutdown_handler.func(**shutdown_handler.kwargs)
505
+ except Exception as e:
506
+ logger.error("Error calling shutdown sync handler: %s", e)
507
+ self._sync_shutdown_handlers.clear()
508
+
509
+ self.get_clock().shutdown()
510
+
511
+ for background_process in list(self._background_processes):
512
+ try:
513
+ background_process.cancel()
514
+ except Exception:
515
+ pass
516
+ self._background_processes.clear()
517
+
518
+ for db in self.get_datastores().databases:
519
+ db._db_pool.close()
520
+
521
+ def register_async_shutdown_handler(
522
+ self,
523
+ *,
524
+ phase: str,
525
+ eventType: str,
526
+ shutdown_func: Callable[..., Any],
527
+ **kwargs: object,
528
+ ) -> None:
529
+ """
530
+ Register a system event trigger with the HomeServer so it can be cleanly
531
+ removed when the HomeServer is shutdown.
532
+ """
533
+ id = self.get_clock().add_system_event_trigger(
534
+ phase,
535
+ eventType,
536
+ shutdown_func,
537
+ **kwargs,
538
+ )
539
+ self._async_shutdown_handlers.append(
540
+ ShutdownInfo(func=shutdown_func, trigger_id=id, kwargs=kwargs)
541
+ )
542
+
543
+ def register_sync_shutdown_handler(
544
+ self,
545
+ *,
546
+ phase: str,
547
+ eventType: str,
548
+ shutdown_func: Callable[..., Any],
549
+ **kwargs: object,
550
+ ) -> None:
551
+ """
552
+ Register a system event trigger with the HomeServer so it can be cleanly
553
+ removed when the HomeServer is shutdown.
554
+ """
555
+ id = self.get_clock().add_system_event_trigger(
556
+ phase,
557
+ eventType,
558
+ shutdown_func,
559
+ **kwargs,
560
+ )
561
+ self._sync_shutdown_handlers.append(
562
+ ShutdownInfo(func=shutdown_func, trigger_id=id, kwargs=kwargs)
563
+ )
564
+
318
565
  def register_module_web_resource(self, path: str, resource: Resource) -> None:
319
566
  """Allows a module to register a web resource to be served at the given path.
320
567
 
@@ -366,36 +613,19 @@ class HomeServer(metaclass=abc.ABCMeta):
366
613
  self.datastores = Databases(self.DATASTORE_CLASS, self)
367
614
  logger.info("Finished setting up.")
368
615
 
369
- def __del__(self) -> None:
370
- """
371
- Called when an the homeserver is garbage collected.
372
-
373
- Make sure we actually do some clean-up, rather than leak data.
374
- """
375
- self.cleanup()
376
-
377
- def cleanup(self) -> None:
378
- """
379
- WIP: Clean-up any references to the homeserver and stop any running related
380
- processes, timers, loops, replication stream, etc.
381
-
382
- This should be called wherever you care about the HomeServer being completely
383
- garbage collected like in tests. It's not necessary to call if you plan to just
384
- shut down the whole Python process anyway.
385
-
386
- Can be called multiple times.
387
- """
388
- logger.info("Received cleanup request for %s.", self.hostname)
389
-
390
- # TODO: Stop background processes, timers, loops, replication stream, etc.
391
-
392
- # Cleanup metrics associated with the homeserver
393
- for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values():
394
- later_gauge.unregister_hooks_for_homeserver_instance_id(
395
- self.get_instance_id()
396
- )
397
-
398
- logger.info("Cleanup complete for %s.", self.hostname)
616
+ # def __del__(self) -> None:
617
+ # """
618
+ # Called when the homeserver is garbage collected.
619
+ #
620
+ # Make sure we actually do some clean-up, rather than leak data.
621
+ # """
622
+ #
623
+ # # NOTE: This is a chicken and egg problem.
624
+ # # __del__ will never be called since the HomeServer cannot be garbage collected
625
+ # # until the shutdown function has been called. So it makes no sense to call
626
+ # # shutdown inside of __del__, even though that is a logical place to assume it
627
+ # # should be called.
628
+ # self.shutdown()
399
629
 
400
630
  def start_listening(self) -> None: # noqa: B027 (no-op by design)
401
631
  """Start the HTTP, manhole, metrics, etc listeners
@@ -442,7 +672,8 @@ class HomeServer(metaclass=abc.ABCMeta):
442
672
 
443
673
  @cache_in_self
444
674
  def get_clock(self) -> Clock:
445
- return Clock(self._reactor)
675
+ # Ignore the linter error since this is the one place the `Clock` should be created.
676
+ return Clock(self._reactor, server_name=self.hostname) # type: ignore[multiple-internal-clocks]
446
677
 
447
678
  def get_datastores(self) -> Databases:
448
679
  if not self.datastores:
@@ -452,7 +683,7 @@ class HomeServer(metaclass=abc.ABCMeta):
452
683
 
453
684
  @cache_in_self
454
685
  def get_distributor(self) -> Distributor:
455
- return Distributor(server_name=self.hostname)
686
+ return Distributor(hs=self)
456
687
 
457
688
  @cache_in_self
458
689
  def get_registration_ratelimiter(self) -> Ratelimiter:
@@ -1007,8 +1238,10 @@ class HomeServer(metaclass=abc.ABCMeta):
1007
1238
  )
1008
1239
 
1009
1240
  media_threadpool.start()
1010
- self.get_clock().add_system_event_trigger(
1011
- "during", "shutdown", media_threadpool.stop
1241
+ self.register_sync_shutdown_handler(
1242
+ phase="during",
1243
+ eventType="shutdown",
1244
+ shutdown_func=media_threadpool.stop,
1012
1245
  )
1013
1246
 
1014
1247
  # Register the threadpool with our metrics.
@@ -36,6 +36,7 @@ SERVER_NOTICE_ROOM_TAG = "m.server_notice"
36
36
  class ServerNoticesManager:
37
37
  def __init__(self, hs: "HomeServer"):
38
38
  self.server_name = hs.hostname # nb must be called this for @cached
39
+ self.clock = hs.get_clock() # nb must be called this for @cached
39
40
  self._store = hs.get_datastores().main
40
41
  self._config = hs.config
41
42
  self._account_data_handler = hs.get_account_data_handler()
synapse/state/__init__.py CHANGED
@@ -642,13 +642,16 @@ class StateResolutionHandler:
642
642
  self.server_name = hs.hostname
643
643
  self.clock = hs.get_clock()
644
644
 
645
- self.resolve_linearizer = Linearizer(name="state_resolve_lock")
645
+ self.resolve_linearizer = Linearizer(
646
+ name="state_resolve_lock", clock=self.clock
647
+ )
646
648
 
647
649
  # dict of set of event_ids -> _StateCacheEntry.
648
650
  self._state_cache: ExpiringCache[FrozenSet[int], _StateCacheEntry] = (
649
651
  ExpiringCache(
650
652
  cache_name="state_cache",
651
653
  server_name=self.server_name,
654
+ hs=hs,
652
655
  clock=self.clock,
653
656
  max_len=100000,
654
657
  expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000,
synapse/storage/_base.py CHANGED
@@ -56,7 +56,7 @@ class SQLBaseStore(metaclass=ABCMeta):
56
56
  ):
57
57
  self.hs = hs
58
58
  self.server_name = hs.hostname # nb must be called this for @cached
59
- self._clock = hs.get_clock()
59
+ self.clock = hs.get_clock() # nb must be called this for @cached
60
60
  self.database_engine = database.engine
61
61
  self.db_pool = database
62
62
 
@@ -41,7 +41,6 @@ from typing import (
41
41
  import attr
42
42
 
43
43
  from synapse._pydantic_compat import BaseModel
44
- from synapse.metrics.background_process_metrics import run_as_background_process
45
44
  from synapse.storage.engines import PostgresEngine
46
45
  from synapse.storage.types import Connection, Cursor
47
46
  from synapse.types import JsonDict, StrCollection
@@ -285,6 +284,13 @@ class BackgroundUpdater:
285
284
  self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms
286
285
  self.sleep_enabled = hs.config.background_updates.sleep_enabled
287
286
 
287
+ def shutdown(self) -> None:
288
+ """
289
+ Stop any further background updates from happening.
290
+ """
291
+ self.enabled = False
292
+ self._background_update_handlers.clear()
293
+
288
294
  def get_status(self) -> UpdaterStatus:
289
295
  """An integer summarising the updater status. Used as a metric."""
290
296
  if self._aborted:
@@ -396,9 +402,8 @@ class BackgroundUpdater:
396
402
  # if we start a new background update, not all updates are done.
397
403
  self._all_done = False
398
404
  sleep = self.sleep_enabled
399
- run_as_background_process(
405
+ self.hs.run_as_background_process(
400
406
  "background_updates",
401
- self.server_name,
402
407
  self.run_background_updates,
403
408
  sleep,
404
409
  )
@@ -62,7 +62,6 @@ from synapse.logging.opentracing import (
62
62
  trace,
63
63
  )
64
64
  from synapse.metrics import SERVER_NAME_LABEL
65
- from synapse.metrics.background_process_metrics import run_as_background_process
66
65
  from synapse.storage.controllers.state import StateStorageController
67
66
  from synapse.storage.databases import Databases
68
67
  from synapse.storage.databases.main.events import DeltaState
@@ -195,6 +194,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
195
194
 
196
195
  def __init__(
197
196
  self,
197
+ hs: "HomeServer",
198
198
  server_name: str,
199
199
  per_item_callback: Callable[
200
200
  [str, _EventPersistQueueTask],
@@ -207,6 +207,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
207
207
  and its result will be returned via the Deferreds returned from add_to_queue.
208
208
  """
209
209
  self.server_name = server_name
210
+ self.hs = hs
210
211
  self._event_persist_queues: Dict[str, Deque[_EventPersistQueueItem]] = {}
211
212
  self._currently_persisting_rooms: Set[str] = set()
212
213
  self._per_item_callback = per_item_callback
@@ -311,7 +312,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
311
312
  self._currently_persisting_rooms.discard(room_id)
312
313
 
313
314
  # set handle_queue_loop off in the background
314
- run_as_background_process("persist_events", self.server_name, handle_queue_loop)
315
+ self.hs.run_as_background_process("persist_events", handle_queue_loop)
315
316
 
316
317
  def _get_drainining_queue(
317
318
  self, room_id: str
@@ -354,7 +355,7 @@ class EventsPersistenceStorageController:
354
355
  self._instance_name = hs.get_instance_name()
355
356
  self.is_mine_id = hs.is_mine_id
356
357
  self._event_persist_queue = _EventPeristenceQueue(
357
- self.server_name, self._process_event_persist_queue_task
358
+ hs, self.server_name, self._process_event_persist_queue_task
358
359
  )
359
360
  self._state_resolution_handler = hs.get_state_resolution_handler()
360
361
  self._state_controller = state_controller
@@ -46,9 +46,8 @@ class PurgeEventsStorageController:
46
46
  """High level interface for purging rooms and event history."""
47
47
 
48
48
  def __init__(self, hs: "HomeServer", stores: Databases):
49
- self.server_name = (
50
- hs.hostname
51
- ) # nb must be called this for @wrap_as_background_process
49
+ self.hs = hs # nb must be called this for @wrap_as_background_process
50
+ self.server_name = hs.hostname
52
51
  self.stores = stores
53
52
 
54
53
  if hs.config.worker.run_background_tasks:
@@ -69,15 +69,17 @@ class StateStorageController:
69
69
 
70
70
  def __init__(self, hs: "HomeServer", stores: "Databases"):
71
71
  self.server_name = hs.hostname # nb must be called this for @cached
72
+ self.clock = hs.get_clock()
72
73
  self._is_mine_id = hs.is_mine_id
73
- self._clock = hs.get_clock()
74
74
  self.stores = stores
75
75
  self._partial_state_events_tracker = PartialStateEventsTracker(stores.main)
76
76
  self._partial_state_room_tracker = PartialCurrentStateTracker(stores.main)
77
77
 
78
78
  # Used by `_get_joined_hosts` to ensure only one thing mutates the cache
79
79
  # at a time. Keyed by room_id.
80
- self._joined_host_linearizer = Linearizer("_JoinedHostsCache")
80
+ self._joined_host_linearizer = Linearizer(
81
+ name="_JoinedHostsCache", clock=self.clock
82
+ )
81
83
 
82
84
  def notify_event_un_partial_stated(self, event_id: str) -> None:
83
85
  self._partial_state_events_tracker.notify_un_partial_stated(event_id)
@@ -815,9 +817,7 @@ class StateStorageController:
815
817
  state_group = object()
816
818
 
817
819
  assert state_group is not None
818
- with Measure(
819
- self._clock, name="get_joined_hosts", server_name=self.server_name
820
- ):
820
+ with Measure(self.clock, name="get_joined_hosts", server_name=self.server_name):
821
821
  return await self._get_joined_hosts(
822
822
  room_id, state_group, state_entry=state_entry
823
823
  )
@@ -62,7 +62,6 @@ from synapse.logging.context import (
62
62
  make_deferred_yieldable,
63
63
  )
64
64
  from synapse.metrics import SERVER_NAME_LABEL, register_threadpool
65
- from synapse.metrics.background_process_metrics import run_as_background_process
66
65
  from synapse.storage.background_updates import BackgroundUpdater
67
66
  from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
68
67
  from synapse.storage.types import Connection, Cursor, SQLQueryParameters
@@ -146,7 +145,7 @@ def make_pool(
146
145
  def _on_new_connection(conn: Connection) -> None:
147
146
  # Ensure we have a logging context so we can correctly track queries,
148
147
  # etc.
149
- with LoggingContext("db.on_new_connection"):
148
+ with LoggingContext(name="db.on_new_connection", server_name=server_name):
150
149
  engine.on_new_connection(
151
150
  LoggingDatabaseConnection(
152
151
  conn=conn,
@@ -638,12 +637,17 @@ class DatabasePool:
638
637
  # background updates of tables that aren't safe to update.
639
638
  self._clock.call_later(
640
639
  0.0,
641
- run_as_background_process,
640
+ self.hs.run_as_background_process,
642
641
  "upsert_safety_check",
643
- self.server_name,
644
642
  self._check_safe_to_upsert,
645
643
  )
646
644
 
645
+ def stop_background_updates(self) -> None:
646
+ """
647
+ Stops the database from running any further background updates.
648
+ """
649
+ self.updates.shutdown()
650
+
647
651
  def name(self) -> str:
648
652
  "Return the name of this database"
649
653
  return self._database_config.name
@@ -681,9 +685,8 @@ class DatabasePool:
681
685
  if background_update_names:
682
686
  self._clock.call_later(
683
687
  15.0,
684
- run_as_background_process,
688
+ self.hs.run_as_background_process,
685
689
  "upsert_safety_check",
686
- self.server_name,
687
690
  self._check_safe_to_upsert,
688
691
  )
689
692
 
@@ -1043,7 +1046,9 @@ class DatabasePool:
1043
1046
  assert not self.engine.in_transaction(conn)
1044
1047
 
1045
1048
  with LoggingContext(
1046
- str(curr_context), parent_context=parent_context
1049
+ name=str(curr_context),
1050
+ server_name=self.server_name,
1051
+ parent_context=parent_context,
1047
1052
  ) as context:
1048
1053
  with opentracing.start_active_span(
1049
1054
  operation_name="db.connection",
@@ -299,10 +299,14 @@ class DataStore(
299
299
  FROM users as u
300
300
  LEFT JOIN profiles AS p ON u.name = p.full_user_id
301
301
  LEFT JOIN erased_users AS eu ON u.name = eu.user_id
302
+ LEFT JOIN (
303
+ SELECT user_id, MAX(last_seen) AS last_seen_ts
304
+ FROM devices GROUP BY user_id
305
+ ) lsd ON u.name = lsd.user_id
302
306
  LEFT JOIN (
303
307
  SELECT user_id, MAX(last_seen) AS last_seen_ts
304
308
  FROM user_ips GROUP BY user_id
305
- ) ls ON u.name = ls.user_id
309
+ ) lsi ON u.name = lsi.user_id
306
310
  {where_clause}
307
311
  """
308
312
  sql = "SELECT COUNT(*) as total_users " + sql_base
@@ -312,7 +316,8 @@ class DataStore(
312
316
  sql = f"""
313
317
  SELECT name, user_type, is_guest, admin, deactivated, shadow_banned,
314
318
  displayname, avatar_url, creation_ts * 1000 as creation_ts, approved,
315
- eu.user_id is not null as erased, last_seen_ts, locked
319
+ eu.user_id is not null as erased,
320
+ COALESCE(lsd.last_seen_ts, lsi.last_seen_ts) as last_seen_ts, locked
316
321
  {sql_base}
317
322
  ORDER BY {order_by_column} {order}, u.name ASC
318
323
  LIMIT ? OFFSET ?