matrix_synapse-1.139.0rc2-cp39-abi3-musllinux_1_2_aarch64.whl → matrix_synapse-1.140.0rc1-cp39-abi3-musllinux_1_2_aarch64.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: the registry flags this version of matrix-synapse for closer review.
Files changed (159)
  1. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
  2. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +158 -155
  3. synapse/_scripts/generate_workers_map.py +6 -1
  4. synapse/_scripts/synapse_port_db.py +0 -2
  5. synapse/_scripts/update_synapse_database.py +1 -6
  6. synapse/api/auth/base.py +1 -3
  7. synapse/api/auth/mas.py +6 -8
  8. synapse/api/auth/msc3861_delegated.py +6 -8
  9. synapse/api/errors.py +3 -0
  10. synapse/app/_base.py +101 -39
  11. synapse/app/admin_cmd.py +2 -4
  12. synapse/app/appservice.py +1 -1
  13. synapse/app/client_reader.py +1 -1
  14. synapse/app/event_creator.py +1 -1
  15. synapse/app/federation_reader.py +1 -1
  16. synapse/app/federation_sender.py +1 -1
  17. synapse/app/frontend_proxy.py +1 -1
  18. synapse/app/generic_worker.py +17 -11
  19. synapse/app/homeserver.py +85 -47
  20. synapse/app/media_repository.py +1 -1
  21. synapse/app/phone_stats_home.py +16 -14
  22. synapse/app/pusher.py +1 -1
  23. synapse/app/synchrotron.py +1 -1
  24. synapse/app/user_dir.py +1 -1
  25. synapse/appservice/__init__.py +29 -2
  26. synapse/appservice/scheduler.py +8 -8
  27. synapse/config/_base.py +32 -14
  28. synapse/config/_base.pyi +5 -3
  29. synapse/config/experimental.py +3 -0
  30. synapse/config/homeserver.py +27 -1
  31. synapse/config/logger.py +3 -4
  32. synapse/config/matrixrtc.py +67 -0
  33. synapse/crypto/keyring.py +18 -4
  34. synapse/events/auto_accept_invites.py +0 -1
  35. synapse/federation/federation_client.py +39 -0
  36. synapse/federation/federation_server.py +1 -1
  37. synapse/federation/send_queue.py +3 -0
  38. synapse/federation/sender/__init__.py +24 -8
  39. synapse/federation/sender/per_destination_queue.py +31 -8
  40. synapse/federation/sender/transaction_manager.py +12 -0
  41. synapse/federation/transport/client.py +29 -0
  42. synapse/handlers/account_validity.py +2 -4
  43. synapse/handlers/appservice.py +5 -7
  44. synapse/handlers/deactivate_account.py +2 -3
  45. synapse/handlers/delayed_events.py +10 -13
  46. synapse/handlers/device.py +14 -14
  47. synapse/handlers/e2e_keys.py +16 -11
  48. synapse/handlers/federation.py +7 -11
  49. synapse/handlers/federation_event.py +5 -6
  50. synapse/handlers/message.py +16 -10
  51. synapse/handlers/pagination.py +3 -7
  52. synapse/handlers/presence.py +21 -25
  53. synapse/handlers/profile.py +1 -1
  54. synapse/handlers/read_marker.py +3 -1
  55. synapse/handlers/register.py +8 -1
  56. synapse/handlers/room.py +13 -4
  57. synapse/handlers/room_member.py +11 -7
  58. synapse/handlers/room_policy.py +96 -2
  59. synapse/handlers/sso.py +1 -1
  60. synapse/handlers/stats.py +5 -3
  61. synapse/handlers/sync.py +20 -13
  62. synapse/handlers/typing.py +5 -10
  63. synapse/handlers/user_directory.py +12 -11
  64. synapse/handlers/worker_lock.py +19 -15
  65. synapse/http/client.py +18 -13
  66. synapse/http/federation/matrix_federation_agent.py +6 -1
  67. synapse/http/federation/well_known_resolver.py +3 -1
  68. synapse/http/matrixfederationclient.py +50 -11
  69. synapse/http/proxy.py +2 -2
  70. synapse/http/server.py +36 -2
  71. synapse/http/site.py +109 -17
  72. synapse/logging/context.py +201 -110
  73. synapse/logging/opentracing.py +30 -6
  74. synapse/logging/scopecontextmanager.py +161 -0
  75. synapse/media/_base.py +2 -1
  76. synapse/media/media_repository.py +20 -6
  77. synapse/media/url_previewer.py +5 -6
  78. synapse/metrics/_gc.py +3 -1
  79. synapse/metrics/background_process_metrics.py +128 -24
  80. synapse/metrics/common_usage_metrics.py +3 -5
  81. synapse/module_api/__init__.py +42 -5
  82. synapse/notifier.py +10 -3
  83. synapse/push/emailpusher.py +5 -4
  84. synapse/push/httppusher.py +6 -6
  85. synapse/push/pusherpool.py +3 -8
  86. synapse/replication/http/devices.py +0 -41
  87. synapse/replication/tcp/client.py +8 -5
  88. synapse/replication/tcp/handler.py +2 -3
  89. synapse/replication/tcp/protocol.py +14 -7
  90. synapse/replication/tcp/redis.py +16 -11
  91. synapse/replication/tcp/resource.py +5 -4
  92. synapse/replication/tcp/streams/__init__.py +2 -0
  93. synapse/res/providers.json +6 -5
  94. synapse/rest/__init__.py +2 -0
  95. synapse/rest/admin/__init__.py +4 -0
  96. synapse/rest/admin/events.py +69 -0
  97. synapse/rest/admin/media.py +70 -2
  98. synapse/rest/client/keys.py +147 -3
  99. synapse/rest/client/matrixrtc.py +52 -0
  100. synapse/rest/client/push_rule.py +1 -1
  101. synapse/rest/client/room.py +2 -3
  102. synapse/rest/client/sync.py +1 -3
  103. synapse/rest/client/transactions.py +1 -1
  104. synapse/server.py +271 -38
  105. synapse/server_notices/server_notices_manager.py +1 -0
  106. synapse/state/__init__.py +4 -1
  107. synapse/storage/_base.py +1 -1
  108. synapse/storage/background_updates.py +8 -3
  109. synapse/storage/controllers/persist_events.py +4 -3
  110. synapse/storage/controllers/purge_events.py +2 -3
  111. synapse/storage/controllers/state.py +5 -5
  112. synapse/storage/database.py +12 -7
  113. synapse/storage/databases/main/__init__.py +7 -2
  114. synapse/storage/databases/main/cache.py +4 -3
  115. synapse/storage/databases/main/censor_events.py +1 -1
  116. synapse/storage/databases/main/client_ips.py +9 -8
  117. synapse/storage/databases/main/deviceinbox.py +7 -6
  118. synapse/storage/databases/main/devices.py +4 -4
  119. synapse/storage/databases/main/end_to_end_keys.py +6 -3
  120. synapse/storage/databases/main/event_federation.py +7 -6
  121. synapse/storage/databases/main/event_push_actions.py +13 -13
  122. synapse/storage/databases/main/events_bg_updates.py +1 -1
  123. synapse/storage/databases/main/events_worker.py +6 -8
  124. synapse/storage/databases/main/lock.py +17 -13
  125. synapse/storage/databases/main/media_repository.py +2 -2
  126. synapse/storage/databases/main/metrics.py +6 -6
  127. synapse/storage/databases/main/monthly_active_users.py +3 -4
  128. synapse/storage/databases/main/receipts.py +1 -1
  129. synapse/storage/databases/main/registration.py +18 -19
  130. synapse/storage/databases/main/roommember.py +1 -1
  131. synapse/storage/databases/main/session.py +3 -3
  132. synapse/storage/databases/main/sliding_sync.py +2 -2
  133. synapse/storage/databases/main/transactions.py +3 -3
  134. synapse/storage/databases/state/store.py +2 -0
  135. synapse/synapse_rust/http_client.pyi +4 -0
  136. synapse/synapse_rust.abi3.so +0 -0
  137. synapse/util/async_helpers.py +36 -24
  138. synapse/util/batching_queue.py +16 -6
  139. synapse/util/caches/__init__.py +1 -1
  140. synapse/util/caches/deferred_cache.py +4 -0
  141. synapse/util/caches/descriptors.py +14 -2
  142. synapse/util/caches/dictionary_cache.py +6 -1
  143. synapse/util/caches/expiringcache.py +16 -5
  144. synapse/util/caches/lrucache.py +14 -26
  145. synapse/util/caches/response_cache.py +11 -1
  146. synapse/util/clock.py +215 -39
  147. synapse/util/constants.py +2 -0
  148. synapse/util/daemonize.py +5 -1
  149. synapse/util/distributor.py +9 -5
  150. synapse/util/metrics.py +35 -6
  151. synapse/util/ratelimitutils.py +4 -1
  152. synapse/util/retryutils.py +7 -4
  153. synapse/util/task_scheduler.py +11 -14
  154. synapse/logging/filter.py +0 -38
  155. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
  156. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
  157. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
  158. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
  159. {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/storage/databases/main/registration.py

@@ -212,7 +212,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         )
 
         if hs.config.worker.run_background_tasks:
-            self._clock.call_later(
+            self.clock.call_later(
                 0.0,
                 self._set_expiration_date_when_missing,
             )
@@ -226,7 +226,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
 
         # Create a background job for culling expired 3PID validity tokens
         if hs.config.worker.run_background_tasks:
-            self._clock.looping_call(
+            self.clock.looping_call(
                 self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS
             )
 
@@ -298,7 +298,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
     ) -> None:
         user_id_obj = UserID.from_string(user_id)
 
-        now = int(self._clock.time())
+        now = int(self.clock.time())
 
         user_approved = approved or not self._require_approval
 
@@ -457,7 +457,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         if not info:
             return False
 
-        now = self._clock.time_msec()
+        now = self.clock.time_msec()
         days = self.config.server.mau_appservice_trial_days.get(
             info.appservice_id, self.config.server.mau_trial_days
         )
@@ -640,7 +640,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         return await self.db_pool.runInteraction(
             "get_users_expiring_soon",
             select_users_txn,
-            self._clock.time_msec(),
+            self.clock.time_msec(),
             self.config.account_validity.account_validity_renew_at,
         )
 
@@ -1084,7 +1084,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         """
 
         def _count_daily_user_type(txn: LoggingTransaction) -> Dict[str, int]:
-            yesterday = int(self._clock.time()) - (60 * 60 * 24)
+            yesterday = int(self.clock.time()) - (60 * 60 * 24)
 
             sql = """
                 SELECT user_type, COUNT(*) AS count FROM (
@@ -1496,7 +1496,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         await self.db_pool.runInteraction(
             "cull_expired_threepid_validation_tokens",
             cull_expired_threepid_validation_tokens_txn,
-            self._clock.time_msec(),
+            self.clock.time_msec(),
         )
 
     @wrap_as_background_process("account_validity_set_expiration_dates")
@@ -1537,7 +1537,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         random value in the [now + period - d ; now + period] range, d being a
         delta equal to 10% of the validity period.
         """
-        now_ms = self._clock.time_msec()
+        now_ms = self.clock.time_msec()
         assert self._account_validity_period is not None
         expiration_ts = now_ms + self._account_validity_period
 
@@ -1608,7 +1608,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         Raises:
             StoreError if there was a problem updating this.
         """
-        now = self._clock.time_msec()
+        now = self.clock.time_msec()
 
         await self.db_pool.simple_update_one(
             "access_tokens",
@@ -1639,7 +1639,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         uses_allowed, pending, completed, expiry_time = res
 
         # Check if the token has expired
-        now = self._clock.time_msec()
+        now = self.clock.time_msec()
         if expiry_time and expiry_time < now:
             return False
 
@@ -1771,7 +1771,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
         return await self.db_pool.runInteraction(
             "select_registration_tokens",
             select_registration_tokens_txn,
-            self._clock.time_msec(),
+            self.clock.time_msec(),
             valid,
         )
 
@@ -2251,7 +2251,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
             "consume_login_token",
             self._consume_login_token,
             token,
-            self._clock.time_msec(),
+            self.clock.time_msec(),
         )
 
     async def invalidate_login_tokens_by_session_id(
@@ -2271,7 +2271,7 @@ class RegistrationWorkerStore(StatsStore, CacheInvalidationWorkerStore):
                 "auth_provider_id": auth_provider_id,
                 "auth_provider_session_id": auth_provider_session_id,
             },
-            updatevalues={"used_ts": self._clock.time_msec()},
+            updatevalues={"used_ts": self.clock.time_msec()},
             desc="invalidate_login_tokens_by_session_id",
         )
 
@@ -2640,7 +2640,6 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
     ):
         super().__init__(database, db_conn, hs)
 
-        self._clock = hs.get_clock()
         self.config = hs.config
 
         self.db_pool.updates.register_background_index_update(
@@ -2761,7 +2760,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
 
         # Create a background job for removing expired login tokens
         if hs.config.worker.run_background_tasks:
-            self._clock.looping_call(
+            self.clock.looping_call(
                 self._delete_expired_login_tokens, THIRTY_MINUTES_IN_MS
             )
 
@@ -2790,7 +2789,7 @@
             The token ID
         """
         next_id = self._access_tokens_id_gen.get_next()
-        now = self._clock.time_msec()
+        now = self.clock.time_msec()
 
         await self.db_pool.simple_insert(
             "access_tokens",
@@ -2874,7 +2873,7 @@
             keyvalues={"name": user_id},
             updatevalues={
                 "consent_version": consent_version,
-                "consent_ts": self._clock.time_msec(),
+                "consent_ts": self.clock.time_msec(),
             },
         )
         self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
@@ -2986,7 +2985,7 @@
             txn,
             table="threepid_validation_session",
             keyvalues={"session_id": session_id},
-            updatevalues={"validated_at": self._clock.time_msec()},
+            updatevalues={"validated_at": self.clock.time_msec()},
         )
 
         return next_link
@@ -3064,7 +3063,7 @@
 
         # We keep the expired tokens for an extra 5 minutes so we can measure how many
         # times a token is being used after its expiry
-        now = self._clock.time_msec()
+        now = self.clock.time_msec()
         await self.db_pool.runInteraction(
             "delete_expired_login_tokens",
             _delete_expired_login_tokens_txn,
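Every hunk above is the same mechanical rename: the registration store stops reading a privately stashed `self._clock` and uses the `clock` attribute instead, and `RegistrationBackgroundUpdateStore` drops its now-redundant `self._clock = hs.get_clock()` assignment. A rough sketch of the pattern (the class here is illustrative, not from this diff; it assumes the base store already exposes `clock` from the homeserver):

    from synapse.storage._base import SQLBaseStore

    class ExampleStore(SQLBaseStore):  # hypothetical store, for illustration only
        def __init__(self, database, db_conn, hs):
            super().__init__(database, db_conn, hs)
            # Previously: self._clock = hs.get_clock()  (duplicated per subclass)
            # Now: rely on the `clock` attribute the base store already provides.

        async def _cull(self) -> None:
            now = self.clock.time_msec()  # was: self._clock.time_msec()
            ...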
synapse/storage/databases/main/roommember.py

@@ -1002,7 +1002,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
         """
 
         with Measure(
-            self._clock,
+            self.clock,
             name="get_joined_user_ids_from_state",
             server_name=self.server_name,
         ):
synapse/storage/databases/main/session.py

@@ -55,7 +55,7 @@ class SessionStore(SQLBaseStore):
 
         # Create a background job for culling expired sessions.
         if hs.config.worker.run_background_tasks:
-            self._clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000)
+            self.clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000)
 
     async def create_session(
         self, session_type: str, value: JsonDict, expiry_ms: int
@@ -133,7 +133,7 @@ class SessionStore(SQLBaseStore):
             _get_session,
             session_type,
             session_id,
-            self._clock.time_msec(),
+            self.clock.time_msec(),
         )
 
     @wrap_as_background_process("delete_expired_sessions")
@@ -147,5 +147,5 @@ class SessionStore(SQLBaseStore):
         await self.db_pool.runInteraction(
             "delete_expired_sessions",
             _delete_expired_sessions_txn,
-            self._clock.time_msec(),
+            self.clock.time_msec(),
         )
synapse/storage/databases/main/sliding_sync.py

@@ -201,7 +201,7 @@ class SlidingSyncStore(SQLBaseStore):
                 "user_id": user_id,
                 "effective_device_id": device_id,
                 "conn_id": conn_id,
-                "created_ts": self._clock.time_msec(),
+                "created_ts": self.clock.time_msec(),
             },
             returning=("connection_key",),
         )
@@ -212,7 +212,7 @@
             table="sliding_sync_connection_positions",
             values={
                 "connection_key": connection_key,
-                "created_ts": self._clock.time_msec(),
+                "created_ts": self.clock.time_msec(),
             },
             returning=("connection_position",),
         )
synapse/storage/databases/main/transactions.py

@@ -81,11 +81,11 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
         super().__init__(database, db_conn, hs)
 
         if hs.config.worker.run_background_tasks:
-            self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
+            self.clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
 
     @wrap_as_background_process("cleanup_transactions")
     async def _cleanup_transactions(self) -> None:
-        now = self._clock.time_msec()
+        now = self.clock.time_msec()
         day_ago = now - 24 * 60 * 60 * 1000
 
         def _cleanup_transactions_txn(txn: LoggingTransaction) -> None:
@@ -160,7 +160,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
             insertion_values={
                 "response_code": code,
                 "response_json": db_binary_type(encode_canonical_json(response_dict)),
-                "ts": self._clock.time_msec(),
+                "ts": self.clock.time_msec(),
             },
             desc="set_received_txn_response",
         )
synapse/storage/databases/state/store.py

@@ -125,6 +125,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
         self._state_group_cache: DictionaryCache[int, StateKey, str] = DictionaryCache(
             name="*stateGroupCache*",
+            clock=hs.get_clock(),
             server_name=self.server_name,
             # TODO: this hasn't been tuned yet
             max_entries=50000,
@@ -132,6 +133,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         self._state_group_members_cache: DictionaryCache[int, StateKey, str] = (
             DictionaryCache(
                 name="*stateGroupMembersCache*",
+                clock=hs.get_clock(),
                 server_name=self.server_name,
                 max_entries=500000,
             )
synapse/synapse_rust/http_client.pyi

@@ -17,6 +17,10 @@ from twisted.internet.defer import Deferred
 from synapse.types import ISynapseReactor
 
 class HttpClient:
+    """
+    The returned deferreds follow Synapse logcontext rules.
+    """
+
     def __init__(self, reactor: ISynapseReactor, user_agent: str) -> None: ...
     def get(self, url: str, response_limit: int) -> Deferred[bytes]: ...
     def post(
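The new stub docstring means callers can treat the Rust client's deferreds as logcontext-safe. A minimal usage sketch under that assumption (the URL and size limit are placeholders):

    from synapse.synapse_rust.http_client import HttpClient

    async def fetch_body(client: HttpClient) -> bytes:
        # A Deferred that follows Synapse logcontext rules can be awaited
        # directly, without a make_deferred_yieldable wrapper.
        return await client.get("https://example.com/", response_limit=1024 * 1024)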
synapse/synapse_rust.abi3.so: binary file, contents not shown.
synapse/util/async_helpers.py

@@ -47,7 +47,6 @@ from typing import (
     Tuple,
     TypeVar,
     Union,
-    cast,
     overload,
 )
 
@@ -56,7 +55,6 @@ from typing_extensions import Concatenate, ParamSpec, Unpack
 
 from twisted.internet import defer
 from twisted.internet.defer import CancelledError
-from twisted.internet.interfaces import IReactorTime
 from twisted.python.failure import Failure
 
 from synapse.logging.context import (
@@ -65,7 +63,6 @@ from synapse.logging.context import (
     run_coroutine_in_background,
     run_in_background,
 )
-from synapse.types import ISynapseThreadlessReactor
 from synapse.util.clock import Clock
 
 logger = logging.getLogger(__name__)
@@ -551,25 +548,19 @@ class Linearizer:
 
     def __init__(
         self,
-        name: Optional[str] = None,
+        name: str,
+        clock: Clock,
         max_count: int = 1,
-        clock: Optional[Clock] = None,
     ):
         """
         Args:
+            name: TODO
             max_count: The maximum number of concurrent accesses
+            clock: (ideally, the homeserver clock `hs.get_clock()`)
         """
-        if name is None:
-            self.name: Union[str, int] = id(self)
-        else:
-            self.name = name
-
-        if not clock:
-            from twisted.internet import reactor
-
-            clock = Clock(cast(ISynapseThreadlessReactor, reactor))
-        self._clock = clock
+        self.name = name
         self.max_count = max_count
+        self._clock = clock
 
         # key_to_defer is a map from the key to a _LinearizerEntry.
         self.key_to_defer: Dict[Hashable, _LinearizerEntry] = {}
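Call sites must now name their `Linearizer` and hand it the homeserver clock; the anonymous-name and ad-hoc reactor fallbacks are gone. A sketch of the updated construction, assuming an `hs: HomeServer` is in scope:

    from synapse.util.async_helpers import Linearizer

    linearizer = Linearizer(name="example_linearizer", clock=hs.get_clock())

    async def handle(key: str) -> None:
        # At most `max_count` (default 1) concurrent holders per key.
        async with linearizer.queue(key):
            ...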
@@ -779,7 +770,11 @@ class ReadWriteLock:
 
 
 def timeout_deferred(
-    deferred: "defer.Deferred[_T]", timeout: float, reactor: IReactorTime
+    *,
+    deferred: "defer.Deferred[_T]",
+    timeout: float,
+    cancel_on_shutdown: bool = True,
+    clock: Clock,
 ) -> "defer.Deferred[_T]":
     """The in built twisted `Deferred.addTimeout` fails to time out deferreds
     that have a canceller that throws exceptions. This method creates a new
@@ -797,7 +792,13 @@ def timeout_deferred(
     Args:
         deferred: The Deferred to potentially timeout.
         timeout: Timeout in seconds
-        reactor: The twisted reactor to use
+        cancel_on_shutdown: Whether this call should be tracked for cleanup during
+            shutdown. In general, all calls should be tracked. There may be a use case
+            not to track calls with a `timeout` of 0 (or similarly short) since tracking
+            them may result in rapid insertions and removals of tracked calls
+            unnecessarily. But unless a specific instance of tracking proves to be an
+            issue, we can just track all delayed calls.
+        clock: The `Clock` instance used to track delayed calls.
 
 
     Returns:
@@ -821,7 +822,10 @@
         if not new_d.called:
             new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,)))
 
-    delayed_call = reactor.callLater(timeout, time_it_out)
+    # We don't track these calls since they are short.
+    delayed_call = clock.call_later(
+        timeout, time_it_out, call_later_cancel_on_shutdown=cancel_on_shutdown
+    )
 
     def convert_cancelled(value: Failure) -> Failure:
         # if the original deferred was cancelled, and our timeout has fired, then
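`timeout_deferred` becomes keyword-only and takes a `Clock` (with shutdown tracking on by default) instead of a bare reactor, so every caller needs updating. A migrated call might look like this sketch, where `pending` and `hs` are assumed to exist:

    from synapse.util.async_helpers import timeout_deferred

    wrapped = timeout_deferred(
        deferred=pending,      # the Deferred to guard
        timeout=30.0,          # seconds
        clock=hs.get_clock(),  # replaces the old `reactor` argument
    )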
@@ -963,9 +967,9 @@ class AwakenableSleeper:
     currently sleeping.
     """
 
-    def __init__(self, reactor: IReactorTime) -> None:
+    def __init__(self, clock: Clock) -> None:
         self._streams: Dict[str, Set[defer.Deferred[None]]] = {}
-        self._reactor = reactor
+        self._clock = clock
 
     def wake(self, name: str) -> None:
         """Wake everything related to `name` that is currently sleeping."""
@@ -984,7 +988,11 @@
 
         # Create a deferred that gets called in N seconds
         sleep_deferred: "defer.Deferred[None]" = defer.Deferred()
-        call = self._reactor.callLater(delay_ms / 1000, sleep_deferred.callback, None)
+        call = self._clock.call_later(
+            delay_ms / 1000,
+            sleep_deferred.callback,
+            None,
+        )
 
         # Create a deferred that will get called if `wake` is called with
         # the same `name`.
@@ -1018,8 +1026,8 @@
 class DeferredEvent:
     """Like threading.Event but for async code"""
 
-    def __init__(self, reactor: IReactorTime) -> None:
-        self._reactor = reactor
+    def __init__(self, clock: Clock) -> None:
+        self._clock = clock
         self._deferred: "defer.Deferred[None]" = defer.Deferred()
 
     def set(self) -> None:
@@ -1039,7 +1047,11 @@ class DeferredEvent:
 
         # Create a deferred that gets called in N seconds
         sleep_deferred: "defer.Deferred[None]" = defer.Deferred()
-        call = self._reactor.callLater(timeout_seconds, sleep_deferred.callback, None)
+        call = self._clock.call_later(
+            timeout_seconds,
+            sleep_deferred.callback,
+            None,
+        )
 
         try:
             await make_deferred_yieldable(
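`AwakenableSleeper` and `DeferredEvent` follow the same reactor-to-Clock migration. A hedged sketch of constructing and waiting on a `DeferredEvent` with the homeserver clock (`hs` assumed in scope; the wait semantics presumably mirror `threading.Event`):

    from synapse.util.async_helpers import DeferredEvent

    event = DeferredEvent(hs.get_clock())  # was: DeferredEvent(reactor)

    async def waiter() -> None:
        # Wait up to ten seconds for another task to call event.set().
        if await event.wait(10.0):
            ...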
synapse/util/batching_queue.py

@@ -21,6 +21,7 @@
 
 import logging
 from typing import (
+    TYPE_CHECKING,
     Awaitable,
     Callable,
     Dict,
@@ -38,9 +39,11 @@ from twisted.internet import defer
 
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.clock import Clock
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
 
 
@@ -97,12 +100,13 @@ class BatchingQueue(Generic[V, R]):
         self,
         *,
         name: str,
-        server_name: str,
+        hs: "HomeServer",
         clock: Clock,
         process_batch_callback: Callable[[List[V]], Awaitable[R]],
     ):
         self._name = name
-        self.server_name = server_name
+        self.hs = hs
+        self.server_name = hs.hostname
         self._clock = clock
 
         # The set of keys currently being processed.
@@ -127,6 +131,14 @@
             name=self._name, **{SERVER_NAME_LABEL: self.server_name}
         )
 
+    def shutdown(self) -> None:
+        """
+        Prepares the object for garbage collection by removing any handed out
+        references.
+        """
+        number_queued.remove(self._name, self.server_name)
+        number_of_keys.remove(self._name, self.server_name)
+
     async def add_to_queue(self, value: V, key: Hashable = ()) -> R:
         """Adds the value to the queue with the given key, returning the result
         of the processing function for the batch that included the given value.
@@ -145,9 +157,7 @@
         # If we're not currently processing the key fire off a background
         # process to start processing.
         if key not in self._processing_keys:
-            run_as_background_process(
-                self._name, self.server_name, self._process_queue, key
-            )
+            self.hs.run_as_background_process(self._name, self._process_queue, key)
 
         with self._number_in_flight_metric.track_inprogress():
             return await make_deferred_yieldable(d)
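`BatchingQueue` now takes the whole `HomeServer` in place of a bare `server_name`, both to derive `hs.hostname` and to launch its worker via `hs.run_as_background_process`; the new `shutdown()` unregisters its metrics. A construction sketch with an illustrative callback (`hs` assumed in scope):

    from typing import List
    from synapse.util.batching_queue import BatchingQueue

    async def process_batch(values: List[str]) -> int:
        # Called with everything queued under one key since the last run.
        return len(values)

    queue: BatchingQueue[str, int] = BatchingQueue(
        name="example_queue",
        hs=hs,                 # replaces the old server_name=... argument
        clock=hs.get_clock(),
        process_batch_callback=process_batch,
    )
    result = await queue.add_to_queue("value", key="some-key")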
synapse/util/caches/__init__.py

@@ -244,7 +244,7 @@ def register_cache(
         collect_callback=collect_callback,
     )
     metric_name = "cache_%s_%s_%s" % (cache_type, cache_name, server_name)
-    CACHE_METRIC_REGISTRY.register_hook(metric_name, metric.collect)
+    CACHE_METRIC_REGISTRY.register_hook(server_name, metric_name, metric.collect)
     return metric
synapse/util/caches/deferred_cache.py

@@ -47,6 +47,7 @@ from synapse.metrics import SERVER_NAME_LABEL
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
+from synapse.util.clock import Clock
 
 cache_pending_metric = Gauge(
     "synapse_util_caches_cache_pending",
@@ -82,6 +83,7 @@ class DeferredCache(Generic[KT, VT]):
         self,
         *,
         name: str,
+        clock: Clock,
         server_name: str,
         max_entries: int = 1000,
         tree: bool = False,
@@ -103,6 +105,7 @@
             prune_unread_entries: If True, cache entries that haven't been read recently
                 will be evicted from the cache in the background. Set to False to
                 opt-out of this behaviour.
+            clock: The homeserver `Clock` instance
         """
         cache_type = TreeCache if tree else dict
 
@@ -120,6 +123,7 @@
         # a Deferred.
         self.cache: LruCache[KT, VT] = LruCache(
             max_size=max_entries,
+            clock=clock,
             server_name=server_name,
             cache_name=name,
             cache_type=cache_type,
synapse/util/caches/descriptors.py

@@ -53,6 +53,7 @@ from synapse.util import unwrapFirstError
 from synapse.util.async_helpers import delay_cancellation
 from synapse.util.caches.deferred_cache import DeferredCache
 from synapse.util.caches.lrucache import LruCache
+from synapse.util.clock import Clock
 
 logger = logging.getLogger(__name__)
 
@@ -154,13 +155,20 @@ class _CacheDescriptorBase:
     )
 
 
-class HasServerName(Protocol):
+class HasServerNameAndClock(Protocol):
     server_name: str
     """
     The homeserver name that this cache is associated with (used to label the metric)
     (`hs.hostname`).
     """
 
+    clock: Clock
+    """
+    The homeserver clock instance used to track delayed and looping calls. Important to
+    be able to fully cleanup the homeserver instance on server shutdown.
+    (`hs.get_clock()`).
+    """
+
 
 class DeferredCacheDescriptor(_CacheDescriptorBase):
     """A method decorator that applies a memoizing cache around the function.
@@ -239,7 +247,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
         self.prune_unread_entries = prune_unread_entries
 
     def __get__(
-        self, obj: Optional[HasServerName], owner: Optional[Type]
+        self, obj: Optional[HasServerNameAndClock], owner: Optional[Type]
     ) -> Callable[..., "defer.Deferred[Any]"]:
         # We need access to instance-level `obj.server_name` attribute
         assert obj is not None, (
@@ -249,9 +257,13 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
         assert obj.server_name is not None, (
             "The `server_name` attribute must be set on the object where `@cached` decorator is used."
         )
+        assert obj.clock is not None, (
+            "The `clock` attribute must be set on the object where `@cached` decorator is used."
+        )
 
         cache: DeferredCache[CacheKey, Any] = DeferredCache(
             name=self.name,
+            clock=obj.clock,
             server_name=obj.server_name,
             max_entries=self.max_entries,
             tree=self.tree,
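Objects hosting `@cached` methods must now satisfy the renamed `HasServerNameAndClock` protocol, exposing both `server_name` and `clock`; both are asserted when the descriptor is first accessed. A sketch of a conforming (hypothetical) class:

    from typing import TYPE_CHECKING

    from synapse.util.caches.descriptors import cached

    if TYPE_CHECKING:
        from synapse.server import HomeServer

    class ExampleLookup:
        def __init__(self, hs: "HomeServer") -> None:
            self.server_name = hs.hostname  # already required by the descriptor
            self.clock = hs.get_clock()     # newly required as well

        @cached()
        async def lookup(self, key: str) -> str:
            ...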
synapse/util/caches/dictionary_cache.py

@@ -37,6 +37,7 @@ import attr
 
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.treecache import TreeCache
+from synapse.util.clock import Clock
 
 logger = logging.getLogger(__name__)
 
@@ -127,10 +128,13 @@ class DictionaryCache(Generic[KT, DKT, DV]):
         for the '2' dict key.
     """
 
-    def __init__(self, *, name: str, server_name: str, max_entries: int = 1000):
+    def __init__(
+        self, *, name: str, clock: Clock, server_name: str, max_entries: int = 1000
+    ):
         """
         Args:
             name
+            clock: The homeserver `Clock` instance
             server_name: The homeserver name that this cache is associated with
                 (used to label the metric) (`hs.hostname`).
             max_entries
@@ -160,6 +164,7 @@ class DictionaryCache(Generic[KT, DKT, DV]):
             Union[_PerKeyValue, Dict[DKT, DV]],
         ] = LruCache(
             max_size=max_entries,
+            clock=clock,
             server_name=server_name,
             cache_name=name,
             cache_type=TreeCache,
synapse/util/caches/expiringcache.py

@@ -21,17 +21,29 @@
 
 import logging
 from collections import OrderedDict
-from typing import Any, Generic, Iterable, Literal, Optional, TypeVar, Union, overload
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Generic,
+    Iterable,
+    Literal,
+    Optional,
+    TypeVar,
+    Union,
+    overload,
+)
 
 import attr
 
 from twisted.internet import defer
 
 from synapse.config import cache as cache_config
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.caches import EvictionReason, register_cache
 from synapse.util.clock import Clock
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
 
 
@@ -49,6 +61,7 @@ class ExpiringCache(Generic[KT, VT]):
         *,
         cache_name: str,
         server_name: str,
+        hs: "HomeServer",
         clock: Clock,
         max_len: int = 0,
         expiry_ms: int = 0,
@@ -99,9 +112,7 @@ class ExpiringCache(Generic[KT, VT]):
             return
 
         def f() -> "defer.Deferred[None]":
-            return run_as_background_process(
-                "prune_cache", server_name, self._prune_cache
-            )
+            return hs.run_as_background_process("prune_cache", self._prune_cache)
 
         self._clock.looping_call(f, self._expiry_ms / 2)
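`ExpiringCache` gains an `hs` argument alongside `clock` so its prune job can run through `hs.run_as_background_process`. A construction sketch with illustrative sizing values (`hs` assumed in scope):

    from synapse.util.caches.expiringcache import ExpiringCache

    cache: ExpiringCache[str, bytes] = ExpiringCache(
        cache_name="example_cache",
        server_name=hs.hostname,
        hs=hs,                     # new: used to launch the prune loop
        clock=hs.get_clock(),
        max_len=1000,
        expiry_ms=30 * 60 * 1000,  # prune entries after 30 minutes
    )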