matrix-synapse 1.139.1__cp39-abi3-musllinux_1_2_aarch64.whl → 1.140.0rc1__cp39-abi3-musllinux_1_2_aarch64.whl

This diff covers the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of matrix-synapse has been flagged as potentially problematic.

Files changed (159)
  1. {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
  2. {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +158 -155
  3. synapse/_scripts/generate_workers_map.py +6 -1
  4. synapse/_scripts/synapse_port_db.py +0 -2
  5. synapse/_scripts/update_synapse_database.py +1 -6
  6. synapse/api/auth/base.py +1 -3
  7. synapse/api/auth/mas.py +6 -8
  8. synapse/api/auth/msc3861_delegated.py +6 -8
  9. synapse/api/errors.py +3 -0
  10. synapse/app/_base.py +101 -39
  11. synapse/app/admin_cmd.py +2 -4
  12. synapse/app/appservice.py +1 -1
  13. synapse/app/client_reader.py +1 -1
  14. synapse/app/event_creator.py +1 -1
  15. synapse/app/federation_reader.py +1 -1
  16. synapse/app/federation_sender.py +1 -1
  17. synapse/app/frontend_proxy.py +1 -1
  18. synapse/app/generic_worker.py +17 -11
  19. synapse/app/homeserver.py +85 -47
  20. synapse/app/media_repository.py +1 -1
  21. synapse/app/phone_stats_home.py +16 -14
  22. synapse/app/pusher.py +1 -1
  23. synapse/app/synchrotron.py +1 -1
  24. synapse/app/user_dir.py +1 -1
  25. synapse/appservice/__init__.py +29 -2
  26. synapse/appservice/scheduler.py +8 -8
  27. synapse/config/_base.py +32 -14
  28. synapse/config/_base.pyi +5 -3
  29. synapse/config/experimental.py +3 -0
  30. synapse/config/homeserver.py +27 -1
  31. synapse/config/logger.py +3 -4
  32. synapse/config/matrixrtc.py +67 -0
  33. synapse/crypto/keyring.py +18 -4
  34. synapse/events/auto_accept_invites.py +0 -1
  35. synapse/federation/federation_client.py +39 -0
  36. synapse/federation/federation_server.py +1 -1
  37. synapse/federation/send_queue.py +3 -0
  38. synapse/federation/sender/__init__.py +24 -8
  39. synapse/federation/sender/per_destination_queue.py +31 -8
  40. synapse/federation/sender/transaction_manager.py +12 -0
  41. synapse/federation/transport/client.py +29 -0
  42. synapse/handlers/account_validity.py +2 -4
  43. synapse/handlers/appservice.py +5 -7
  44. synapse/handlers/deactivate_account.py +2 -3
  45. synapse/handlers/delayed_events.py +10 -13
  46. synapse/handlers/device.py +14 -14
  47. synapse/handlers/e2e_keys.py +4 -3
  48. synapse/handlers/federation.py +7 -11
  49. synapse/handlers/federation_event.py +5 -6
  50. synapse/handlers/message.py +16 -10
  51. synapse/handlers/pagination.py +3 -7
  52. synapse/handlers/presence.py +21 -25
  53. synapse/handlers/profile.py +1 -1
  54. synapse/handlers/read_marker.py +3 -1
  55. synapse/handlers/register.py +8 -1
  56. synapse/handlers/room.py +13 -4
  57. synapse/handlers/room_member.py +11 -7
  58. synapse/handlers/room_policy.py +96 -2
  59. synapse/handlers/sso.py +1 -1
  60. synapse/handlers/stats.py +5 -3
  61. synapse/handlers/sync.py +20 -13
  62. synapse/handlers/typing.py +5 -10
  63. synapse/handlers/user_directory.py +12 -11
  64. synapse/handlers/worker_lock.py +19 -15
  65. synapse/http/client.py +18 -13
  66. synapse/http/federation/matrix_federation_agent.py +6 -1
  67. synapse/http/federation/well_known_resolver.py +3 -1
  68. synapse/http/matrixfederationclient.py +50 -11
  69. synapse/http/proxy.py +2 -2
  70. synapse/http/server.py +36 -2
  71. synapse/http/site.py +109 -17
  72. synapse/logging/context.py +165 -63
  73. synapse/logging/opentracing.py +30 -6
  74. synapse/logging/scopecontextmanager.py +161 -0
  75. synapse/media/_base.py +2 -1
  76. synapse/media/media_repository.py +20 -6
  77. synapse/media/url_previewer.py +5 -6
  78. synapse/metrics/_gc.py +3 -1
  79. synapse/metrics/background_process_metrics.py +128 -24
  80. synapse/metrics/common_usage_metrics.py +3 -5
  81. synapse/module_api/__init__.py +42 -5
  82. synapse/notifier.py +10 -3
  83. synapse/push/emailpusher.py +5 -4
  84. synapse/push/httppusher.py +6 -6
  85. synapse/push/pusherpool.py +3 -8
  86. synapse/replication/http/devices.py +0 -41
  87. synapse/replication/tcp/client.py +8 -5
  88. synapse/replication/tcp/handler.py +2 -3
  89. synapse/replication/tcp/protocol.py +14 -7
  90. synapse/replication/tcp/redis.py +16 -11
  91. synapse/replication/tcp/resource.py +5 -4
  92. synapse/replication/tcp/streams/__init__.py +2 -0
  93. synapse/res/providers.json +6 -5
  94. synapse/rest/__init__.py +2 -0
  95. synapse/rest/admin/__init__.py +4 -0
  96. synapse/rest/admin/events.py +69 -0
  97. synapse/rest/admin/media.py +70 -2
  98. synapse/rest/client/keys.py +3 -3
  99. synapse/rest/client/matrixrtc.py +52 -0
  100. synapse/rest/client/push_rule.py +1 -1
  101. synapse/rest/client/room.py +2 -3
  102. synapse/rest/client/sync.py +1 -0
  103. synapse/rest/client/transactions.py +1 -1
  104. synapse/server.py +271 -38
  105. synapse/server_notices/server_notices_manager.py +1 -0
  106. synapse/state/__init__.py +4 -1
  107. synapse/storage/_base.py +1 -1
  108. synapse/storage/background_updates.py +8 -3
  109. synapse/storage/controllers/persist_events.py +4 -3
  110. synapse/storage/controllers/purge_events.py +2 -3
  111. synapse/storage/controllers/state.py +5 -5
  112. synapse/storage/database.py +12 -7
  113. synapse/storage/databases/main/__init__.py +7 -2
  114. synapse/storage/databases/main/cache.py +4 -3
  115. synapse/storage/databases/main/censor_events.py +1 -1
  116. synapse/storage/databases/main/client_ips.py +9 -8
  117. synapse/storage/databases/main/deviceinbox.py +7 -6
  118. synapse/storage/databases/main/devices.py +4 -4
  119. synapse/storage/databases/main/end_to_end_keys.py +6 -3
  120. synapse/storage/databases/main/event_federation.py +7 -6
  121. synapse/storage/databases/main/event_push_actions.py +13 -13
  122. synapse/storage/databases/main/events_bg_updates.py +1 -1
  123. synapse/storage/databases/main/events_worker.py +6 -8
  124. synapse/storage/databases/main/lock.py +17 -13
  125. synapse/storage/databases/main/media_repository.py +2 -2
  126. synapse/storage/databases/main/metrics.py +6 -6
  127. synapse/storage/databases/main/monthly_active_users.py +3 -4
  128. synapse/storage/databases/main/receipts.py +1 -1
  129. synapse/storage/databases/main/registration.py +18 -19
  130. synapse/storage/databases/main/roommember.py +1 -1
  131. synapse/storage/databases/main/session.py +3 -3
  132. synapse/storage/databases/main/sliding_sync.py +2 -2
  133. synapse/storage/databases/main/transactions.py +3 -3
  134. synapse/storage/databases/state/store.py +2 -0
  135. synapse/synapse_rust/http_client.pyi +4 -0
  136. synapse/synapse_rust.abi3.so +0 -0
  137. synapse/util/async_helpers.py +36 -24
  138. synapse/util/batching_queue.py +16 -6
  139. synapse/util/caches/__init__.py +1 -1
  140. synapse/util/caches/deferred_cache.py +4 -0
  141. synapse/util/caches/descriptors.py +14 -2
  142. synapse/util/caches/dictionary_cache.py +6 -1
  143. synapse/util/caches/expiringcache.py +16 -5
  144. synapse/util/caches/lrucache.py +14 -26
  145. synapse/util/caches/response_cache.py +11 -1
  146. synapse/util/clock.py +215 -39
  147. synapse/util/constants.py +2 -0
  148. synapse/util/daemonize.py +5 -1
  149. synapse/util/distributor.py +9 -5
  150. synapse/util/metrics.py +35 -6
  151. synapse/util/ratelimitutils.py +4 -1
  152. synapse/util/retryutils.py +7 -4
  153. synapse/util/task_scheduler.py +11 -14
  154. synapse/logging/filter.py +0 -38
  155. {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
  156. {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
  157. {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
  158. {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
  159. {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/storage/databases/main/cache.py
@@ -751,7 +751,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
  "instance_name": self._instance_name,
  "cache_func": cache_name,
  "keys": keys,
- "invalidation_ts": self._clock.time_msec(),
+ "invalidation_ts": self.clock.time_msec(),
  },
  )

@@ -778,7 +778,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
  assert self._cache_id_gen is not None

  stream_ids = self._cache_id_gen.get_next_mult_txn(txn, len(key_tuples))
- ts = self._clock.time_msec()
+ ts = self.clock.time_msec()
  txn.call_after(self.hs.get_notifier().on_new_replication_data)
  self.db_pool.simple_insert_many_txn(
  txn,

@@ -830,7 +830,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
  next_interval = REGULAR_CLEANUP_INTERVAL_MS

  self.hs.get_clock().call_later(
- next_interval / 1000, self._clean_up_cache_invalidation_wrapper
+ next_interval / 1000,
+ self._clean_up_cache_invalidation_wrapper,
  )

  async def _clean_up_batch_of_old_cache_invalidations(
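Note: nearly every storage hunk in this release is the same mechanical rename, moving call sites from the private `self._clock` attribute to the public `self.clock`. A minimal sketch of the pattern; the `Clock` and store classes below are illustrative stand-ins, not Synapse's actual definitions:

```python
import time


class Clock:
    """Illustrative stand-in for Synapse's clock abstraction."""

    def time_msec(self) -> int:
        # Wall-clock time in milliseconds, as used throughout these hunks.
        return int(time.time() * 1000)


class StoreBase:
    def __init__(self, clock: Clock) -> None:
        self.clock = clock    # public attribute, used from 1.140.0rc1 on
        self._clock = clock   # private alias that 1.139.1 call sites used


class ExampleStore(StoreBase):
    def invalidation_ts(self) -> int:
        # Before: self._clock.time_msec()
        # After:  self.clock.time_msec()
        return self.clock.time_msec()
```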
synapse/storage/databases/main/censor_events.py
@@ -77,7 +77,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
  return

  before_ts = (
- self._clock.time_msec() - self.hs.config.server.redaction_retention_period
+ self.clock.time_msec() - self.hs.config.server.redaction_retention_period
  )

  # We fetch all redactions that:
synapse/storage/databases/main/client_ips.py
@@ -438,10 +438,11 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
  cache_name="client_ip_last_seen",
  server_name=self.server_name,
  max_size=50000,
+ clock=hs.get_clock(),
  )

  if hs.config.worker.run_background_tasks and self.user_ips_max_age:
- self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
+ self.clock.looping_call(self._prune_old_user_ips, 5 * 1000)

  if self._update_on_this_worker:
  # This is the designated worker that can write to the client IP

@@ -452,11 +453,11 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
  Tuple[str, str, str], Tuple[str, Optional[str], int]
  ] = {}

- self._client_ip_looper = self._clock.looping_call(
- self._update_client_ips_batch, 5 * 1000
- )
- self.hs.get_clock().add_system_event_trigger(
- "before", "shutdown", self._update_client_ips_batch
+ self.clock.looping_call(self._update_client_ips_batch, 5 * 1000)
+ hs.register_async_shutdown_handler(
+ phase="before",
+ eventType="shutdown",
+ shutdown_func=self._update_client_ips_batch,
  )

  @wrap_as_background_process("prune_old_user_ips")
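Beyond the clock rename, the hunk above switches shutdown registration from `hs.get_clock().add_system_event_trigger("before", "shutdown", ...)` to `hs.register_async_shutdown_handler(...)` with keyword arguments. Only the method name and call shape below come from the diff; the registry and dispatch logic are a guessed sketch:

```python
import asyncio
from typing import Awaitable, Callable, Dict, List, Tuple

ShutdownFunc = Callable[[], Awaitable[None]]


class HomeServerSketch:
    """Toy shutdown registry mirroring the call sites in this diff."""

    def __init__(self) -> None:
        self._handlers: Dict[Tuple[str, str], List[ShutdownFunc]] = {}

    def register_async_shutdown_handler(
        self, phase: str, eventType: str, shutdown_func: ShutdownFunc
    ) -> None:
        # Same keyword-argument shape as the call sites above.
        self._handlers.setdefault((phase, eventType), []).append(shutdown_func)

    async def fire(self, phase: str, eventType: str) -> None:
        # Hypothetical dispatch; the real trigger lives inside Synapse.
        for func in self._handlers.get((phase, eventType), []):
            await func()


async def flush_client_ips() -> None:
    print("flushing pending client IP updates")


hs = HomeServerSketch()
hs.register_async_shutdown_handler(
    phase="before", eventType="shutdown", shutdown_func=flush_client_ips
)
asyncio.run(hs.fire("before", "shutdown"))
```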
@@ -492,7 +493,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
  )
  """

- timestamp = self._clock.time_msec() - self.user_ips_max_age
+ timestamp = self.clock.time_msec() - self.user_ips_max_age

  def _prune_old_user_ips_txn(txn: LoggingTransaction) -> None:
  txn.execute(sql, (timestamp,))

@@ -628,7 +629,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
  return

  if not now:
- now = int(self._clock.time_msec())
+ now = int(self.clock.time_msec())
  key = (user_id, access_token, ip)

  try:
synapse/storage/databases/main/deviceinbox.py
@@ -96,7 +96,8 @@ class DeviceInboxWorkerStore(SQLBaseStore):
  ] = ExpiringCache(
  cache_name="last_device_delete_cache",
  server_name=self.server_name,
- clock=self._clock,
+ hs=hs,
+ clock=self.clock,
  max_len=10000,
  expiry_ms=30 * 60 * 1000,
  )
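`ExpiringCache` now takes the homeserver (`hs=hs`) in addition to the clock. As a rough illustration of what the `max_len`/`expiry_ms` keywords imply, here is a toy time-based cache; only the keyword names come from the call site above, and the eviction details are simplified guesses:

```python
import time
from typing import Dict, Generic, Optional, Tuple, TypeVar

K = TypeVar("K")
V = TypeVar("V")


class ExpiringCacheSketch(Generic[K, V]):
    def __init__(self, max_len: int, expiry_ms: int) -> None:
        self._max_len = max_len
        self._expiry_ms = expiry_ms
        self._entries: Dict[K, Tuple[int, V]] = {}

    def _now_ms(self) -> int:
        return int(time.time() * 1000)

    def set(self, key: K, value: V) -> None:
        if len(self._entries) >= self._max_len and key not in self._entries:
            # Evict the oldest entry when full (real eviction is fancier).
            oldest = min(self._entries, key=lambda k: self._entries[k][0])
            del self._entries[oldest]
        self._entries[key] = (self._now_ms(), value)

    def get(self, key: K) -> Optional[V]:
        entry = self._entries.get(key)
        if entry is None:
            return None
        inserted_ts, value = entry
        if self._now_ms() - inserted_ts > self._expiry_ms:
            del self._entries[key]  # lazily expire on read
            return None
        return value
```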
@@ -154,7 +155,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
  )

  if hs.config.worker.run_background_tasks:
- self._clock.looping_call(
+ self.clock.looping_call(
  run_as_background_process,
  DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS,
  "_delete_old_federation_inbox_rows",

@@ -826,7 +827,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
  )

  async with self._to_device_msg_id_gen.get_next() as stream_id:
- now_ms = self._clock.time_msec()
+ now_ms = self.clock.time_msec()
  await self.db_pool.runInteraction(
  "add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
  )

@@ -881,7 +882,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
  )

  async with self._to_device_msg_id_gen.get_next() as stream_id:
- now_ms = self._clock.time_msec()
+ now_ms = self.clock.time_msec()
  await self.db_pool.runInteraction(
  "add_messages_from_remote_to_device_inbox",
  add_messages_txn,

@@ -1002,7 +1003,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
  # We delete at most 100 rows that are older than
  # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
  delete_before_ts = (
- self._clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
+ self.clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
  )
  sql = """
  WITH to_delete AS (

@@ -1032,7 +1033,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):

  # We sleep a bit so that we don't hammer the database in a tight
  # loop first time we run this.
- await self._clock.sleep(1)
+ await self.clock.sleep(1)

  async def get_devices_with_messages(
  self, user_id: str, device_ids: StrCollection
synapse/storage/databases/main/devices.py
@@ -195,7 +195,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
  )

  if hs.config.worker.run_background_tasks:
- self._clock.looping_call(
+ self.clock.looping_call(
  self._prune_old_outbound_device_pokes, 60 * 60 * 1000
  )

@@ -1390,7 +1390,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
  table="device_lists_remote_resync",
  keyvalues={"user_id": user_id},
  values={},
- insertion_values={"added_ts": self._clock.time_msec()},
+ insertion_values={"added_ts": self.clock.time_msec()},
  )

  await self.db_pool.runInteraction(

@@ -1601,7 +1601,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
  that user when the destination comes back. It doesn't matter which device
  we keep.
  """
- yesterday = self._clock.time_msec() - prune_age
+ yesterday = self.clock.time_msec() - prune_age

  def _prune_txn(txn: LoggingTransaction) -> None:
  # look for (user, destination) pairs which have an update older than

@@ -2086,7 +2086,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
  stream_id,
  )

- now = self._clock.time_msec()
+ now = self.clock.time_msec()

  encoded_context = json_encoder.encode(context)
  mark_sent = not self.hs.is_mine_id(user_id)
synapse/storage/databases/main/end_to_end_keys.py
@@ -354,7 +354,10 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
  if d is not None and d.keys is not None
  )

- for batch in batch_iter(signature_query, 50):
+ # 1000 is an arbitrary batch size. It helped performance on a very
+ # large-scale deployment (matrix.org), but has not been tested against
+ # any other setup.
+ for batch in batch_iter(signature_query, 1000):
  cross_sigs_result = (
  await self._get_e2e_cross_signing_signatures_for_devices(batch)
  )
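This is one of the few behavioural changes in the file: the signature-query batch size goes from 50 to 1000, with the rationale carried in the diff's own comment. For reference, a generic chunking helper equivalent to what `batch_iter` does (this sketch is my own; the real helper lives in Synapse's util module):

```python
from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")


def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    # Yield successive fixed-size chunks; the final chunk may be shorter.
    it = iter(iterable)
    while True:
        batch = tuple(islice(it, size))
        if not batch:
            return
        yield batch


# Larger batches mean fewer query round trips per signature lookup:
# 2500 devices is 50 batches at size 50, but only 3 at size 1000.
assert len(list(batch_iter(range(2500), 1000))) == 3
```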
@@ -1561,7 +1564,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
  DELETE FROM e2e_one_time_keys_json
  WHERE {clause} AND ts_added_ms < ? AND length(key_id) = 6
  """
- args.append(self._clock.time_msec() - (7 * 24 * 3600 * 1000))
+ args.append(self.clock.time_msec() - (7 * 24 * 3600 * 1000))
  txn.execute(sql, args)

  return users, txn.rowcount

@@ -1582,7 +1585,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
  None, if there is no such key.
  Otherwise, the timestamp before which replacement is allowed without UIA.
  """
- timestamp = self._clock.time_msec() + duration_ms
+ timestamp = self.clock.time_msec() + duration_ms

  def impl(txn: LoggingTransaction) -> Optional[int]:
  txn.execute(
synapse/storage/databases/main/event_federation.py
@@ -167,6 +167,7 @@ class EventFederationWorkerStore(
  # Cache of event ID to list of auth event IDs and their depths.
  self._event_auth_cache: LruCache[str, List[Tuple[str, int]]] = LruCache(
  max_size=500000,
+ clock=self.hs.get_clock(),
  server_name=self.server_name,
  cache_name="_event_auth_cache",
  size_callback=len,
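The `_event_auth_cache` construction gains a `clock=self.hs.get_clock()` keyword alongside the existing ones. To show what `max_size` combined with `size_callback=len` means, here is a toy size-weighted LRU cache; only the keyword names come from the diff, and the behaviour sketched is the usual least-recently-used eviction:

```python
from collections import OrderedDict
from typing import Callable, Generic, Optional, TypeVar

K = TypeVar("K")
V = TypeVar("V")


class LruCacheSketch(Generic[K, V]):
    def __init__(self, max_size: int, size_callback: Callable[[V], int]) -> None:
        self._max_size = max_size
        self._size_callback = size_callback
        self._data: "OrderedDict[K, V]" = OrderedDict()
        self._current_size = 0

    def set(self, key: K, value: V) -> None:
        if key in self._data:
            self._current_size -= self._size_callback(self._data.pop(key))
        self._data[key] = value
        self._current_size += self._size_callback(value)
        while self._current_size > self._max_size and self._data:
            # Evict least-recently-used entries until we fit again.
            _, evicted = self._data.popitem(last=False)
            self._current_size -= self._size_callback(evicted)

    def get(self, key: K) -> Optional[V]:
        if key not in self._data:
            return None
        self._data.move_to_end(key)  # mark as most recently used
        return self._data[key]
```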
@@ -176,7 +177,7 @@ class EventFederationWorkerStore(
  # index.
  self.tests_allow_no_chain_cover_index = True

- self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)
+ self.clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)

  if isinstance(self.database_engine, PostgresEngine):
  self.db_pool.updates.register_background_validate_constraint_and_delete_rows(

@@ -1328,7 +1329,7 @@ class EventFederationWorkerStore(
  (
  room_id,
  current_depth,
- self._clock.time_msec(),
+ self.clock.time_msec(),
  BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
  BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS,
  limit,

@@ -1841,7 +1842,7 @@ class EventFederationWorkerStore(
  last_cause=EXCLUDED.last_cause;
  """

- txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause))
+ txn.execute(sql, (room_id, event_id, 1, self.clock.time_msec(), cause))

  @trace
  async def get_event_ids_with_failed_pull_attempts(

@@ -1905,7 +1906,7 @@ class EventFederationWorkerStore(
  ),
  )

- current_time = self._clock.time_msec()
+ current_time = self.clock.time_msec()

  event_ids_with_backoff = {}
  for event_id, last_attempt_ts, num_attempts in event_failed_pull_attempts:

@@ -2025,7 +2026,7 @@ class EventFederationWorkerStore(
  values={},
  insertion_values={
  "room_id": event.room_id,
- "received_ts": self._clock.time_msec(),
+ "received_ts": self.clock.time_msec(),
  "event_json": json_encoder.encode(event.get_dict()),
  "internal_metadata": json_encoder.encode(
  event.internal_metadata.get_dict()

@@ -2299,7 +2300,7 @@ class EventFederationWorkerStore(
  # If there is nothing in the staging area default it to 0.
  age = 0
  if received_ts is not None:
- age = self._clock.time_msec() - received_ts
+ age = self.clock.time_msec() - received_ts

  return count, age
synapse/storage/databases/main/event_push_actions.py
@@ -95,6 +95,8 @@ from typing import (

  import attr

+ from twisted.internet.task import LoopingCall
+
  from synapse.api.constants import MAIN_TIMELINE, ReceiptTypes
  from synapse.metrics.background_process_metrics import wrap_as_background_process
  from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause

@@ -254,6 +256,8 @@ def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, st


  class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
+ _background_tasks: List[LoopingCall] = []
+
  def __init__(
  self,
  database: DatabasePool,

@@ -263,7 +267,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
  super().__init__(database, db_conn, hs)

  # Track when the process started.
- self._started_ts = self._clock.time_msec()
+ self._started_ts = self.clock.time_msec()

  # These get correctly set by _find_stream_orderings_for_times_txn
  self.stream_ordering_month_ago: Optional[int] = None

@@ -273,18 +277,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
  self._find_stream_orderings_for_times_txn(cur)
  cur.close()

- self.find_stream_orderings_looping_call = self._clock.looping_call(
- self._find_stream_orderings_for_times, 10 * 60 * 1000
- )
+ self.clock.looping_call(self._find_stream_orderings_for_times, 10 * 60 * 1000)

  self._rotate_count = 10000
  self._doing_notif_rotation = False
  if hs.config.worker.run_background_tasks:
- self._rotate_notif_loop = self._clock.looping_call(
- self._rotate_notifs, 30 * 1000
- )
+ self.clock.looping_call(self._rotate_notifs, 30 * 1000)

- self._clear_old_staging_loop = self._clock.looping_call(
+ self.clock.looping_call(
  self._clear_old_push_actions_staging, 30 * 60 * 1000
  )

@@ -1190,7 +1190,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
  is_highlight, # highlight column
  int(count_as_unread), # unread column
  thread_id, # thread_id column
- self._clock.time_msec(), # inserted_ts column
+ self.clock.time_msec(), # inserted_ts column
  )

  await self.db_pool.simple_insert_many(

@@ -1241,14 +1241,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
  def _find_stream_orderings_for_times_txn(self, txn: LoggingTransaction) -> None:
  logger.info("Searching for stream ordering 1 month ago")
  self.stream_ordering_month_ago = self._find_first_stream_ordering_after_ts_txn(
- txn, self._clock.time_msec() - 30 * 24 * 60 * 60 * 1000
+ txn, self.clock.time_msec() - 30 * 24 * 60 * 60 * 1000
  )
  logger.info(
  "Found stream ordering 1 month ago: it's %d", self.stream_ordering_month_ago
  )
  logger.info("Searching for stream ordering 1 day ago")
  self.stream_ordering_day_ago = self._find_first_stream_ordering_after_ts_txn(
- txn, self._clock.time_msec() - 24 * 60 * 60 * 1000
+ txn, self.clock.time_msec() - 24 * 60 * 60 * 1000
  )
  logger.info(
  "Found stream ordering 1 day ago: it's %d", self.stream_ordering_day_ago

@@ -1787,7 +1787,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas

  # We delete anything more than an hour old, on the assumption that we'll
  # never take more than an hour to persist an event.
- delete_before_ts = self._clock.time_msec() - 60 * 60 * 1000
+ delete_before_ts = self.clock.time_msec() - 60 * 60 * 1000

  if self._started_ts > delete_before_ts:
  # We need to wait for at least an hour before we started deleting,

@@ -1824,7 +1824,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
  return

  # We sleep to ensure that we don't overwhelm the DB.
- await self._clock.sleep(1.0)
+ await self.clock.sleep(1.0)

  async def get_push_actions_for_user(
  self,
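Besides the rename, these hunks stop keeping references to the loops (`self._rotate_notif_loop` and friends) and add a `_background_tasks: List[LoopingCall]` class attribute, with the newly imported `twisted.internet.task.LoopingCall` as the element type. Synapse's `clock.looping_call(f, msec)` takes milliseconds; in plain Twisted the rough equivalent is `LoopingCall.start` with seconds, as in this illustrative sketch:

```python
from twisted.internet import reactor, task


def rotate_notifs() -> None:
    print("rotating notification counts")


# Rough equivalent of self.clock.looping_call(self._rotate_notifs, 30 * 1000):
# Synapse's interval is in milliseconds, LoopingCall.start() takes seconds.
loop = task.LoopingCall(rotate_notifs)
loop.start(30 * 1000 / 1000.0, now=False)

reactor.callLater(95, reactor.stop)  # let it fire a few times, then exit
reactor.run()
```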
synapse/storage/databases/main/events_bg_updates.py
@@ -730,7 +730,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
  WHERE ? <= event_id AND event_id <= ?
  """

- txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id))
+ txn.execute(sql, (self.clock.time_msec(), last_event_id, upper_event_id))

  self.db_pool.updates._background_update_progress_txn(
  txn, "redactions_received_ts", {"last_event_id": upper_event_id}
synapse/storage/databases/main/events_worker.py
@@ -70,7 +70,6 @@ from synapse.logging.opentracing import (
  )
  from synapse.metrics import SERVER_NAME_LABEL
  from synapse.metrics.background_process_metrics import (
- run_as_background_process,
  wrap_as_background_process,
  )
  from synapse.replication.tcp.streams import BackfillStream, UnPartialStatedEventStream

@@ -282,13 +281,14 @@ class EventsWorkerStore(SQLBaseStore):

  if hs.config.worker.run_background_tasks:
  # We periodically clean out old transaction ID mappings
- self._clock.looping_call(
+ self.clock.looping_call(
  self._cleanup_old_transaction_ids,
  5 * 60 * 1000,
  )

  self._get_event_cache: AsyncLruCache[Tuple[str], EventCacheEntry] = (
  AsyncLruCache(
+ clock=hs.get_clock(),
  server_name=self.server_name,
  cache_name="*getEvent*",
  max_size=hs.config.caches.event_cache_size,

@@ -1154,9 +1154,7 @@ class EventsWorkerStore(SQLBaseStore):
  should_start = False

  if should_start:
- run_as_background_process(
- "fetch_events", self.server_name, self._fetch_thread
- )
+ self.hs.run_as_background_process("fetch_events", self._fetch_thread)

  async def _fetch_thread(self) -> None:
  """Services requests for events from `_event_fetch_list`."""

@@ -1276,7 +1274,7 @@ class EventsWorkerStore(SQLBaseStore):
  were not part of this request.
  """
  with Measure(
- self._clock, name="_fetch_event_list", server_name=self.server_name
+ self.clock, name="_fetch_event_list", server_name=self.server_name
  ):
  try:
  events_to_fetch = {

@@ -2278,7 +2276,7 @@ class EventsWorkerStore(SQLBaseStore):
  """Cleans out transaction id mappings older than 24hrs."""

  def _cleanup_old_transaction_ids_txn(txn: LoggingTransaction) -> None:
- one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
+ one_day_ago = self.clock.time_msec() - 24 * 60 * 60 * 1000
  sql = """
  DELETE FROM event_txn_id_device_id
  WHERE inserted_ts < ?

@@ -2633,7 +2631,7 @@ class EventsWorkerStore(SQLBaseStore):
  keyvalues={"event_id": event_id},
  values={
  "reason": rejection_reason,
- "last_check": self._clock.time_msec(),
+ "last_check": self.clock.time_msec(),
  },
  )
  self.db_pool.simple_update_txn(
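The other notable pattern here: the imported free function `run_as_background_process("fetch_events", self.server_name, self._fetch_thread)` becomes a homeserver method, `self.hs.run_as_background_process("fetch_events", self._fetch_thread)`, so callers no longer pass the server name explicitly. A guessed sketch of such a wrapper (the real one also manages logging contexts and metrics):

```python
import asyncio
from typing import Any, Awaitable, Callable


class HomeServerSketch:
    """Toy stand-in; only the method name and call shape come from the diff."""

    def __init__(self, server_name: str) -> None:
        self.server_name = server_name

    def run_as_background_process(
        self, desc: str, func: Callable[[], Awaitable[Any]]
    ) -> "asyncio.Task[Any]":
        # The method can supply self.server_name itself, which is exactly
        # the argument the old free-function call sites had to pass.
        print(f"[{self.server_name}] starting background process {desc!r}")
        return asyncio.ensure_future(func())


async def fetch_thread() -> None:
    print("servicing event fetch requests")


async def main() -> None:
    hs = HomeServerSketch("example.org")
    await hs.run_as_background_process("fetch_events", fetch_thread)


asyncio.run(main())
```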
synapse/storage/databases/main/lock.py
@@ -28,7 +28,6 @@ from twisted.internet import defer
  from twisted.internet.task import LoopingCall

  from synapse.metrics.background_process_metrics import (
- run_as_background_process,
  wrap_as_background_process,
  )
  from synapse.storage._base import SQLBaseStore

@@ -99,15 +98,15 @@ class LockStore(SQLBaseStore):
  # lead to a race, as we may drop the lock while we are still processing.
  # However, a) it should be a small window, b) the lock is best effort
  # anyway and c) we want to really avoid leaking locks when we restart.
- hs.get_clock().add_system_event_trigger(
- "before",
- "shutdown",
- self._on_shutdown,
+ hs.register_async_shutdown_handler(
+ phase="before",
+ eventType="shutdown",
+ shutdown_func=self._on_shutdown,
  )

  self._acquiring_locks: Set[Tuple[str, str]] = set()

- self._clock.looping_call(
+ self.clock.looping_call(
  self._reap_stale_read_write_locks, _LOCK_TIMEOUT_MS / 10.0
  )

@@ -153,7 +152,7 @@ class LockStore(SQLBaseStore):
  if lock and await lock.is_still_valid():
  return None

- now = self._clock.time_msec()
+ now = self.clock.time_msec()
  token = random_string(6)

  def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:

@@ -202,7 +201,8 @@ class LockStore(SQLBaseStore):
  lock = Lock(
  self.server_name,
  self._reactor,
- self._clock,
+ self.hs,
+ self.clock,
  self,
  read_write=False,
  lock_name=lock_name,

@@ -251,7 +251,7 @@ class LockStore(SQLBaseStore):
  # constraints. If it doesn't then we have acquired the lock,
  # otherwise we haven't.

- now = self._clock.time_msec()
+ now = self.clock.time_msec()
  token = random_string(6)

  self.db_pool.simple_insert_txn(

@@ -270,7 +270,8 @@ class LockStore(SQLBaseStore):
  lock = Lock(
  self.server_name,
  self._reactor,
- self._clock,
+ self.hs,
+ self.clock,
  self,
  read_write=True,
  lock_name=lock_name,

@@ -338,7 +339,7 @@ class LockStore(SQLBaseStore):
  """

  def reap_stale_read_write_locks_txn(txn: LoggingTransaction) -> None:
- txn.execute(delete_sql, (self._clock.time_msec() - _LOCK_TIMEOUT_MS,))
+ txn.execute(delete_sql, (self.clock.time_msec() - _LOCK_TIMEOUT_MS,))
  if txn.rowcount:
  logger.info("Reaped %d stale locks", txn.rowcount)

@@ -374,6 +375,7 @@ class Lock:
  self,
  server_name: str,
  reactor: ISynapseReactor,
+ hs: "HomeServer",
  clock: Clock,
  store: LockStore,
  read_write: bool,

@@ -387,6 +389,7 @@ class Lock:
  """
  self._server_name = server_name
  self._reactor = reactor
+ self._hs = hs
  self._clock = clock
  self._store = store
  self._read_write = read_write

@@ -410,6 +413,7 @@ class Lock:
  _RENEWAL_INTERVAL_MS,
  self._server_name,
  self._store,
+ self._hs,
  self._clock,
  self._read_write,
  self._lock_name,

@@ -421,6 +425,7 @@ class Lock:
  def _renew(
  server_name: str,
  store: LockStore,
+ hs: "HomeServer",
  clock: Clock,
  read_write: bool,
  lock_name: str,

@@ -457,9 +462,8 @@ class Lock:
  desc="renew_lock",
  )

- return run_as_background_process(
+ return hs.run_as_background_process(
  "Lock._renew",
- server_name,
  _internal_renew,
  store,
  clock,
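The lock changes thread the homeserver through `Lock` so renewal can run via `hs.run_as_background_process`. The underlying scheme is unchanged: each lock row carries a last-renewed timestamp, a looping call renews it every `_RENEWAL_INTERVAL_MS`, and rows older than `_LOCK_TIMEOUT_MS` are reaped. A compressed in-memory sketch of that timeout/renewal logic (the constant values here are invented for illustration):

```python
import time
from typing import Dict, Tuple

_LOCK_TIMEOUT_MS = 2 * 60 * 1000    # invented for illustration
_RENEWAL_INTERVAL_MS = 30 * 1000    # invented for illustration


class LockTableSketch:
    """In-memory stand-in for the lock table used by LockStore."""

    def __init__(self) -> None:
        # (lock_name, lock_key) -> (token, last_renewed_ts)
        self._locks: Dict[Tuple[str, str], Tuple[str, int]] = {}

    def _now(self) -> int:
        return int(time.time() * 1000)

    def try_acquire(self, name: str, key: str, token: str) -> bool:
        existing = self._locks.get((name, key))
        if existing is not None and self._now() - existing[1] < _LOCK_TIMEOUT_MS:
            return False  # still held and not yet stale
        self._locks[(name, key)] = (token, self._now())
        return True

    def renew(self, name: str, key: str, token: str) -> None:
        # Called every _RENEWAL_INTERVAL_MS while the lock is held.
        if self._locks.get((name, key), ("", 0))[0] == token:
            self._locks[(name, key)] = (token, self._now())

    def reap_stale(self) -> None:
        # Mirrors _reap_stale_read_write_locks: drop anything too old.
        cutoff = self._now() - _LOCK_TIMEOUT_MS
        self._locks = {k: v for k, v in self._locks.items() if v[1] >= cutoff}
```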
synapse/storage/databases/main/media_repository.py
@@ -565,7 +565,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
  sql,
  (
  user_id.to_string(),
- self._clock.time_msec() - self.unused_expiration_time,
+ self.clock.time_msec() - self.unused_expiration_time,
  ),
  )
  row = txn.fetchone()

@@ -1059,7 +1059,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
  txn: LoggingTransaction,
  ) -> int:
  # Calculate the timestamp for the start of the time period
- start_ts = self._clock.time_msec() - time_period_ms
+ start_ts = self.clock.time_msec() - time_period_ms
  txn.execute(sql, (user_id, start_ts))
  row = txn.fetchone()
  if row is None:
synapse/storage/databases/main/metrics.py
@@ -78,7 +78,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):

  # Read the extrems every 60 minutes
  if hs.config.worker.run_background_tasks:
- self._clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000)
+ self.clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000)

  # Used in _generate_user_daily_visits to keep track of progress
  self._last_user_visit_update = self._get_start_of_day()

@@ -224,7 +224,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
  """
  Counts the number of users who used this homeserver in the last 24 hours.
  """
- yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
+ yesterday = int(self.clock.time_msec()) - (1000 * 60 * 60 * 24)
  return await self.db_pool.runInteraction(
  "count_daily_users", self._count_users, yesterday
  )

@@ -236,7 +236,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
  from the mau figure in synapse.storage.monthly_active_users which,
  amongst other things, includes a 3 day grace period before a user counts.
  """
- thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
+ thirty_days_ago = int(self.clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
  return await self.db_pool.runInteraction(
  "count_monthly_users", self._count_users, thirty_days_ago
  )

@@ -281,7 +281,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):

  def _count_r30v2_users(txn: LoggingTransaction) -> Dict[str, int]:
  thirty_days_in_secs = 86400 * 30
- now = int(self._clock.time())
+ now = int(self.clock.time())
  sixty_days_ago_in_secs = now - 2 * thirty_days_in_secs
  one_day_from_now_in_secs = now + 86400

@@ -389,7 +389,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
  """
  Returns millisecond unixtime for start of UTC day.
  """
- now = time.gmtime(self._clock.time())
+ now = time.gmtime(self.clock.time())
  today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))
  return today_start * 1000

@@ -403,7 +403,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
  logger.info("Calling _generate_user_daily_visits")
  today_start = self._get_start_of_day()
  a_day_in_milliseconds = 24 * 60 * 60 * 1000
- now = self._clock.time_msec()
+ now = self.clock.time_msec()

  # A note on user_agent. Technically a given device can have multiple
  # user agents, so we need to decide which one to pick. We could have
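The metrics hunks are all the same clock rename around time-window arithmetic. As a worked example of the constants involved, `1000 * 60 * 60 * 24` is one day in milliseconds (86,400,000), so the monthly window is:

```python
DAY_MS = 1000 * 60 * 60 * 24  # 86_400_000 ms in one day


def thirty_days_ago(now_ms: int) -> int:
    # Same arithmetic as count_monthly_users above.
    return now_ms - DAY_MS * 30


assert thirty_days_ago(1_700_000_000_000) == 1_697_408_000_000
```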
synapse/storage/databases/main/monthly_active_users.py
@@ -49,7 +49,6 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
  hs: "HomeServer",
  ):
  super().__init__(database, db_conn, hs)
- self._clock = hs.get_clock()
  self.hs = hs

  if hs.config.redis.redis_enabled:

@@ -226,7 +225,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
  reserved_users: reserved users to preserve
  """

- thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
+ thirty_days_ago = int(self.clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)

  in_clause, in_clause_args = make_in_list_sql_clause(
  self.database_engine, "user_id", reserved_users

@@ -328,7 +327,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
  txn,
  table="monthly_active_users",
  keyvalues={"user_id": user_id},
- values={"timestamp": int(self._clock.time_msec())},
+ values={"timestamp": int(self.clock.time_msec())},
  )
  else:
  logger.warning("mau limit reserved threepid %s not found in db", tp)

@@ -391,7 +390,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
  txn,
  table="monthly_active_users",
  keyvalues={"user_id": user_id},
- values={"timestamp": int(self._clock.time_msec())},
+ values={"timestamp": int(self.clock.time_msec())},
  )

  self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
synapse/storage/databases/main/receipts.py
@@ -1073,7 +1073,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
  if event_ts is None:
  return None

- now = self._clock.time_msec()
+ now = self.clock.time_msec()
  logger.debug(
  "Receipt %s for event %s in %s (%i ms old)",
  receipt_type,