matrix-synapse 1.139.1__cp39-abi3-musllinux_1_2_aarch64.whl → 1.140.0rc1__cp39-abi3-musllinux_1_2_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrix-synapse might be problematic.
- {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
- {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +158 -155
- synapse/_scripts/generate_workers_map.py +6 -1
- synapse/_scripts/synapse_port_db.py +0 -2
- synapse/_scripts/update_synapse_database.py +1 -6
- synapse/api/auth/base.py +1 -3
- synapse/api/auth/mas.py +6 -8
- synapse/api/auth/msc3861_delegated.py +6 -8
- synapse/api/errors.py +3 -0
- synapse/app/_base.py +101 -39
- synapse/app/admin_cmd.py +2 -4
- synapse/app/appservice.py +1 -1
- synapse/app/client_reader.py +1 -1
- synapse/app/event_creator.py +1 -1
- synapse/app/federation_reader.py +1 -1
- synapse/app/federation_sender.py +1 -1
- synapse/app/frontend_proxy.py +1 -1
- synapse/app/generic_worker.py +17 -11
- synapse/app/homeserver.py +85 -47
- synapse/app/media_repository.py +1 -1
- synapse/app/phone_stats_home.py +16 -14
- synapse/app/pusher.py +1 -1
- synapse/app/synchrotron.py +1 -1
- synapse/app/user_dir.py +1 -1
- synapse/appservice/__init__.py +29 -2
- synapse/appservice/scheduler.py +8 -8
- synapse/config/_base.py +32 -14
- synapse/config/_base.pyi +5 -3
- synapse/config/experimental.py +3 -0
- synapse/config/homeserver.py +27 -1
- synapse/config/logger.py +3 -4
- synapse/config/matrixrtc.py +67 -0
- synapse/crypto/keyring.py +18 -4
- synapse/events/auto_accept_invites.py +0 -1
- synapse/federation/federation_client.py +39 -0
- synapse/federation/federation_server.py +1 -1
- synapse/federation/send_queue.py +3 -0
- synapse/federation/sender/__init__.py +24 -8
- synapse/federation/sender/per_destination_queue.py +31 -8
- synapse/federation/sender/transaction_manager.py +12 -0
- synapse/federation/transport/client.py +29 -0
- synapse/handlers/account_validity.py +2 -4
- synapse/handlers/appservice.py +5 -7
- synapse/handlers/deactivate_account.py +2 -3
- synapse/handlers/delayed_events.py +10 -13
- synapse/handlers/device.py +14 -14
- synapse/handlers/e2e_keys.py +4 -3
- synapse/handlers/federation.py +7 -11
- synapse/handlers/federation_event.py +5 -6
- synapse/handlers/message.py +16 -10
- synapse/handlers/pagination.py +3 -7
- synapse/handlers/presence.py +21 -25
- synapse/handlers/profile.py +1 -1
- synapse/handlers/read_marker.py +3 -1
- synapse/handlers/register.py +8 -1
- synapse/handlers/room.py +13 -4
- synapse/handlers/room_member.py +11 -7
- synapse/handlers/room_policy.py +96 -2
- synapse/handlers/sso.py +1 -1
- synapse/handlers/stats.py +5 -3
- synapse/handlers/sync.py +20 -13
- synapse/handlers/typing.py +5 -10
- synapse/handlers/user_directory.py +12 -11
- synapse/handlers/worker_lock.py +19 -15
- synapse/http/client.py +18 -13
- synapse/http/federation/matrix_federation_agent.py +6 -1
- synapse/http/federation/well_known_resolver.py +3 -1
- synapse/http/matrixfederationclient.py +50 -11
- synapse/http/proxy.py +2 -2
- synapse/http/server.py +36 -2
- synapse/http/site.py +109 -17
- synapse/logging/context.py +165 -63
- synapse/logging/opentracing.py +30 -6
- synapse/logging/scopecontextmanager.py +161 -0
- synapse/media/_base.py +2 -1
- synapse/media/media_repository.py +20 -6
- synapse/media/url_previewer.py +5 -6
- synapse/metrics/_gc.py +3 -1
- synapse/metrics/background_process_metrics.py +128 -24
- synapse/metrics/common_usage_metrics.py +3 -5
- synapse/module_api/__init__.py +42 -5
- synapse/notifier.py +10 -3
- synapse/push/emailpusher.py +5 -4
- synapse/push/httppusher.py +6 -6
- synapse/push/pusherpool.py +3 -8
- synapse/replication/http/devices.py +0 -41
- synapse/replication/tcp/client.py +8 -5
- synapse/replication/tcp/handler.py +2 -3
- synapse/replication/tcp/protocol.py +14 -7
- synapse/replication/tcp/redis.py +16 -11
- synapse/replication/tcp/resource.py +5 -4
- synapse/replication/tcp/streams/__init__.py +2 -0
- synapse/res/providers.json +6 -5
- synapse/rest/__init__.py +2 -0
- synapse/rest/admin/__init__.py +4 -0
- synapse/rest/admin/events.py +69 -0
- synapse/rest/admin/media.py +70 -2
- synapse/rest/client/keys.py +3 -3
- synapse/rest/client/matrixrtc.py +52 -0
- synapse/rest/client/push_rule.py +1 -1
- synapse/rest/client/room.py +2 -3
- synapse/rest/client/sync.py +1 -0
- synapse/rest/client/transactions.py +1 -1
- synapse/server.py +271 -38
- synapse/server_notices/server_notices_manager.py +1 -0
- synapse/state/__init__.py +4 -1
- synapse/storage/_base.py +1 -1
- synapse/storage/background_updates.py +8 -3
- synapse/storage/controllers/persist_events.py +4 -3
- synapse/storage/controllers/purge_events.py +2 -3
- synapse/storage/controllers/state.py +5 -5
- synapse/storage/database.py +12 -7
- synapse/storage/databases/main/__init__.py +7 -2
- synapse/storage/databases/main/cache.py +4 -3
- synapse/storage/databases/main/censor_events.py +1 -1
- synapse/storage/databases/main/client_ips.py +9 -8
- synapse/storage/databases/main/deviceinbox.py +7 -6
- synapse/storage/databases/main/devices.py +4 -4
- synapse/storage/databases/main/end_to_end_keys.py +6 -3
- synapse/storage/databases/main/event_federation.py +7 -6
- synapse/storage/databases/main/event_push_actions.py +13 -13
- synapse/storage/databases/main/events_bg_updates.py +1 -1
- synapse/storage/databases/main/events_worker.py +6 -8
- synapse/storage/databases/main/lock.py +17 -13
- synapse/storage/databases/main/media_repository.py +2 -2
- synapse/storage/databases/main/metrics.py +6 -6
- synapse/storage/databases/main/monthly_active_users.py +3 -4
- synapse/storage/databases/main/receipts.py +1 -1
- synapse/storage/databases/main/registration.py +18 -19
- synapse/storage/databases/main/roommember.py +1 -1
- synapse/storage/databases/main/session.py +3 -3
- synapse/storage/databases/main/sliding_sync.py +2 -2
- synapse/storage/databases/main/transactions.py +3 -3
- synapse/storage/databases/state/store.py +2 -0
- synapse/synapse_rust/http_client.pyi +4 -0
- synapse/synapse_rust.abi3.so +0 -0
- synapse/util/async_helpers.py +36 -24
- synapse/util/batching_queue.py +16 -6
- synapse/util/caches/__init__.py +1 -1
- synapse/util/caches/deferred_cache.py +4 -0
- synapse/util/caches/descriptors.py +14 -2
- synapse/util/caches/dictionary_cache.py +6 -1
- synapse/util/caches/expiringcache.py +16 -5
- synapse/util/caches/lrucache.py +14 -26
- synapse/util/caches/response_cache.py +11 -1
- synapse/util/clock.py +215 -39
- synapse/util/constants.py +2 -0
- synapse/util/daemonize.py +5 -1
- synapse/util/distributor.py +9 -5
- synapse/util/metrics.py +35 -6
- synapse/util/ratelimitutils.py +4 -1
- synapse/util/retryutils.py +7 -4
- synapse/util/task_scheduler.py +11 -14
- synapse/logging/filter.py +0 -38
- {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
- {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
- {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
- {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
- {matrix_synapse-1.139.1.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/handlers/delayed_events.py
CHANGED
@@ -24,9 +24,6 @@ from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
 from synapse.logging.context import make_deferred_yieldable
 from synapse.logging.opentracing import set_tag
 from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
-from synapse.metrics.background_process_metrics import (
-    run_as_background_process,
-)
 from synapse.replication.http.delayed_events import (
     ReplicationAddedDelayedEventRestServlet,
 )
@@ -58,6 +55,7 @@ logger = logging.getLogger(__name__)
 
 class DelayedEventsHandler:
     def __init__(self, hs: "HomeServer"):
+        self.hs = hs
         self.server_name = hs.hostname
         self._store = hs.get_datastores().main
         self._storage_controllers = hs.get_storage_controllers()
@@ -94,7 +92,10 @@
             hs.get_notifier().add_replication_callback(self.notify_new_event)
             # Kick off again (without blocking) to catch any missed notifications
             # that may have fired before the callback was added.
-            self._clock.call_later(
+            self._clock.call_later(
+                0,
+                self.notify_new_event,
+            )
 
             # Delayed events that are already marked as processed on startup might not have been
             # sent properly on the last run of the server, so unmark them to send them again.
@@ -112,15 +113,14 @@
                 self._schedule_next_at(next_send_ts)
 
                 # Can send the events in background after having awaited on marking them as processed
-                run_as_background_process(
+                self.hs.run_as_background_process(
                     "_send_events",
-                    self.server_name,
                     self._send_events,
                     events,
                 )
 
-            self._initialized_from_db = run_as_background_process(
-                "_schedule_db_events",
+            self._initialized_from_db = self.hs.run_as_background_process(
+                "_schedule_db_events", _schedule_db_events
             )
         else:
             self._repl_client = ReplicationAddedDelayedEventRestServlet.make_client(hs)
@@ -145,9 +145,7 @@
             finally:
                 self._event_processing = False
 
-        run_as_background_process(
-            "delayed_events.notify_new_event", self.server_name, process
-        )
+        self.hs.run_as_background_process("delayed_events.notify_new_event", process)
 
     async def _unsafe_process_new_event(self) -> None:
         # We purposefully fetch the current max room stream ordering before
@@ -542,9 +540,8 @@
         if self._next_delayed_event_call is None:
            self._next_delayed_event_call = self._clock.call_later(
                 delay_sec,
-                run_as_background_process,
+                self.hs.run_as_background_process,
                 "_send_on_timeout",
-                self.server_name,
                 self._send_on_timeout,
             )
         else:
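Note on the recurring pattern in this release: the module-level run_as_background_process(desc, server_name, func, *args) helper is replaced by a method on the homeserver object, hs.run_as_background_process(desc, func, *args), which no longer takes an explicit server_name. A minimal, self-contained sketch of the before/after call shape; HomeServerStub below is a hypothetical stand-in, and only the call shape is taken from the hunks above:

import asyncio
from typing import Any, Awaitable, Callable

class HomeServerStub:
    # Hypothetical stand-in for synapse.server.HomeServer; only the
    # run_as_background_process call shape mirrors the diff above.
    def __init__(self, hostname: str) -> None:
        self.hostname = hostname

    def run_as_background_process(
        self, desc: str, func: Callable[..., Awaitable[Any]], *args: Any, **kwargs: Any
    ) -> "asyncio.Task[Any]":
        # Synapse wraps the coroutine in logging/metrics context; the stub
        # simply schedules it so the example runs.
        return asyncio.create_task(func(*args, **kwargs), name=desc)

async def _send_events(events: list) -> None:
    print("sending", len(events), "delayed events")

async def main() -> None:
    hs = HomeServerStub("example.org")
    # 1.139.x style: run_as_background_process("_send_events", hs.hostname, _send_events, events)
    # 1.140.0rc1 style: the server_name argument is gone and the call hangs off hs.
    hs.run_as_background_process("_send_events", _send_events, [1, 2, 3])
    await asyncio.sleep(0)

asyncio.run(main())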
synapse/handlers/device.py
CHANGED
@@ -47,7 +47,6 @@ from synapse.api.errors import (
 )
 from synapse.logging.opentracing import log_kv, set_tag, trace
 from synapse.metrics.background_process_metrics import (
-    run_as_background_process,
     wrap_as_background_process,
 )
 from synapse.replication.http.devices import (
@@ -125,7 +124,7 @@
     def __init__(self, hs: "HomeServer"):
         self.server_name = hs.hostname  # nb must be called this for @measure_func
         self.clock = hs.get_clock()  # nb must be called this for @measure_func
-        self.hs = hs
+        self.hs = hs  # nb must be called this for @wrap_as_background_process
         self.store = cast("GenericWorkerStore", hs.get_datastores().main)
         self.notifier = hs.get_notifier()
         self.state = hs.get_state_handler()
@@ -191,10 +190,9 @@
             and self._delete_stale_devices_after is not None
         ):
             self.clock.looping_call(
-                run_as_background_process,
+                self.hs.run_as_background_process,
                 DELETE_STALE_DEVICES_INTERVAL_MS,
                 desc="delete_stale_devices",
-                server_name=self.server_name,
                 func=self._delete_stale_devices,
             )
 
@@ -963,10 +961,9 @@ class DeviceWriterHandler(DeviceHandler):
 
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
+        self.server_name = hs.hostname  # nb must be called this for @measure_func
+        self.hs = hs  # nb must be called this for @wrap_as_background_process
 
-        self.server_name = (
-            hs.hostname
-        )  # nb must be called this for @measure_func and @wrap_as_background_process
         # We only need to poke the federation sender explicitly if its on the
         # same instance. Other federation sender instances will get notified by
         # `synapse.app.generic_worker.FederationSenderHandler` when it sees it
@@ -1444,14 +1441,18 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
     def __init__(self, hs: "HomeServer", device_handler: DeviceWriterHandler):
         super().__init__(hs)
 
-        self.
+        self.hs = hs
         self.federation = hs.get_federation_client()
         self.server_name = hs.hostname  # nb must be called this for @measure_func
         self.clock = hs.get_clock()  # nb must be called this for @measure_func
         self.device_handler = device_handler
 
-        self._remote_edu_linearizer = Linearizer(
-
+        self._remote_edu_linearizer = Linearizer(
+            name="remote_device_list", clock=self.clock
+        )
+        self._resync_linearizer = Linearizer(
+            name="remote_device_resync", clock=self.clock
+        )
 
         # user_id -> list of updates waiting to be handled.
         self._pending_updates: Dict[
@@ -1464,6 +1465,7 @@
         self._seen_updates: ExpiringCache[str, Set[str]] = ExpiringCache(
             cache_name="device_update_edu",
             server_name=self.server_name,
+            hs=self.hs,
             clock=self.clock,
             max_len=10000,
             expiry_ms=30 * 60 * 1000,
@@ -1473,9 +1475,8 @@
         # Attempt to resync out of sync device lists every 30s.
         self._resync_retry_lock = Lock()
         self.clock.looping_call(
-            run_as_background_process,
+            self.hs.run_as_background_process,
             30 * 1000,
-            server_name=self.server_name,
             func=self._maybe_retry_device_resync,
             desc="_maybe_retry_device_resync",
         )
@@ -1595,9 +1596,8 @@
         if resync:
             # We mark as stale up front in case we get restarted.
             await self.store.mark_remote_users_device_caches_as_stale([user_id])
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "_maybe_retry_device_resync",
-                self.server_name,
                 self.multi_user_device_resync,
                 [user_id],
                 False,
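The device-handler hunks apply the same substitution inside Clock.looping_call: the scheduled callable becomes self.hs.run_as_background_process and the server_name= keyword disappears, while ExpiringCache additionally gains an hs= keyword argument. A runnable sketch of the looping-call shape, using hypothetical ClockStub and HomeServerStub classes; only the argument order interval_ms, desc=..., func=... comes from the diff:

import asyncio
from typing import Any, Awaitable, Callable

class HomeServerStub:
    def run_as_background_process(
        self, desc: str, func: Callable[..., Awaitable[Any]], *args: Any
    ) -> None:
        asyncio.create_task(func(*args), name=desc)

class ClockStub:
    # Hypothetical stand-in for the homeserver clock's looping_call.
    def looping_call(
        self, f: Callable[..., Any], interval_ms: int, *args: Any, **kwargs: Any
    ) -> None:
        async def _loop() -> None:
            while True:
                f(*args, **kwargs)
                await asyncio.sleep(interval_ms / 1000)
        asyncio.create_task(_loop())

async def _delete_stale_devices() -> None:
    print("pruning stale devices")

async def main() -> None:
    hs, clock = HomeServerStub(), ClockStub()
    # 1.140.0rc1 shape from the hunk above: no server_name= keyword any more.
    clock.looping_call(
        hs.run_as_background_process,
        50,  # stands in for DELETE_STALE_DEVICES_INTERVAL_MS
        desc="delete_stale_devices",
        func=_delete_stale_devices,
    )
    await asyncio.sleep(0.12)  # let the loop fire a couple of times

asyncio.run(main())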
synapse/handlers/e2e_keys.py
CHANGED
@@ -111,8 +111,7 @@ class E2eKeysHandler:
 
         # Limit the number of in-flight requests from a single device.
         self._query_devices_linearizer = Linearizer(
-            name="query_devices",
-            max_count=10,
+            name="query_devices", max_count=10, clock=hs.get_clock()
         )
 
         self._query_appservices_for_otks = (
@@ -1769,7 +1768,9 @@
         assert isinstance(device_handler, DeviceWriterHandler)
         self._device_handler = device_handler
 
-        self._remote_edu_linearizer = Linearizer(
+        self._remote_edu_linearizer = Linearizer(
+            name="remote_signing_key", clock=self.clock
+        )
 
         # user_id -> list of updates waiting to be handled.
         self._pending_updates: Dict[str, List[Tuple[JsonDict, JsonDict]]] = {}
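Another recurring change in this release, visible here and in several handlers below: Linearizer is now constructed with keyword arguments and an explicit clock. A small illustrative sketch; LinearizerStub and ClockStub are hypothetical stand-ins, and only the keyword shape name=..., max_count=..., clock=... is taken from the hunks:

from typing import Any, Optional

class ClockStub:
    """Hypothetical stand-in for the homeserver clock object."""

class LinearizerStub:
    # Hypothetical stand-in that records the 1.140.0rc1 keyword construction style.
    def __init__(self, *, name: str, max_count: int = 1, clock: Optional[Any] = None) -> None:
        self.name, self.max_count, self.clock = name, max_count, clock

clock = ClockStub()

# 1.139.x style (no clock, sometimes a positional name):
#   Linearizer("room_backfill")
#   Linearizer(name="query_devices", max_count=10)
# 1.140.0rc1 style seen throughout this diff: pass the clock explicitly.
query_limiter = LinearizerStub(name="query_devices", max_count=10, clock=clock)
backfill_limiter = LinearizerStub(name="room_backfill", clock=clock)
print(query_limiter.name, backfill_limiter.name)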
synapse/handlers/federation.py
CHANGED
@@ -72,7 +72,6 @@ from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import nested_logging_context
 from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.module_api import NOT_SPAM
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.storage.invite_rule import InviteRule
@@ -160,7 +159,7 @@ class FederationHandler:
         self._notifier = hs.get_notifier()
         self._worker_locks = hs.get_worker_locks_handler()
 
-        self._room_backfill = Linearizer("room_backfill")
+        self._room_backfill = Linearizer(name="room_backfill", clock=self.clock)
 
         self._third_party_event_rules = (
             hs.get_module_api_callbacks().third_party_event_rules
@@ -180,16 +179,16 @@
         # When the lock is held for a given room, no other concurrent code may
         # partial state or un-partial state the room.
         self._is_partial_state_room_linearizer = Linearizer(
-            name="_is_partial_state_room_linearizer"
+            name="_is_partial_state_room_linearizer",
+            clock=self.clock,
         )
 
         # if this is the main process, fire off a background process to resume
         # any partial-state-resync operations which were in flight when we
         # were shut down.
         if not hs.config.worker.worker_app:
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "resume_sync_partial_state_room",
-                self.server_name,
                 self._resume_partial_state_room_sync,
             )
 
@@ -317,9 +316,8 @@
             logger.debug(
                 "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
             )
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "_maybe_backfill_inner_anyway_with_max_depth",
-                self.server_name,
                 self.maybe_backfill,
                 room_id=room_id,
                 # We use `MAX_DEPTH` so that we find all backfill points next
@@ -801,9 +799,8 @@
         # lots of requests for missing prev_events which we do actually
         # have. Hence we fire off the background task, but don't wait for it.
 
-        run_as_background_process(
+        self.hs.run_as_background_process(
             "handle_queued_pdus",
-            self.server_name,
             self._handle_queued_pdus,
             room_queue,
         )
@@ -1876,9 +1873,8 @@
             room_id=room_id,
         )
 
-        run_as_background_process(
+        self.hs.run_as_background_process(
             desc="sync_partial_state_room",
-            server_name=self.server_name,
             func=_sync_partial_state_room_wrapper,
         )
 
synapse/handlers/federation_event.py
CHANGED
@@ -81,7 +81,6 @@ from synapse.logging.opentracing import (
     trace,
 )
 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.federation import (
     ReplicationFederationSendEventsRestServlet,
 )
@@ -153,6 +152,7 @@ class FederationEventHandler:
 
     def __init__(self, hs: "HomeServer"):
         self.server_name = hs.hostname
+        self.hs = hs
         self._clock = hs.get_clock()
         self._store = hs.get_datastores().main
         self._state_store = hs.get_datastores().state
@@ -175,6 +175,7 @@
         )
         self._notifier = hs.get_notifier()
 
+        self._server_name = hs.hostname
         self._is_mine_id = hs.is_mine_id
         self._is_mine_server_name = hs.is_mine_server_name
         self._instance_name = hs.get_instance_name()
@@ -191,7 +192,7 @@
         # federation event staging area.
         self.room_queues: Dict[str, List[Tuple[EventBase, str]]] = {}
 
-        self._room_pdu_linearizer = Linearizer("fed_room_pdu")
+        self._room_pdu_linearizer = Linearizer(name="fed_room_pdu", clock=self._clock)
 
     async def on_receive_pdu(self, origin: str, pdu: EventBase) -> None:
         """Process a PDU received via a federation /send/ transaction
@@ -974,9 +975,8 @@
         # Process previously failed backfill events in the background to not waste
         # time on something that is likely to fail again.
         if len(events_with_failed_pull_attempts) > 0:
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "_process_new_pulled_events_with_failed_pull_attempts",
-                self.server_name,
                 _process_new_pulled_events,
                 events_with_failed_pull_attempts,
             )
@@ -1568,9 +1568,8 @@
             resync = True
 
         if resync:
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "resync_device_due_to_pdu",
-                self.server_name,
                 self._resync_device,
                 event.sender,
             )
synapse/handlers/message.py
CHANGED
@@ -67,7 +67,6 @@ from synapse.handlers.directory import DirectoryHandler
 from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging import opentracing
 from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.types import (
@@ -99,6 +98,7 @@ class MessageHandler:
 
     def __init__(self, hs: "HomeServer"):
         self.server_name = hs.hostname
+        self.hs = hs
         self.auth = hs.get_auth()
         self.clock = hs.get_clock()
         self.state = hs.get_state_handler()
@@ -113,8 +113,8 @@
         self._scheduled_expiry: Optional[IDelayedCall] = None
 
         if not hs.config.worker.worker_app:
-            run_as_background_process(
-                "_schedule_next_expiry", self.
+            self.hs.run_as_background_process(
+                "_schedule_next_expiry", self._schedule_next_expiry
             )
 
     async def get_room_data(
@@ -444,9 +444,8 @@
 
         self._scheduled_expiry = self.clock.call_later(
             delay,
-            run_as_background_process,
+            self.hs.run_as_background_process,
             "_expire_event",
-            self.server_name,
             self._expire_event,
             event_id,
         )
@@ -513,7 +512,9 @@ class EventCreationHandler:
 
         # We limit concurrent event creation for a room to 1. This prevents state resolution
         # from occurring when sending bursts of events to a local room
-        self.limiter = Linearizer(
+        self.limiter = Linearizer(
+            max_count=1, name="room_event_creation_limit", clock=self.clock
+        )
 
         self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
 
@@ -546,9 +547,8 @@
             and self.config.server.cleanup_extremities_with_dummy_events
         ):
             self.clock.looping_call(
-                lambda: run_as_background_process(
+                lambda: self.hs.run_as_background_process(
                     "send_dummy_events_to_fill_extremities",
-                    self.server_name,
                     self._send_dummy_events_to_fill_extremities,
                 ),
                 5 * 60 * 1000,
@@ -568,6 +568,7 @@
         self._external_cache_joined_hosts_updates = ExpiringCache(
             cache_name="_external_cache_joined_hosts_updates",
             server_name=self.server_name,
+            hs=self.hs,
             clock=self.clock,
             expiry_ms=30 * 60 * 1000,
         )
@@ -1138,6 +1139,12 @@
         assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
             event.sender,
         )
+        # if this room uses a policy server, try to get a signature now.
+        # We use verify=False here as we are about to call is_event_allowed on the same event
+        # which will do sig checks.
+        await self._policy_handler.ask_policy_server_to_sign_event(
+            event, verify=False
+        )
 
         policy_allowed = await self._policy_handler.is_event_allowed(event)
         if not policy_allowed:
@@ -2105,9 +2112,8 @@
         if event.type == EventTypes.Message:
             # We don't want to block sending messages on any presence code. This
             # matters as sometimes presence code can take a while.
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "bump_presence_active_time",
-                self.server_name,
                 self._bump_active_time,
                 requester.user,
                 requester.device_id,
synapse/handlers/pagination.py
CHANGED
@@ -29,7 +29,6 @@ from synapse.api.filtering import Filter
 from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging.opentracing import trace
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.rest.admin._base import assert_user_is_admin
 from synapse.streams.config import PaginationConfig
 from synapse.types import (
@@ -116,10 +115,9 @@
                 logger.info("Setting up purge job with config: %s", job)
 
                 self.clock.looping_call(
-                    run_as_background_process,
+                    self.hs.run_as_background_process,
                     job.interval,
                     "purge_history_for_rooms_in_range",
-                    self.server_name,
                     self.purge_history_for_rooms_in_range,
                     job.shortest_max_lifetime,
                     job.longest_max_lifetime,
@@ -244,9 +242,8 @@
         # We want to purge everything, including local events, and to run the purge in
         # the background so that it's not blocking any other operation apart from
        # other purges in the same room.
-        run_as_background_process(
+        self.hs.run_as_background_process(
             PURGE_HISTORY_ACTION_NAME,
-            self.server_name,
             self.purge_history,
             room_id,
             token,
@@ -604,9 +601,8 @@
             # Otherwise, we can backfill in the background for eventual
             # consistency's sake but we don't need to block the client waiting
             # for a costly federation call and processing.
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "maybe_backfill_in_the_background",
-                self.server_name,
                 self.hs.get_federation_handler().maybe_backfill,
                 room_id,
                 curr_topo,
synapse/handlers/presence.py
CHANGED
@@ -107,7 +107,6 @@ from synapse.events.presence_router import PresenceRouter
 from synapse.logging.context import run_in_background
 from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
 from synapse.metrics.background_process_metrics import (
-    run_as_background_process,
     wrap_as_background_process,
 )
 from synapse.replication.http.presence import (
@@ -537,19 +536,15 @@ class WorkerPresenceHandler(BasePresenceHandler):
         self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
         self._set_state_client = ReplicationPresenceSetState.make_client(hs)
 
-        self.
-            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
-        )
+        self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS)
 
-        hs.
-            "before",
-            "shutdown",
-
-            "generic_presence.on_shutdown",
-            self.server_name,
-            self._on_shutdown,
+        hs.register_async_shutdown_handler(
+            phase="before",
+            eventType="shutdown",
+            shutdown_func=self._on_shutdown,
         )
 
+    @wrap_as_background_process("WorkerPresenceHandler._on_shutdown")
     async def _on_shutdown(self) -> None:
         if self._track_presence:
             self.hs.get_replication_command_handler().send_command(
@@ -779,9 +774,7 @@
 class PresenceHandler(BasePresenceHandler):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
-        self.server_name = (
-            hs.hostname
-        )  # nb must be called this for @wrap_as_background_process
+        self.server_name = hs.hostname
         self.wheel_timer: WheelTimer[str] = WheelTimer()
         self.notifier = hs.get_notifier()
 
@@ -842,13 +835,10 @@
         # have not yet been persisted
         self.unpersisted_users_changes: Set[str] = set()
 
-        hs.
-            "before",
-            "shutdown",
-
-            "presence.on_shutdown",
-            self.server_name,
-            self._on_shutdown,
+        hs.register_async_shutdown_handler(
+            phase="before",
+            eventType="shutdown",
+            shutdown_func=self._on_shutdown,
         )
 
         # Keeps track of the number of *ongoing* syncs on this process. While
@@ -872,14 +862,19 @@
         ] = {}
         self.external_process_last_updated_ms: Dict[str, int] = {}
 
-        self.external_sync_linearizer = Linearizer(
+        self.external_sync_linearizer = Linearizer(
+            name="external_sync_linearizer", clock=self.clock
+        )
 
         if self._track_presence:
             # Start a LoopingCall in 30s that fires every 5s.
             # The initial delay is to allow disconnected clients a chance to
             # reconnect before we treat them as offline.
             self.clock.call_later(
-                30,
+                30,
+                self.clock.looping_call,
+                self._handle_timeouts,
+                5000,
             )
 
         # Presence information is persisted, whether or not it is being tracked
@@ -906,6 +901,7 @@
         self._event_pos = self.store.get_room_max_stream_ordering()
         self._event_processing = False
 
+    @wrap_as_background_process("PresenceHandler._on_shutdown")
     async def _on_shutdown(self) -> None:
         """Gets called when shutting down. This lets us persist any updates that
         we haven't yet persisted, e.g. updates that only changes some internal
@@ -1537,8 +1533,8 @@
             finally:
                 self._event_processing = False
 
-        run_as_background_process(
-            "presence.notify_new_event",
+        self.hs.run_as_background_process(
+            "presence.notify_new_event", _process_presence
         )
 
     async def _unsafe_process(self) -> None:
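The presence hunks also show a new registration API: instead of wiring a background process into the reactor's before-shutdown trigger, handlers now call hs.register_async_shutdown_handler(phase=..., eventType=..., shutdown_func=...) and decorate the coroutine with @wrap_as_background_process. A runnable sketch of that shape with a hypothetical HomeServerStub; the real wiring into the reactor is not shown in the diff and is not reproduced here:

import asyncio
from typing import Awaitable, Callable, List

class HomeServerStub:
    # Hypothetical stand-in for the new HomeServer.register_async_shutdown_handler hook.
    def __init__(self) -> None:
        self._shutdown_funcs: List[Callable[[], Awaitable[None]]] = []

    def register_async_shutdown_handler(
        self, *, phase: str, eventType: str, shutdown_func: Callable[[], Awaitable[None]]
    ) -> None:
        # The real implementation presumably hooks this into reactor shutdown;
        # the stub just collects the callbacks so the example can run.
        self._shutdown_funcs.append(shutdown_func)

    async def shutdown(self) -> None:
        for func in self._shutdown_funcs:
            await func()

async def _on_shutdown() -> None:
    print("persisting unpersisted presence updates")

async def main() -> None:
    hs = HomeServerStub()
    # Keyword shape taken from the presence hunks above.
    hs.register_async_shutdown_handler(
        phase="before",
        eventType="shutdown",
        shutdown_func=_on_shutdown,
    )
    await hs.shutdown()

asyncio.run(main())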
synapse/handlers/profile.py
CHANGED
@@ -56,8 +56,8 @@ class ProfileHandler:
 
     def __init__(self, hs: "HomeServer"):
         self.server_name = hs.hostname  # nb must be called this for @cached
+        self.clock = hs.get_clock()  # nb must be called this for @cached
         self.store = hs.get_datastores().main
-        self.clock = hs.get_clock()
         self.hs = hs
 
         self.federation = hs.get_federation_client()
synapse/handlers/read_marker.py
CHANGED
@@ -36,7 +36,9 @@ class ReadMarkerHandler:
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastores().main
         self.account_data_handler = hs.get_account_data_handler()
-        self.read_marker_linearizer = Linearizer(
+        self.read_marker_linearizer = Linearizer(
+            name="read_marker", clock=hs.get_clock()
+        )
 
     async def received_client_read_marker(
         self, room_id: str, user_id: str, event_id: str
synapse/handlers/register.py
CHANGED
@@ -23,7 +23,14 @@
 """Contains functions for registering clients."""
 
 import logging
-from typing import
+from typing import (
+    TYPE_CHECKING,
+    Iterable,
+    List,
+    Optional,
+    Tuple,
+    TypedDict,
+)
 
 from prometheus_client import Counter
 
synapse/handlers/room.py
CHANGED
@@ -597,7 +597,7 @@ class RoomCreationHandler:
             new_room_version,
             additional_creators=additional_creators,
         )
-        initial_state = {}
+        initial_state: MutableStateMap = {}
 
         # Replicate relevant room events
         types_to_copy: List[Tuple[str, Optional[str]]] = [
@@ -693,14 +693,23 @@
             additional_creators,
         )
 
-        # We construct what the body of a call to /createRoom would look like
-        # to the spam checker. We don't include a preset here, as we expect the
+        # We construct a subset of what the body of a call to /createRoom would look like
+        # for passing to the spam checker. We don't include a preset here, as we expect the
         # initial state to contain everything we need.
+        # TODO: given we are upgrading, it would make sense to pass the room_version
+        # TODO: the preset might be useful too
         spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
             user_id,
             {
                 "creation_content": creation_content,
-                "initial_state":
+                "initial_state": [
+                    {
+                        "type": state_key[0],
+                        "state_key": state_key[1],
+                        "content": event_content,
+                    }
+                    for state_key, event_content in initial_state.items()
+                ],
             },
         )
         if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
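In the room-upgrade hunk above, the payload handed to the user_may_create_room spam-checker callback changes from passing initial_state through directly to flattening it into a list of event-like dicts. A small self-contained sketch of that conversion; the example state values below are illustrative, not taken from the diff:

from typing import Dict, Tuple

# initial_state maps (event_type, state_key) -> event content, as in the hunk above.
initial_state: Dict[Tuple[str, str], dict] = {
    ("m.room.join_rules", ""): {"join_rule": "invite"},
    ("m.room.history_visibility", ""): {"history_visibility": "shared"},
}

spam_checker_body = {
    "creation_content": {"room_version": "11"},  # illustrative only
    "initial_state": [
        {
            "type": state_key[0],
            "state_key": state_key[1],
            "content": event_content,
        }
        for state_key, event_content in initial_state.items()
    ],
}

print(spam_checker_body["initial_state"][0]["type"])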
synapse/handlers/room_member.py
CHANGED
@@ -50,7 +50,6 @@ from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
 from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging import opentracing
 from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.push import ReplicationCopyPusherRestServlet
 from synapse.storage.databases.main.state_deltas import StateDelta
 from synapse.storage.invite_rule import InviteRule
@@ -114,8 +113,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         if self.hs.config.server.include_profile_data_on_invite:
             self._membership_types_to_include_profile_data_in.add(Membership.INVITE)
 
-        self.member_linearizer: Linearizer = Linearizer(
-
+        self.member_linearizer: Linearizer = Linearizer(
+            name="member", clock=hs.get_clock()
+        )
+        self.member_as_limiter = Linearizer(
+            max_count=10, name="member_as_limiter", clock=hs.get_clock()
+        )
 
         self.clock = hs.get_clock()
         self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
@@ -2186,7 +2189,10 @@ class RoomForgetterHandler(StateDeltasHandler):
         self._notifier.add_replication_callback(self.notify_new_event)
 
         # We kick this off to pick up outstanding work from before the last restart.
-        self._clock.call_later(
+        self._clock.call_later(
+            0,
+            self.notify_new_event,
+        )
 
     def notify_new_event(self) -> None:
         """Called when there may be more deltas to process"""
@@ -2201,9 +2207,7 @@
             finally:
                 self._is_processing = False
 
-        run_as_background_process(
-            "room_forgetter.notify_new_event", self.server_name, process
-        )
+        self._hs.run_as_background_process("room_forgetter.notify_new_event", process)
 
     async def _unsafe_process(self) -> None:
         # If self.pos is None then means we haven't fetched it from DB
|