matrix-synapse 1.139.0rc2-cp39-abi3-musllinux_1_2_aarch64.whl → 1.140.0rc1-cp39-abi3-musllinux_1_2_aarch64.whl
This diff shows the changes between two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +158 -155
- synapse/_scripts/generate_workers_map.py +6 -1
- synapse/_scripts/synapse_port_db.py +0 -2
- synapse/_scripts/update_synapse_database.py +1 -6
- synapse/api/auth/base.py +1 -3
- synapse/api/auth/mas.py +6 -8
- synapse/api/auth/msc3861_delegated.py +6 -8
- synapse/api/errors.py +3 -0
- synapse/app/_base.py +101 -39
- synapse/app/admin_cmd.py +2 -4
- synapse/app/appservice.py +1 -1
- synapse/app/client_reader.py +1 -1
- synapse/app/event_creator.py +1 -1
- synapse/app/federation_reader.py +1 -1
- synapse/app/federation_sender.py +1 -1
- synapse/app/frontend_proxy.py +1 -1
- synapse/app/generic_worker.py +17 -11
- synapse/app/homeserver.py +85 -47
- synapse/app/media_repository.py +1 -1
- synapse/app/phone_stats_home.py +16 -14
- synapse/app/pusher.py +1 -1
- synapse/app/synchrotron.py +1 -1
- synapse/app/user_dir.py +1 -1
- synapse/appservice/__init__.py +29 -2
- synapse/appservice/scheduler.py +8 -8
- synapse/config/_base.py +32 -14
- synapse/config/_base.pyi +5 -3
- synapse/config/experimental.py +3 -0
- synapse/config/homeserver.py +27 -1
- synapse/config/logger.py +3 -4
- synapse/config/matrixrtc.py +67 -0
- synapse/crypto/keyring.py +18 -4
- synapse/events/auto_accept_invites.py +0 -1
- synapse/federation/federation_client.py +39 -0
- synapse/federation/federation_server.py +1 -1
- synapse/federation/send_queue.py +3 -0
- synapse/federation/sender/__init__.py +24 -8
- synapse/federation/sender/per_destination_queue.py +31 -8
- synapse/federation/sender/transaction_manager.py +12 -0
- synapse/federation/transport/client.py +29 -0
- synapse/handlers/account_validity.py +2 -4
- synapse/handlers/appservice.py +5 -7
- synapse/handlers/deactivate_account.py +2 -3
- synapse/handlers/delayed_events.py +10 -13
- synapse/handlers/device.py +14 -14
- synapse/handlers/e2e_keys.py +16 -11
- synapse/handlers/federation.py +7 -11
- synapse/handlers/federation_event.py +5 -6
- synapse/handlers/message.py +16 -10
- synapse/handlers/pagination.py +3 -7
- synapse/handlers/presence.py +21 -25
- synapse/handlers/profile.py +1 -1
- synapse/handlers/read_marker.py +3 -1
- synapse/handlers/register.py +8 -1
- synapse/handlers/room.py +13 -4
- synapse/handlers/room_member.py +11 -7
- synapse/handlers/room_policy.py +96 -2
- synapse/handlers/sso.py +1 -1
- synapse/handlers/stats.py +5 -3
- synapse/handlers/sync.py +20 -13
- synapse/handlers/typing.py +5 -10
- synapse/handlers/user_directory.py +12 -11
- synapse/handlers/worker_lock.py +19 -15
- synapse/http/client.py +18 -13
- synapse/http/federation/matrix_federation_agent.py +6 -1
- synapse/http/federation/well_known_resolver.py +3 -1
- synapse/http/matrixfederationclient.py +50 -11
- synapse/http/proxy.py +2 -2
- synapse/http/server.py +36 -2
- synapse/http/site.py +109 -17
- synapse/logging/context.py +201 -110
- synapse/logging/opentracing.py +30 -6
- synapse/logging/scopecontextmanager.py +161 -0
- synapse/media/_base.py +2 -1
- synapse/media/media_repository.py +20 -6
- synapse/media/url_previewer.py +5 -6
- synapse/metrics/_gc.py +3 -1
- synapse/metrics/background_process_metrics.py +128 -24
- synapse/metrics/common_usage_metrics.py +3 -5
- synapse/module_api/__init__.py +42 -5
- synapse/notifier.py +10 -3
- synapse/push/emailpusher.py +5 -4
- synapse/push/httppusher.py +6 -6
- synapse/push/pusherpool.py +3 -8
- synapse/replication/http/devices.py +0 -41
- synapse/replication/tcp/client.py +8 -5
- synapse/replication/tcp/handler.py +2 -3
- synapse/replication/tcp/protocol.py +14 -7
- synapse/replication/tcp/redis.py +16 -11
- synapse/replication/tcp/resource.py +5 -4
- synapse/replication/tcp/streams/__init__.py +2 -0
- synapse/res/providers.json +6 -5
- synapse/rest/__init__.py +2 -0
- synapse/rest/admin/__init__.py +4 -0
- synapse/rest/admin/events.py +69 -0
- synapse/rest/admin/media.py +70 -2
- synapse/rest/client/keys.py +147 -3
- synapse/rest/client/matrixrtc.py +52 -0
- synapse/rest/client/push_rule.py +1 -1
- synapse/rest/client/room.py +2 -3
- synapse/rest/client/sync.py +1 -3
- synapse/rest/client/transactions.py +1 -1
- synapse/server.py +271 -38
- synapse/server_notices/server_notices_manager.py +1 -0
- synapse/state/__init__.py +4 -1
- synapse/storage/_base.py +1 -1
- synapse/storage/background_updates.py +8 -3
- synapse/storage/controllers/persist_events.py +4 -3
- synapse/storage/controllers/purge_events.py +2 -3
- synapse/storage/controllers/state.py +5 -5
- synapse/storage/database.py +12 -7
- synapse/storage/databases/main/__init__.py +7 -2
- synapse/storage/databases/main/cache.py +4 -3
- synapse/storage/databases/main/censor_events.py +1 -1
- synapse/storage/databases/main/client_ips.py +9 -8
- synapse/storage/databases/main/deviceinbox.py +7 -6
- synapse/storage/databases/main/devices.py +4 -4
- synapse/storage/databases/main/end_to_end_keys.py +6 -3
- synapse/storage/databases/main/event_federation.py +7 -6
- synapse/storage/databases/main/event_push_actions.py +13 -13
- synapse/storage/databases/main/events_bg_updates.py +1 -1
- synapse/storage/databases/main/events_worker.py +6 -8
- synapse/storage/databases/main/lock.py +17 -13
- synapse/storage/databases/main/media_repository.py +2 -2
- synapse/storage/databases/main/metrics.py +6 -6
- synapse/storage/databases/main/monthly_active_users.py +3 -4
- synapse/storage/databases/main/receipts.py +1 -1
- synapse/storage/databases/main/registration.py +18 -19
- synapse/storage/databases/main/roommember.py +1 -1
- synapse/storage/databases/main/session.py +3 -3
- synapse/storage/databases/main/sliding_sync.py +2 -2
- synapse/storage/databases/main/transactions.py +3 -3
- synapse/storage/databases/state/store.py +2 -0
- synapse/synapse_rust/http_client.pyi +4 -0
- synapse/synapse_rust.abi3.so +0 -0
- synapse/util/async_helpers.py +36 -24
- synapse/util/batching_queue.py +16 -6
- synapse/util/caches/__init__.py +1 -1
- synapse/util/caches/deferred_cache.py +4 -0
- synapse/util/caches/descriptors.py +14 -2
- synapse/util/caches/dictionary_cache.py +6 -1
- synapse/util/caches/expiringcache.py +16 -5
- synapse/util/caches/lrucache.py +14 -26
- synapse/util/caches/response_cache.py +11 -1
- synapse/util/clock.py +215 -39
- synapse/util/constants.py +2 -0
- synapse/util/daemonize.py +5 -1
- synapse/util/distributor.py +9 -5
- synapse/util/metrics.py +35 -6
- synapse/util/ratelimitutils.py +4 -1
- synapse/util/retryutils.py +7 -4
- synapse/util/task_scheduler.py +11 -14
- synapse/logging/filter.py +0 -38
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/notifier.py
CHANGED
@@ -676,9 +676,16 @@ class Notifier:
                 # is a new token.
                 listener = user_stream.new_listener(prev_token)
                 listener = timeout_deferred(
-                    listener,
-                    (end_time - now) / 1000.0,
-
+                    deferred=listener,
+                    timeout=(end_time - now) / 1000.0,
+                    # We don't track these calls since they are constantly being
+                    # overridden by new calls to /sync and they don't hold the
+                    # `HomeServer` in memory on shutdown. It is safe to let them
+                    # timeout of their own accord after shutting down since it
+                    # won't delay shutdown and there won't be any adverse
+                    # behaviour.
+                    cancel_on_shutdown=False,
+                    clock=self.hs.get_clock(),
                 )

                 log_kv(
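The notable change here is that timeout_deferred is now called with keyword arguments, takes an explicit Clock, and grows an opt-out cancel_on_shutdown flag. A minimal sketch of the new call style, assuming the import path from synapse/util/async_helpers.py (touched in this same release); the wrapper function is hypothetical:

from twisted.internet.defer import Deferred

from synapse.util.async_helpers import timeout_deferred  # assumed import path


def listen_with_timeout(hs, listener: "Deferred[None]", timeout_s: float) -> "Deferred[None]":
    # Keyword-style call as shown in the hunk above; `listen_with_timeout`
    # itself is illustrative, not Synapse code.
    return timeout_deferred(
        deferred=listener,
        timeout=timeout_s,
        # Short-lived /sync waiters are constantly replaced, so they opt out
        # of shutdown cancellation (see the comment in the diff).
        cancel_on_shutdown=False,
        clock=hs.get_clock(),
    )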
synapse/push/emailpusher.py
CHANGED
@@ -25,7 +25,6 @@ from typing import TYPE_CHECKING, Dict, List, Optional
 from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 from twisted.internet.interfaces import IDelayedCall

-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push import Pusher, PusherConfig, PusherConfigException, ThrottleParams
 from synapse.push.mailer import Mailer
 from synapse.push.push_types import EmailReason
@@ -118,7 +117,7 @@ class EmailPusher(Pusher):
         if self._is_processing:
             return

-        run_as_background_process("emailpush.process", self.server_name, self._process)
+        self.hs.run_as_background_process("emailpush.process", self._process)

     def _pause_processing(self) -> None:
         """Used by tests to temporarily pause processing of events.
@@ -228,8 +227,10 @@ class EmailPusher(Pusher):
         self.timed_call = None

         if soonest_due_at is not None:
-
-
+            delay = self.seconds_until(soonest_due_at)
+            self.timed_call = self.hs.get_clock().call_later(
+                delay,
+                self.on_timer,
             )

     async def save_last_stream_ordering_and_success(
synapse/push/httppusher.py
CHANGED
@@ -32,7 +32,6 @@ from synapse.api.constants import EventTypes
 from synapse.events import EventBase
 from synapse.logging import opentracing
 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push import Pusher, PusherConfig, PusherConfigException
 from synapse.storage.databases.main.event_push_actions import HttpPushAction
 from synapse.types import JsonDict, JsonMapping
@@ -182,8 +181,8 @@ class HttpPusher(Pusher):

         # We could check the receipts are actually m.read receipts here,
         # but currently that's the only type of receipt anyway...
-        run_as_background_process(
-            "http_pusher.on_new_receipts", self.server_name, self._update_badge
+        self.hs.run_as_background_process(
+            "http_pusher.on_new_receipts", self._update_badge
         )

     async def _update_badge(self) -> None:
@@ -219,7 +218,7 @@ class HttpPusher(Pusher):
         if self.failing_since and self.timed_call and self.timed_call.active():
             return

-        run_as_background_process("httppush.process", self.server_name, self._process)
+        self.hs.run_as_background_process("httppush.process", self._process)

     async def _process(self) -> None:
         # we should never get here if we are already processing
@@ -336,8 +335,9 @@ class HttpPusher(Pusher):
             )
         else:
             logger.info("Push failed: delaying for %ds", self.backoff_delay)
-            self.timed_call = self.hs.
-                self.backoff_delay,
+            self.timed_call = self.hs.get_clock().call_later(
+                self.backoff_delay,
+                self.on_timer,
             )
             self.backoff_delay = min(
                 self.backoff_delay * 2, self.MAX_BACKOFF_SEC
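Alongside the background-process change, retry timers move onto the HomeServer's Clock via get_clock().call_later. A sketch of the clock-driven exponential backoff these hunks converge on; the class, the cap, and the initial delay are illustrative:

class RetryExample:
    # Hypothetical pusher-like class; only get_clock().call_later and the
    # doubling backoff mirror the diff above.
    MAX_BACKOFF_SEC = 60 * 60

    def __init__(self, hs) -> None:
        self.hs = hs
        self.backoff_delay = 60  # seconds
        self.timed_call = None

    def on_failure(self) -> None:
        # Schedule the next attempt via the Clock so the delayed call is
        # tracked by Synapse rather than handed raw to the reactor.
        self.timed_call = self.hs.get_clock().call_later(
            self.backoff_delay,
            self.on_timer,
        )
        self.backoff_delay = min(self.backoff_delay * 2, self.MAX_BACKOFF_SEC)

    def on_timer(self) -> None:
        ...  # retry the push here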
synapse/push/pusherpool.py
CHANGED
@@ -27,7 +27,6 @@ from prometheus_client import Gauge
 from synapse.api.errors import Codes, SynapseError
 from synapse.metrics import SERVER_NAME_LABEL
 from synapse.metrics.background_process_metrics import (
-    run_as_background_process,
     wrap_as_background_process,
 )
 from synapse.push import Pusher, PusherConfig, PusherConfigException
@@ -70,10 +69,8 @@ class PusherPool:
     """

     def __init__(self, hs: "HomeServer"):
-        self.hs = hs
-        self.server_name = (
-            hs.hostname
-        )  # nb must be called this for @wrap_as_background_process
+        self.hs = hs  # nb must be called this for @wrap_as_background_process
+        self.server_name = hs.hostname
         self.pusher_factory = PusherFactory(hs)
         self.store = self.hs.get_datastores().main
         self.clock = self.hs.get_clock()
@@ -112,9 +109,7 @@ class PusherPool:
         if not self._should_start_pushers:
             logger.info("Not starting pushers because they are disabled in the config")
             return
-        run_as_background_process(
-            "start_pushers", self.server_name, self._start_pushers
-        )
+        self.hs.run_as_background_process("start_pushers", self._start_pushers)

    async def add_or_update_pusher(
        self,
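The "nb must be called this" comment survives the reshuffle because @wrap_as_background_process evidently resolves attributes on the instance by name at call time. A hedged sketch of that contract; the pool class is hypothetical, only the decorator and the naming requirement come from the hunk above:

from synapse.metrics.background_process_metrics import wrap_as_background_process


class ExamplePool:
    def __init__(self, hs) -> None:
        self.hs = hs  # nb: must be named `hs` for @wrap_as_background_process

    @wrap_as_background_process("example_loop")
    async def example_loop(self) -> None:
        ...  # runs as a tracked background process when called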
synapse/replication/http/devices.py
CHANGED
@@ -185,46 +185,6 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint):
         return 200, multi_user_devices


-# FIXME(2025-07-22): Remove this on the next release, this will only get used
-# during rollout to Synapse 1.135 and can be removed after that release.
-class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
-    """Unused endpoint, kept for backwards compatibility during rollout."""
-
-    NAME = "upload_keys_for_user"
-    PATH_ARGS = ()
-    CACHE = False
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
-
-        self.e2e_keys_handler = hs.get_e2e_keys_handler()
-        self.store = hs.get_datastores().main
-        self.clock = hs.get_clock()
-
-    @staticmethod
-    async def _serialize_payload(  # type: ignore[override]
-        user_id: str, device_id: str, keys: JsonDict
-    ) -> JsonDict:
-        return {
-            "user_id": user_id,
-            "device_id": device_id,
-            "keys": keys,
-        }
-
-    async def _handle_request(  # type: ignore[override]
-        self, request: Request, content: JsonDict
-    ) -> Tuple[int, JsonDict]:
-        user_id = content["user_id"]
-        device_id = content["device_id"]
-        keys = content["keys"]
-
-        results = await self.e2e_keys_handler.upload_keys_for_user(
-            user_id, device_id, keys
-        )
-
-        return 200, results
-
-
 class ReplicationHandleNewDeviceUpdateRestServlet(ReplicationEndpoint):
     """Wake up a device writer to send local device list changes as federation outbound pokes.

@@ -291,5 +251,4 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ReplicationNotifyUserSignatureUpdateRestServlet(hs).register(http_server)
     ReplicationMultiUserDevicesResyncRestServlet(hs).register(http_server)
     ReplicationHandleNewDeviceUpdateRestServlet(hs).register(http_server)
-    ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
     ReplicationDeviceHandleRoomUnPartialStated(hs).register(http_server)
synapse/replication/tcp/client.py
CHANGED
@@ -32,7 +32,6 @@ from synapse.api.constants import EventTypes, Membership, ReceiptTypes
 from synapse.federation import send_queue
 from synapse.federation.sender import FederationSender
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.tcp.streams import (
     AccountDataStream,
     DeviceListsStream,
@@ -344,7 +343,9 @@ class ReplicationDataHandler:
         # to wedge here forever.
         deferred: "Deferred[None]" = Deferred()
         deferred = timeout_deferred(
-            deferred,
+            deferred=deferred,
+            timeout=_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS,
+            clock=self._clock,
         )

         waiting_list = self._streams_to_waiters.setdefault(
@@ -436,7 +437,9 @@ class FederationSenderHandler:
         # to. This is always set before we use it.
         self.federation_position: Optional[int] = None

-        self._fed_position_linearizer = Linearizer(
+        self._fed_position_linearizer = Linearizer(
+            name="_fed_position_linearizer", clock=hs.get_clock()
+        )

     async def process_replication_rows(
         self, stream_name: str, token: int, rows: list
@@ -511,8 +514,8 @@ class FederationSenderHandler:
         # no need to queue up another task.
         return

-        run_as_background_process(
-            "_save_and_send_ack", self.server_name, self._save_and_send_ack
+        self._hs.run_as_background_process(
+            "_save_and_send_ack", self._save_and_send_ack
         )

     async def _save_and_send_ack(self) -> None:
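Besides the keyword-style timeout_deferred call, Linearizer now takes an explicit name and Clock. A one-line construction sketch, assuming the synapse.util.async_helpers import path; the linearizer name here is illustrative:

from synapse.util.async_helpers import Linearizer  # assumed import path


def make_linearizer(hs) -> Linearizer:
    # Construction style from the hunk above.
    return Linearizer(name="example_linearizer", clock=hs.get_clock())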
synapse/replication/tcp/handler.py
CHANGED
@@ -41,7 +41,6 @@ from prometheus_client import Counter
 from twisted.internet.protocol import ReconnectingClientFactory

 from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.tcp.commands import (
     ClearUserSyncsCommand,
     Command,
@@ -132,6 +131,7 @@ class ReplicationCommandHandler:

     def __init__(self, hs: "HomeServer"):
         self.server_name = hs.hostname
+        self.hs = hs
         self._replication_data_handler = hs.get_replication_data_handler()
         self._presence_handler = hs.get_presence_handler()
         self._store = hs.get_datastores().main
@@ -361,9 +361,8 @@ class ReplicationCommandHandler:
             return

         # fire off a background process to start processing the queue.
-        run_as_background_process(
+        self.hs.run_as_background_process(
             "process-replication-data",
-            self.server_name,
             self._unsafe_process_queue,
             stream_name,
         )
synapse/replication/tcp/protocol.py
CHANGED
@@ -42,7 +42,6 @@ from synapse.logging.context import PreserveLoggingContext
 from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
 from synapse.metrics.background_process_metrics import (
     BackgroundProcessLoggingContext,
-    run_as_background_process,
 )
 from synapse.replication.tcp.commands import (
     VALID_CLIENT_COMMANDS,
@@ -140,9 +139,14 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
     max_line_buffer = 10000

     def __init__(
-        self,
+        self,
+        hs: "HomeServer",
+        server_name: str,
+        clock: Clock,
+        handler: "ReplicationCommandHandler",
     ):
         self.server_name = server_name
+        self.hs = hs
         self.clock = clock
         self.command_handler = handler

@@ -290,9 +294,8 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         # if so.

         if isawaitable(res):
-            run_as_background_process(
+            self.hs.run_as_background_process(
                 "replication-" + cmd.get_logcontext_id(),
-                self.server_name,
                 lambda: res,
             )

@@ -470,9 +473,13 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
     VALID_OUTBOUND_COMMANDS = VALID_SERVER_COMMANDS

     def __init__(
-        self,
+        self,
+        hs: "HomeServer",
+        server_name: str,
+        clock: Clock,
+        handler: "ReplicationCommandHandler",
     ):
-        super().__init__(server_name, clock, handler)
+        super().__init__(hs, server_name, clock, handler)

         self.server_name = server_name

@@ -497,7 +504,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         clock: Clock,
         command_handler: "ReplicationCommandHandler",
     ):
-        super().__init__(server_name, clock, command_handler)
+        super().__init__(hs, server_name, clock, command_handler)

         self.client_name = client_name
         self.server_name = server_name
synapse/replication/tcp/redis.py
CHANGED
@@ -40,7 +40,6 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.metrics import SERVER_NAME_LABEL
 from synapse.metrics.background_process_metrics import (
     BackgroundProcessLoggingContext,
-    run_as_background_process,
     wrap_as_background_process,
 )
 from synapse.replication.tcp.commands import (
@@ -109,6 +108,7 @@ class RedisSubscriber(SubscriberProtocol):
     """

     server_name: str
+    hs: "HomeServer"
     synapse_handler: "ReplicationCommandHandler"
     synapse_stream_prefix: str
     synapse_channel_names: List[str]
@@ -146,9 +146,7 @@ class RedisSubscriber(SubscriberProtocol):
     def connectionMade(self) -> None:
         logger.info("Connected to redis")
         super().connectionMade()
-        run_as_background_process(
-            "subscribe-replication", self.server_name, self._send_subscribe
-        )
+        self.hs.run_as_background_process("subscribe-replication", self._send_subscribe)

     async def _send_subscribe(self) -> None:
         # it's important to make sure that we only send the REPLICATE command once we
@@ -223,8 +221,8 @@ class RedisSubscriber(SubscriberProtocol):
         # if so.

         if isawaitable(res):
-            run_as_background_process(
-                "replication-" + cmd.get_logcontext_id(),
+            self.hs.run_as_background_process(
+                "replication-" + cmd.get_logcontext_id(), lambda: res
             )

     def connectionLost(self, reason: Failure) -> None:  # type: ignore[override]
@@ -245,11 +243,17 @@ class RedisSubscriber(SubscriberProtocol):
         Args:
             cmd: The command to send
         """
-        run_as_background_process(
+        self.hs.run_as_background_process(
             "send-cmd",
-            self.server_name,
             self._async_send_command,
             cmd,
+            # We originally started tracing background processes to avoid `There was no
+            # active span` errors but this change meant we started generating 15x the
+            # number of spans than before (this is one of the most heavily called
+            # instances of `run_as_background_process`).
+            #
+            # Since we don't log or tag a tracing span in the downstream
+            # code, we can safely disable this.
             bg_start_span=False,
         )

@@ -310,9 +314,8 @@ class SynapseRedisFactory(RedisFactory):
             convertNumbers=convertNumbers,
         )

-        self.server_name = (
-            hs.hostname
-        )  # nb must be called this for @wrap_as_background_process
+        self.hs = hs  # nb must be called this for @wrap_as_background_process
+        self.server_name = hs.hostname

         hs.get_clock().looping_call(self._send_ping, 30 * 1000)

@@ -390,6 +393,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
         )

         self.server_name = hs.hostname
+        self.hs = hs
         self.synapse_handler = hs.get_replication_command_handler()
         self.synapse_stream_prefix = hs.hostname
         self.synapse_channel_names = channel_names
@@ -405,6 +409,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
         # the base method does some other things than just instantiating the
         # protocol.
         p.server_name = self.server_name
+        p.hs = self.hs
         p.synapse_handler = self.synapse_handler
         p.synapse_outbound_redis_connection = self.synapse_outbound_redis_connection
         p.synapse_stream_prefix = self.synapse_stream_prefix
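The send_command hunk also documents why tracing spans are suppressed on this hot path. A hedged sketch of the bg_start_span=False usage; the subscriber class is illustrative, only the call shape and flag come from the diff above:

class ExampleSubscriber:
    def __init__(self, hs) -> None:
        self.hs = hs

    def send_command(self, cmd) -> None:
        self.hs.run_as_background_process(
            "send-cmd",
            self._async_send_command,
            cmd,
            # This fires for every outbound command; starting an opentracing
            # span each time multiplied span volume ~15x for no benefit,
            # since nothing downstream logs or tags the span.
            bg_start_span=False,
        )

    async def _async_send_command(self, cmd) -> None:
        ...  # write the command to the redis connection here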
synapse/replication/tcp/resource.py
CHANGED
@@ -30,7 +30,6 @@ from twisted.internet.interfaces import IAddress
 from twisted.internet.protocol import ServerFactory

 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.tcp.commands import PositionCommand
 from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol
 from synapse.replication.tcp.streams import EventsStream
@@ -55,6 +54,7 @@ class ReplicationStreamProtocolFactory(ServerFactory):
     def __init__(self, hs: "HomeServer"):
         self.command_handler = hs.get_replication_command_handler()
         self.clock = hs.get_clock()
+        self.hs = hs
         self.server_name = hs.config.server.server_name

         # If we've created a `ReplicationStreamProtocolFactory` then we're
@@ -69,7 +69,7 @@ class ReplicationStreamProtocolFactory(ServerFactory):

     def buildProtocol(self, addr: IAddress) -> ServerReplicationStreamProtocol:
         return ServerReplicationStreamProtocol(
-            self.server_name, self.clock, self.command_handler
+            self.hs, self.server_name, self.clock, self.command_handler
         )


@@ -82,6 +82,7 @@ class ReplicationStreamer:

     def __init__(self, hs: "HomeServer"):
         self.server_name = hs.hostname
+        self.hs = hs
         self.store = hs.get_datastores().main
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()
@@ -147,8 +148,8 @@ class ReplicationStreamer:
             logger.debug("Notifier poke loop already running")
             return

-        run_as_background_process(
-            "replication_notifier", self.server_name, self._run_notifier_loop
-        )
+        self.hs.run_as_background_process(
+            "replication_notifier", self._run_notifier_loop
+        )

     async def _run_notifier_loop(self) -> None:
synapse/replication/tcp/streams/__init__.py
CHANGED
@@ -77,6 +77,7 @@ STREAMS_MAP = {
 __all__ = [
     "STREAMS_MAP",
     "Stream",
+    "EventsStream",
     "BackfillStream",
     "PresenceStream",
     "PresenceFederationStream",
@@ -87,6 +88,7 @@ __all__ = [
     "CachesStream",
     "DeviceListsStream",
     "ToDeviceStream",
+    "FederationStream",
     "AccountDataStream",
     "ThreadSubscriptionsStream",
     "UnPartialStatedRoomStream",
synapse/res/providers.json
CHANGED
@@ -1,14 +1,15 @@
 [
     {
-        "provider_name": "Twitter",
-        "provider_url": "https://twitter.com/",
+        "provider_name": "X",
+        "provider_url": "https://x.com/",
         "endpoints": [
             {
                 "schemes": [
-                    "https://twitter.com/*/status/*",
-                    "https://*.twitter.com/*/status/*"
+                    "https://x.com/*",
+                    "https://x.com/*/status/*",
+                    "https://*.x.com/*/status/*"
                 ],
-                "url": "https://publish.twitter.com/oembed"
+                "url": "https://publish.x.com/oembed"
             }
         ]
     },
synapse/rest/__init__.py
CHANGED
@@ -42,6 +42,7 @@ from synapse.rest.client import (
     login,
     login_token_request,
     logout,
+    matrixrtc,
     mutual_rooms,
     notifications,
     openid,
@@ -89,6 +90,7 @@ CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] = (
     presence.register_servlets,
     directory.register_servlets,
     voip.register_servlets,
+    matrixrtc.register_servlets,
     pusher.register_servlets,
     push_rule.register_servlets,
     logout.register_servlets,
synapse/rest/admin/__init__.py
CHANGED
@@ -57,6 +57,9 @@ from synapse.rest.admin.event_reports import (
     EventReportDetailRestServlet,
     EventReportsRestServlet,
 )
+from synapse.rest.admin.events import (
+    EventRestServlet,
+)
 from synapse.rest.admin.experimental_features import ExperimentalFeaturesRestServlet
 from synapse.rest.admin.federation import (
     DestinationMembershipRestServlet,
@@ -339,6 +342,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ExperimentalFeaturesRestServlet(hs).register(http_server)
     SuspendAccountRestServlet(hs).register(http_server)
     ScheduledTasksRestServlet(hs).register(http_server)
+    EventRestServlet(hs).register(http_server)


 def register_servlets_for_client_rest_resource(
synapse/rest/admin/events.py
ADDED
@@ -0,0 +1,69 @@
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Tuple
+
+from synapse.api.errors import NotFoundError
+from synapse.events.utils import (
+    SerializeEventConfig,
+    format_event_raw,
+    serialize_event,
+)
+from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
+from synapse.rest.admin import admin_patterns
+from synapse.rest.admin._base import assert_user_is_admin
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+class EventRestServlet(RestServlet):
+    """
+    Get an event that is known to the homeserver.
+    The requester must have administrator access in Synapse.
+
+    GET /_synapse/admin/v1/fetch_event/<event_id>
+    returns:
+        200 OK with event json if the event is known to the homeserver. Otherwise raises
+        a NotFound error.
+
+    Args:
+        event_id: the id of the requested event.
+    Returns:
+        JSON blob of the event
+    """
+
+    PATTERNS = admin_patterns("/fetch_event/(?P<event_id>[^/]*)$")
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastores().main
+        self._clock = hs.get_clock()
+
+    async def on_GET(
+        self, request: SynapseRequest, event_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self._auth.get_user_by_req(request)
+        await assert_user_is_admin(self._auth, requester)
+
+        event = await self._store.get_event(
+            event_id,
+            EventRedactBehaviour.as_is,
+            allow_none=True,
+        )
+
+        if event is None:
+            raise NotFoundError("Event not found")
+
+        config = SerializeEventConfig(
+            as_client_event=False,
+            event_format=format_event_raw,
+            requester=requester,
+            only_event_fields=None,
+            include_stripped_room_state=True,
+            include_admin_metadata=True,
+        )
+        res = {"event": serialize_event(event, self._clock.time_msec(), config=config)}
+
+        return HTTPStatus.OK, res
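A hedged usage sketch for this new admin endpoint; the URL path comes from the servlet's PATTERNS above, while the homeserver host, access token, and event ID are placeholders:

from urllib.parse import quote

import requests

event_id = "$someEventId:example.org"  # placeholder
resp = requests.get(
    f"https://homeserver.example/_synapse/admin/v1/fetch_event/{quote(event_id)}",
    headers={"Authorization": "Bearer <admin-access-token>"},
)
resp.raise_for_status()
print(resp.json()["event"])  # the raw, unredacted event JSON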
synapse/rest/admin/media.py
CHANGED
@@ -18,7 +18,6 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-
 import logging
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Optional, Tuple
@@ -41,7 +40,9 @@ from synapse.rest.admin._base import (
     assert_requester_is_admin,
     assert_user_is_admin,
 )
-from synapse.storage.databases.main.media_repository import MediaSortOrder
+from synapse.storage.databases.main.media_repository import (
+    MediaSortOrder,
+)
 from synapse.types import JsonDict, UserID

 if TYPE_CHECKING:
@@ -50,6 +51,72 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)


+class QueryMediaById(RestServlet):
+    """
+    Fetch info about a piece of local or cached remote media.
+    """
+
+    PATTERNS = admin_patterns("/media/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$")
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastores().main
+        self.auth = hs.get_auth()
+        self.server_name = hs.hostname
+        self.hs = hs
+        self.media_repo = hs.get_media_repository()
+
+    async def on_GET(
+        self, request: SynapseRequest, server_name: str, media_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester)
+
+        if not self.hs.is_mine_server_name(server_name):
+            remote_media_info = await self.media_repo.get_cached_remote_media_info(
+                server_name, media_id
+            )
+            if remote_media_info is None:
+                raise NotFoundError("Unknown media")
+            resp = {
+                "media_origin": remote_media_info.media_origin,
+                "user_id": None,
+                "media_id": remote_media_info.media_id,
+                "media_type": remote_media_info.media_type,
+                "media_length": remote_media_info.media_length,
+                "upload_name": remote_media_info.upload_name,
+                "created_ts": remote_media_info.created_ts,
+                "filesystem_id": remote_media_info.filesystem_id,
+                "url_cache": None,
+                "last_access_ts": remote_media_info.last_access_ts,
+                "quarantined_by": remote_media_info.quarantined_by,
+                "authenticated": remote_media_info.authenticated,
+                "safe_from_quarantine": None,
+                "sha256": remote_media_info.sha256,
+            }
+        else:
+            local_media_info = await self.store.get_local_media(media_id)
+            if local_media_info is None:
+                raise NotFoundError("Unknown media")
+            resp = {
+                "media_origin": None,
+                "user_id": local_media_info.user_id,
+                "media_id": local_media_info.media_id,
+                "media_type": local_media_info.media_type,
+                "media_length": local_media_info.media_length,
+                "upload_name": local_media_info.upload_name,
+                "created_ts": local_media_info.created_ts,
+                "filesystem_id": None,
+                "url_cache": local_media_info.url_cache,
+                "last_access_ts": local_media_info.last_access_ts,
+                "quarantined_by": local_media_info.quarantined_by,
+                "authenticated": local_media_info.authenticated,
+                "safe_from_quarantine": local_media_info.safe_from_quarantine,
+                "sha256": local_media_info.sha256,
+            }
+
+        return HTTPStatus.OK, {"media_info": resp}
+
+
 class QuarantineMediaInRoom(RestServlet):
     """Quarantines all media in a room so that no one can download it via
     this server.
@@ -470,3 +537,4 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer)
     DeleteMediaByDateSize(hs).register(http_server)
     DeleteMediaByID(hs).register(http_server)
     UserMediaRestServlet(hs).register(http_server)
+    QueryMediaById(hs).register(http_server)