matrix-synapse 1.139.0rc2__cp39-abi3-musllinux_1_2_aarch64.whl → 1.140.0rc1__cp39-abi3-musllinux_1_2_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrix-synapse has been flagged as potentially problematic.
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +158 -155
- synapse/_scripts/generate_workers_map.py +6 -1
- synapse/_scripts/synapse_port_db.py +0 -2
- synapse/_scripts/update_synapse_database.py +1 -6
- synapse/api/auth/base.py +1 -3
- synapse/api/auth/mas.py +6 -8
- synapse/api/auth/msc3861_delegated.py +6 -8
- synapse/api/errors.py +3 -0
- synapse/app/_base.py +101 -39
- synapse/app/admin_cmd.py +2 -4
- synapse/app/appservice.py +1 -1
- synapse/app/client_reader.py +1 -1
- synapse/app/event_creator.py +1 -1
- synapse/app/federation_reader.py +1 -1
- synapse/app/federation_sender.py +1 -1
- synapse/app/frontend_proxy.py +1 -1
- synapse/app/generic_worker.py +17 -11
- synapse/app/homeserver.py +85 -47
- synapse/app/media_repository.py +1 -1
- synapse/app/phone_stats_home.py +16 -14
- synapse/app/pusher.py +1 -1
- synapse/app/synchrotron.py +1 -1
- synapse/app/user_dir.py +1 -1
- synapse/appservice/__init__.py +29 -2
- synapse/appservice/scheduler.py +8 -8
- synapse/config/_base.py +32 -14
- synapse/config/_base.pyi +5 -3
- synapse/config/experimental.py +3 -0
- synapse/config/homeserver.py +27 -1
- synapse/config/logger.py +3 -4
- synapse/config/matrixrtc.py +67 -0
- synapse/crypto/keyring.py +18 -4
- synapse/events/auto_accept_invites.py +0 -1
- synapse/federation/federation_client.py +39 -0
- synapse/federation/federation_server.py +1 -1
- synapse/federation/send_queue.py +3 -0
- synapse/federation/sender/__init__.py +24 -8
- synapse/federation/sender/per_destination_queue.py +31 -8
- synapse/federation/sender/transaction_manager.py +12 -0
- synapse/federation/transport/client.py +29 -0
- synapse/handlers/account_validity.py +2 -4
- synapse/handlers/appservice.py +5 -7
- synapse/handlers/deactivate_account.py +2 -3
- synapse/handlers/delayed_events.py +10 -13
- synapse/handlers/device.py +14 -14
- synapse/handlers/e2e_keys.py +16 -11
- synapse/handlers/federation.py +7 -11
- synapse/handlers/federation_event.py +5 -6
- synapse/handlers/message.py +16 -10
- synapse/handlers/pagination.py +3 -7
- synapse/handlers/presence.py +21 -25
- synapse/handlers/profile.py +1 -1
- synapse/handlers/read_marker.py +3 -1
- synapse/handlers/register.py +8 -1
- synapse/handlers/room.py +13 -4
- synapse/handlers/room_member.py +11 -7
- synapse/handlers/room_policy.py +96 -2
- synapse/handlers/sso.py +1 -1
- synapse/handlers/stats.py +5 -3
- synapse/handlers/sync.py +20 -13
- synapse/handlers/typing.py +5 -10
- synapse/handlers/user_directory.py +12 -11
- synapse/handlers/worker_lock.py +19 -15
- synapse/http/client.py +18 -13
- synapse/http/federation/matrix_federation_agent.py +6 -1
- synapse/http/federation/well_known_resolver.py +3 -1
- synapse/http/matrixfederationclient.py +50 -11
- synapse/http/proxy.py +2 -2
- synapse/http/server.py +36 -2
- synapse/http/site.py +109 -17
- synapse/logging/context.py +201 -110
- synapse/logging/opentracing.py +30 -6
- synapse/logging/scopecontextmanager.py +161 -0
- synapse/media/_base.py +2 -1
- synapse/media/media_repository.py +20 -6
- synapse/media/url_previewer.py +5 -6
- synapse/metrics/_gc.py +3 -1
- synapse/metrics/background_process_metrics.py +128 -24
- synapse/metrics/common_usage_metrics.py +3 -5
- synapse/module_api/__init__.py +42 -5
- synapse/notifier.py +10 -3
- synapse/push/emailpusher.py +5 -4
- synapse/push/httppusher.py +6 -6
- synapse/push/pusherpool.py +3 -8
- synapse/replication/http/devices.py +0 -41
- synapse/replication/tcp/client.py +8 -5
- synapse/replication/tcp/handler.py +2 -3
- synapse/replication/tcp/protocol.py +14 -7
- synapse/replication/tcp/redis.py +16 -11
- synapse/replication/tcp/resource.py +5 -4
- synapse/replication/tcp/streams/__init__.py +2 -0
- synapse/res/providers.json +6 -5
- synapse/rest/__init__.py +2 -0
- synapse/rest/admin/__init__.py +4 -0
- synapse/rest/admin/events.py +69 -0
- synapse/rest/admin/media.py +70 -2
- synapse/rest/client/keys.py +147 -3
- synapse/rest/client/matrixrtc.py +52 -0
- synapse/rest/client/push_rule.py +1 -1
- synapse/rest/client/room.py +2 -3
- synapse/rest/client/sync.py +1 -3
- synapse/rest/client/transactions.py +1 -1
- synapse/server.py +271 -38
- synapse/server_notices/server_notices_manager.py +1 -0
- synapse/state/__init__.py +4 -1
- synapse/storage/_base.py +1 -1
- synapse/storage/background_updates.py +8 -3
- synapse/storage/controllers/persist_events.py +4 -3
- synapse/storage/controllers/purge_events.py +2 -3
- synapse/storage/controllers/state.py +5 -5
- synapse/storage/database.py +12 -7
- synapse/storage/databases/main/__init__.py +7 -2
- synapse/storage/databases/main/cache.py +4 -3
- synapse/storage/databases/main/censor_events.py +1 -1
- synapse/storage/databases/main/client_ips.py +9 -8
- synapse/storage/databases/main/deviceinbox.py +7 -6
- synapse/storage/databases/main/devices.py +4 -4
- synapse/storage/databases/main/end_to_end_keys.py +6 -3
- synapse/storage/databases/main/event_federation.py +7 -6
- synapse/storage/databases/main/event_push_actions.py +13 -13
- synapse/storage/databases/main/events_bg_updates.py +1 -1
- synapse/storage/databases/main/events_worker.py +6 -8
- synapse/storage/databases/main/lock.py +17 -13
- synapse/storage/databases/main/media_repository.py +2 -2
- synapse/storage/databases/main/metrics.py +6 -6
- synapse/storage/databases/main/monthly_active_users.py +3 -4
- synapse/storage/databases/main/receipts.py +1 -1
- synapse/storage/databases/main/registration.py +18 -19
- synapse/storage/databases/main/roommember.py +1 -1
- synapse/storage/databases/main/session.py +3 -3
- synapse/storage/databases/main/sliding_sync.py +2 -2
- synapse/storage/databases/main/transactions.py +3 -3
- synapse/storage/databases/state/store.py +2 -0
- synapse/synapse_rust/http_client.pyi +4 -0
- synapse/synapse_rust.abi3.so +0 -0
- synapse/util/async_helpers.py +36 -24
- synapse/util/batching_queue.py +16 -6
- synapse/util/caches/__init__.py +1 -1
- synapse/util/caches/deferred_cache.py +4 -0
- synapse/util/caches/descriptors.py +14 -2
- synapse/util/caches/dictionary_cache.py +6 -1
- synapse/util/caches/expiringcache.py +16 -5
- synapse/util/caches/lrucache.py +14 -26
- synapse/util/caches/response_cache.py +11 -1
- synapse/util/clock.py +215 -39
- synapse/util/constants.py +2 -0
- synapse/util/daemonize.py +5 -1
- synapse/util/distributor.py +9 -5
- synapse/util/metrics.py +35 -6
- synapse/util/ratelimitutils.py +4 -1
- synapse/util/retryutils.py +7 -4
- synapse/util/task_scheduler.py +11 -14
- synapse/logging/filter.py +0 -38
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
- {matrix_synapse-1.139.0rc2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/logging/scopecontextmanager.py
ADDED

@@ -0,0 +1,161 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+# Copyright (C) 2023 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+# Originally licensed under the Apache License, Version 2.0:
+# <http://www.apache.org/licenses/LICENSE-2.0>.
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+
+import logging
+from typing import Optional
+
+from opentracing import Scope, ScopeManager, Span
+
+from synapse.logging.context import (
+    LoggingContext,
+    current_context,
+    nested_logging_context,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class LogContextScopeManager(ScopeManager):
+    """
+    The LogContextScopeManager tracks the active scope in opentracing
+    by using the log contexts which are native to synapse. This is so
+    that the basic opentracing api can be used across twisted defereds.
+
+    It would be nice just to use opentracing's ContextVarsScopeManager,
+    but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301.
+    """
+
+    def __init__(self) -> None:
+        pass
+
+    @property
+    def active(self) -> Optional[Scope]:
+        """
+        Returns the currently active Scope which can be used to access the
+        currently active Scope.span.
+        If there is a non-null Scope, its wrapped Span
+        becomes an implicit parent of any newly-created Span at
+        Tracer.start_active_span() time.
+
+        Return:
+            The Scope that is active, or None if not available.
+        """
+        ctx = current_context()
+        return ctx.scope
+
+    def activate(self, span: Span, finish_on_close: bool) -> Scope:
+        """
+        Makes a Span active.
+        Args
+            span: the span that should become active.
+            finish_on_close: whether Span should be automatically finished when
+                Scope.close() is called.
+
+        Returns:
+            Scope to control the end of the active period for
+            *span*. It is a programming error to neglect to call
+            Scope.close() on the returned instance.
+        """
+
+        ctx = current_context()
+
+        if not ctx:
+            logger.error("Tried to activate scope outside of loggingcontext")
+            return Scope(None, span)  # type: ignore[arg-type]
+
+        if ctx.scope is not None:
+            # start a new logging context as a child of the existing one.
+            # Doing so -- rather than updating the existing logcontext -- means that
+            # creating several concurrent spans under the same logcontext works
+            # correctly.
+            ctx = nested_logging_context("")
+            enter_logcontext = True
+        else:
+            # if there is no span currently associated with the current logcontext, we
+            # just store the scope in it.
+            #
+            # This feels a bit dubious, but it does hack around a problem where a
+            # span outlasts its parent logcontext (which would otherwise lead to
+            # "Re-starting finished log context" errors).
+            enter_logcontext = False
+
+        scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
+        ctx.scope = scope
+        if enter_logcontext:
+            ctx.__enter__()
+
+        return scope
+
+
+class _LogContextScope(Scope):
+    """
+    A custom opentracing scope, associated with a LogContext
+
+    * When the scope is closed, the logcontext's active scope is reset to None.
+      and - if enter_logcontext was set - the logcontext is finished too.
+    """
+
+    def __init__(
+        self,
+        manager: LogContextScopeManager,
+        span: Span,
+        logcontext: LoggingContext,
+        enter_logcontext: bool,
+        finish_on_close: bool,
+    ):
+        """
+        Args:
+            manager:
+                the manager that is responsible for this scope.
+            span:
+                the opentracing span which this scope represents the local
+                lifetime for.
+            logcontext:
+                the log context to which this scope is attached.
+            enter_logcontext:
+                if True the log context will be exited when the scope is finished
+            finish_on_close:
+                if True finish the span when the scope is closed
+        """
+        super().__init__(manager, span)
+        self.logcontext = logcontext
+        self._finish_on_close = finish_on_close
+        self._enter_logcontext = enter_logcontext
+
+    def __str__(self) -> str:
+        return f"Scope<{self.span}>"
+
+    def close(self) -> None:
+        active_scope = self.manager.active
+        if active_scope is not self:
+            logger.error(
+                "Closing scope %s which is not the currently-active one %s",
+                self,
+                active_scope,
+            )
+
+        if self._finish_on_close:
+            self.span.finish()
+
+        self.logcontext.scope = None
+
+        if self._enter_logcontext:
+            self.logcontext.__exit__(None, None, None)
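For context, a scope manager like this is installed on the opentracing tracer at initialisation (Synapse's own wiring lives in synapse/logging/opentracing.py, also touched in this release). A minimal sketch of hooking it into a Jaeger tracer, assuming the jaeger_client library; the service name and sampler settings are illustrative, not taken from this diff:

# Minimal sketch: wiring LogContextScopeManager into a Jaeger tracer.
# The service name and sampler config below are illustrative assumptions.
from jaeger_client import Config

from synapse.logging.scopecontextmanager import LogContextScopeManager

config = Config(
    config={"sampler": {"type": "const", "param": 1}},
    service_name="my-synapse",  # illustrative
    scope_manager=LogContextScopeManager(),
)
tracer = config.initialize_tracer()

# Spans started via this tracer are tracked through the current Synapse
# LoggingContext, so they survive hops across Twisted Deferreds.
with tracer.start_active_span("example-operation") as scope:
    scope.span.set_tag("example", True)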
synapse/media/_base.py
CHANGED
@@ -704,6 +704,7 @@ class ThreadedFileSender:
 
     def __init__(self, hs: "HomeServer") -> None:
         self.reactor = hs.get_reactor()
+        self.clock = hs.get_clock()
         self.thread_pool = hs.get_media_sender_thread_pool()
 
         self.file: Optional[BinaryIO] = None
@@ -712,7 +713,7 @@ class ThreadedFileSender:
 
         # Signals if the thread should keep reading/sending data. Set means
         # continue, clear means pause.
-        self.wakeup_event = DeferredEvent(self.reactor)
+        self.wakeup_event = DeferredEvent(self.clock)
 
         # Signals if the thread should terminate, e.g. because the consumer has
         # gone away.
synapse/media/media_repository.py
CHANGED

@@ -67,7 +67,6 @@ from synapse.media.media_storage import (
 from synapse.media.storage_provider import StorageProviderWrapper
 from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
 from synapse.media.url_previewer import UrlPreviewer
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
 from synapse.types import UserID
 from synapse.util.async_helpers import Linearizer
@@ -108,7 +107,7 @@ class MediaRepository:
         self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
         self.thumbnail_requirements = hs.config.media.thumbnail_requirements
 
-        self.remote_media_linearizer = Linearizer(name="media_remote")
+        self.remote_media_linearizer = Linearizer(name="media_remote", clock=self.clock)
 
         self.recently_accessed_remotes: Set[Tuple[str, str]] = set()
         self.recently_accessed_locals: Set[str] = set()
@@ -187,16 +186,14 @@ class MediaRepository:
         self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository
 
     def _start_update_recently_accessed(self) -> Deferred:
-        return run_as_background_process(
+        return self.hs.run_as_background_process(
             "update_recently_accessed_media",
-            self.server_name,
             self._update_recently_accessed,
         )
 
     def _start_apply_media_retention_rules(self) -> Deferred:
-        return run_as_background_process(
+        return self.hs.run_as_background_process(
             "apply_media_retention_rules",
-            self.server_name,
             self._apply_media_retention_rules,
         )
 
@@ -423,6 +420,23 @@ class MediaRepository:
             send_cors=True,
         )
 
+    async def get_cached_remote_media_info(
+        self, origin: str, media_id: str
+    ) -> Optional[RemoteMedia]:
+        """
+        Get cached remote media info for a given origin/media ID combo. If the requested
+        media is not found locally, it will not be requested over federation and the
+        call will return None.
+
+        Args:
+            origin: The origin of the remote media
+            media_id: The media ID of the requested content
+
+        Returns:
+            The info for the cached remote media or None if it was not found
+        """
+        return await self.store.get_cached_remote_media(origin, media_id)
+
     async def get_local_media_info(
         self, request: SynapseRequest, media_id: str, max_timeout_ms: int
     ) -> Optional[LocalMedia]:
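The new get_cached_remote_media_info helper is a cache-only lookup: unlike the normal media-download path, it never falls back to a federation request. A short usage sketch; the describe_cached_media function is hypothetical, media_repo is assumed to come from hs.get_media_repository(), and the media_type/media_length fields are assumed attributes of RemoteMedia:

# Hypothetical caller of the new cache-only helper.
async def describe_cached_media(media_repo, origin: str, media_id: str) -> str:
    info = await media_repo.get_cached_remote_media_info(origin, media_id)
    if info is None:
        # Not in the local cache; this helper never fetches over federation.
        return f"{origin}/{media_id}: not cached"
    return f"{origin}/{media_id}: {info.media_type}, {info.media_length} bytes"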
synapse/media/url_previewer.py
CHANGED
@@ -44,7 +44,6 @@ from synapse.media._base import FileInfo, get_filename_from_headers
 from synapse.media.media_storage import MediaStorage, SHA256TransparentIOWriter
 from synapse.media.oembed import OEmbedProvider
 from synapse.media.preview_html import decode_body, parse_html_to_open_graph
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import JsonDict, UserID
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -167,6 +166,7 @@ class UrlPreviewer:
         media_storage: MediaStorage,
     ):
         self.clock = hs.get_clock()
+        self.hs = hs
         self.filepaths = media_repo.filepaths
         self.max_spider_size = hs.config.media.max_spider_size
         self.server_name = hs.hostname
@@ -201,15 +201,14 @@ class UrlPreviewer:
         self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache(
             cache_name="url_previews",
             server_name=self.server_name,
+            hs=self.hs,
             clock=self.clock,
             # don't spider URLs more often than once an hour
             expiry_ms=ONE_HOUR,
         )
 
         if self._worker_run_media_background_jobs:
-            self.clock.looping_call(
-                self._start_expire_url_cache_data, 10 * 1000
-            )
+            self.clock.looping_call(self._start_expire_url_cache_data, 10 * 1000)
 
     async def preview(self, url: str, user: UserID, ts: int) -> bytes:
         # the in-memory cache:
@@ -739,8 +738,8 @@ class UrlPreviewer:
         return open_graph_result, oembed_response.author_name, expiration_ms
 
     def _start_expire_url_cache_data(self) -> Deferred:
-        return run_as_background_process(
-            "expire_url_cache_data", self.server_name, self._expire_url_cache_data
+        return self.hs.run_as_background_process(
+            "expire_url_cache_data", self._expire_url_cache_data
         )
 
     async def _expire_url_cache_data(self) -> None:
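The url_previewer changes follow the same mechanical migration seen in media_repository.py and repeated throughout this release: the free function run_as_background_process(desc, server_name, func) becomes the HomeServer method hs.run_as_background_process(desc, func), which supplies the server name itself and tracks the process for clean shutdown. A before/after sketch; the fetch_stats task name and _fetch_stats method are illustrative:

# Before (1.139): free function, explicit server-name argument, untracked.
from synapse.metrics.background_process_metrics import run_as_background_process

def start_task_old(self) -> "Deferred":
    return run_as_background_process(
        "fetch_stats",       # metrics label
        self.server_name,    # explicit server name
        self._fetch_stats,
    )

# After (1.140): HomeServer method; the hs instance supplies its own server
# name and registers the process so it can be cancelled on shutdown.
def start_task_new(self) -> "Deferred":
    return self.hs.run_as_background_process(
        "fetch_stats",
        self._fetch_stats,
    )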
synapse/metrics/_gc.py
CHANGED
@@ -138,7 +138,9 @@ def install_gc_manager() -> None:
             gc_time.labels(i).observe(end - start)
             gc_unreachable.labels(i).set(unreachable)
 
-    gc_task = task.LoopingCall(_maybe_gc)
+    # We can ignore the lint here since this looping call does not hold a `HomeServer`
+    # reference so can be cleaned up by other means on shutdown.
+    gc_task = task.LoopingCall(_maybe_gc)  # type: ignore[prefer-synapse-clock-looping-call]
     gc_task.start(0.1)
 
 
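The type: ignore[prefer-synapse-clock-looping-call] comment refers to a custom lint that steers periodic work through the HomeServer's Clock, whose looping calls are tracked and cancelled at shutdown; the GC loop opts out because it holds no HomeServer reference. A sketch of the two patterns, with do_periodic_work and the intervals as illustrative placeholders:

from twisted.internet import task

def do_periodic_work() -> None:
    ...  # illustrative periodic task

# Preferred: schedule through the HomeServer's Clock (interval in ms), so the
# call is tracked and cleaned up when the HomeServer shuts down.
def schedule_periodic(hs) -> None:
    hs.get_clock().looping_call(do_periodic_work, 10 * 1000)

# Opt-out: a raw Twisted LoopingCall (interval in seconds) is invisible to
# that tracking, hence the explicit lint suppression in _gc.py.
loop = task.LoopingCall(do_periodic_work)  # type: ignore[prefer-synapse-clock-looping-call]
loop.start(10.0)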
synapse/metrics/background_process_metrics.py
CHANGED

@@ -20,7 +20,7 @@
 
 import logging
 import threading
-from contextlib import nullcontext
+from contextlib import contextmanager, nullcontext
 from functools import wraps
 from types import TracebackType
 from typing import (
@@ -28,7 +28,9 @@ from typing import (
     Any,
     Awaitable,
     Callable,
+    ContextManager,
     Dict,
+    Generator,
     Iterable,
     Optional,
     Protocol,
@@ -49,7 +51,12 @@ from synapse.logging.context import (
     LoggingContext,
     PreserveLoggingContext,
 )
-from synapse.logging.opentracing import SynapseTags, start_active_span
+from synapse.logging.opentracing import (
+    SynapseTags,
+    active_span,
+    start_active_span,
+    start_active_span_follows_from,
+)
 from synapse.metrics import SERVER_NAME_LABEL
 from synapse.metrics._types import Collector
 
@@ -59,6 +66,13 @@ if TYPE_CHECKING:
     # Old versions don't have `LiteralString`
     from typing_extensions import LiteralString
 
+    from synapse.server import HomeServer
+
+try:
+    import opentracing
+except ImportError:
+    opentracing = None  # type: ignore[assignment]
+
 
 logger = logging.getLogger(__name__)
 
@@ -216,6 +230,7 @@ def run_as_background_process(
     func: Callable[..., Awaitable[Optional[R]]],
     *args: Any,
     bg_start_span: bool = True,
+    test_only_tracer: Optional["opentracing.Tracer"] = None,
     **kwargs: Any,
 ) -> "defer.Deferred[Optional[R]]":
     """Run the given function in its own logcontext, with resource metrics
@@ -241,6 +256,8 @@ def run_as_background_process(
         bg_start_span: Whether to start an opentracing span. Defaults to True.
             Should only be disabled for processes that will not log to or tag
             a span.
+        test_only_tracer: Set the OpenTracing tracer to use. This is only useful for
+            tests.
         args: positional args for func
         kwargs: keyword args for func
 
@@ -250,6 +267,12 @@ def run_as_background_process(
         rules.
     """
 
+    # Since we track the tracing scope in the `LoggingContext`, before we move to the
+    # sentinel logcontext (or a new `LoggingContext`), grab the currently active
+    # tracing span (if any) so that we can create a cross-link to the background process
+    # trace.
+    original_active_tracing_span = active_span(tracer=test_only_tracer)
+
     async def run() -> Optional[R]:
         with _bg_metrics_lock:
             count = _background_process_counts.get(desc, 0)
@@ -264,15 +287,101 @@ def run_as_background_process(
 
         with BackgroundProcessLoggingContext(
             name=desc, server_name=server_name, instance_id=count
-        ) as context:
+        ) as logging_context:
             try:
                 if bg_start_span:
-                    ctx = start_active_span(
-                        f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
-                    )
+                    # If there is already an active span (e.g. because this background
+                    # process was started as part of handling a request for example),
+                    # because this is a long-running background task that may serve a
+                    # broader purpose than the request that kicked it off, we don't want
+                    # it to be a direct child of the currently active trace connected to
+                    # the request. We only want a loose reference to jump between the
+                    # traces.
+                    #
+                    # For example, when making a `/messages` request, when approaching a
+                    # gap, we may kick off a background process to fetch missing events
+                    # from federation. The `/messages` request trace should't include
+                    # the entire time taken and details around fetching the missing
+                    # events since the request doesn't rely on the result, it was just
+                    # part of the heuristic to initiate things.
+                    #
+                    # We don't care about the value from the context manager as it's not
+                    # used (so we just use `Any` for the type). Ideally, we'd be able to
+                    # mark this as unused like an `assert_never` of sorts.
+                    tracing_scope: ContextManager[Any]
+                    if original_active_tracing_span is not None:
+                        # With the OpenTracing client that we're using, it's impossible to
+                        # create a disconnected root span while also providing `references`
+                        # so we first create a bare root span, then create a child span that
+                        # includes the references that we want.
+                        root_tracing_scope = start_active_span(
+                            f"bgproc.{desc}",
+                            tags={SynapseTags.REQUEST_ID: str(logging_context)},
+                            # Create a root span for the background process (disconnected
+                            # from other spans)
+                            ignore_active_span=True,
+                            tracer=test_only_tracer,
+                        )
+
+                        # Also add a span in the original request trace that cross-links
+                        # to background process trace. We immediately finish the span as
+                        # this is just a marker to follow where the real work is being
+                        # done.
+                        #
+                        # In OpenTracing, `FOLLOWS_FROM` indicates parent-child
+                        # relationship whereas we just want a cross-link to the
+                        # downstream trace. This is a bit hacky, but the closest we
+                        # can get to in OpenTracing land. If we ever migrate to
+                        # OpenTelemetry, we should use a normal `Link` for this.
+                        with start_active_span_follows_from(
+                            f"start_bgproc.{desc}",
+                            child_of=original_active_tracing_span,
+                            ignore_active_span=True,
+                            # Create the `FOLLOWS_FROM` reference to the background
+                            # process span so there is a loose coupling between the two
+                            # traces and it's easy to jump between.
+                            contexts=[root_tracing_scope.span.context],
+                            tracer=test_only_tracer,
+                        ):
+                            pass
+
+                        # Then start the tracing scope that we're going to use for
+                        # the duration of the background process within the root
+                        # span we just created.
+                        child_tracing_scope = start_active_span_follows_from(
+                            f"bgproc_child.{desc}",
+                            child_of=root_tracing_scope.span,
+                            ignore_active_span=True,
+                            tags={SynapseTags.REQUEST_ID: str(logging_context)},
+                            # Create the `FOLLOWS_FROM` reference to the request's
+                            # span so there is a loose coupling between the two
+                            # traces and it's easy to jump between.
+                            contexts=[original_active_tracing_span.context],
+                            tracer=test_only_tracer,
+                        )
+
+                        # For easy usage down below, we create a context manager that
+                        # combines both scopes.
+                        @contextmanager
+                        def combined_context_manager() -> Generator[None, None, None]:
+                            with root_tracing_scope, child_tracing_scope:
+                                yield
+
+                        tracing_scope = combined_context_manager()
+
+                    else:
+                        # Otherwise, when there is no active span, we will be creating
+                        # a disconnected root span already and we don't have to
+                        # worry about cross-linking to anything.
+                        tracing_scope = start_active_span(
+                            f"bgproc.{desc}",
+                            tags={SynapseTags.REQUEST_ID: str(logging_context)},
+                            tracer=test_only_tracer,
+                        )
                 else:
-                    ctx = nullcontext()  # type: ignore[assignment]
-                with ctx:
+                    tracing_scope = nullcontext()
+
+                with tracing_scope:
                     return await func(*args, **kwargs)
             except Exception:
                 logger.exception(
@@ -308,11 +417,11 @@ def run_as_background_process(
 P = ParamSpec("P")
 
 
-class HasServerName(Protocol):
-    server_name: str
+class HasHomeServer(Protocol):
+    hs: "HomeServer"
     """
-    The homeserver name that this cache is associated with (used to label the
-    metric).
+    The homeserver that this cache is associated with (used to label the metric and
+    track backgroun processes for clean shutdown).
    """
 
 
@@ -342,27 +451,22 @@ def wrap_as_background_process(
     """
 
     def wrapper(
-        func: Callable[Concatenate[HasServerName, P], Awaitable[Optional[R]]],
+        func: Callable[Concatenate[HasHomeServer, P], Awaitable[Optional[R]]],
     ) -> Callable[P, "defer.Deferred[Optional[R]]"]:
         @wraps(func)
         def wrapped_func(
-            self: HasServerName, *args: P.args, **kwargs: P.kwargs
+            self: HasHomeServer, *args: P.args, **kwargs: P.kwargs
         ) -> "defer.Deferred[Optional[R]]":
-            assert self.server_name is not None, (
-                "The `server_name` attribute must be set on the object where `@wrap_as_background_process` decorator is used."
+            assert self.hs is not None, (
+                "The `hs` attribute must be set on the object where `@wrap_as_background_process` decorator is used."
             )
 
-            return run_as_background_process(
+            return self.hs.run_as_background_process(
                 desc,
-                self.server_name,
                 func,
                 self,
                 *args,
-
-                # Argument 4 to "run_as_background_process" has incompatible type
-                # "**P.kwargs"; expected "bool"
-                # See https://github.com/python/mypy/issues/8862
-                **kwargs,  # type: ignore[arg-type]
+                **kwargs,
             )
 
     # There are some shenanigans here, because we're decorating a method but
@@ -401,7 +505,7 @@ class BackgroundProcessLoggingContext(LoggingContext):
         """
         if instance_id is None:
             instance_id = id(self)
-        super().__init__("%s-%s" % (name, instance_id))
+        super().__init__(name="%s-%s" % (name, instance_id), server_name=server_name)
         self._proc: Optional[_BackgroundProcess] = _BackgroundProcess(
             desc=name, server_name=server_name, ctx=self
         )
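With HasServerName replaced by the HasHomeServer protocol, any class decorated with @wrap_as_background_process must now expose an hs attribute rather than just a server_name string. A conforming-class sketch; the class and task names are illustrative:

from synapse.metrics.background_process_metrics import wrap_as_background_process

class ExampleCleaner:  # illustrative class
    def __init__(self, hs: "HomeServer") -> None:
        # Required: the decorator asserts `self.hs` is set and routes the call
        # through hs.run_as_background_process() so the task is tracked.
        self.hs = hs

    @wrap_as_background_process("cleanup_stale_entries")
    async def cleanup_stale_entries(self) -> None:
        ...  # runs in its own logcontext, with resource usage metrics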
synapse/metrics/common_usage_metrics.py
CHANGED

@@ -23,7 +23,6 @@ from typing import TYPE_CHECKING
 import attr
 
 from synapse.metrics import SERVER_NAME_LABEL
-from synapse.metrics.background_process_metrics import run_as_background_process
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -52,6 +51,7 @@ class CommonUsageMetricsManager:
         self.server_name = hs.hostname
         self._store = hs.get_datastores().main
         self._clock = hs.get_clock()
+        self._hs = hs
 
     async def get_metrics(self) -> CommonUsageMetrics:
         """Get the CommonUsageMetrics object. If no collection has happened yet, do it
@@ -64,16 +64,14 @@ class CommonUsageMetricsManager:
 
     async def setup(self) -> None:
         """Keep the gauges for common usage metrics up to date."""
-        run_as_background_process(
+        self._hs.run_as_background_process(
             desc="common_usage_metrics_update_gauges",
-            server_name=self.server_name,
             func=self._update_gauges,
         )
         self._clock.looping_call(
-            run_as_background_process,
+            self._hs.run_as_background_process,
             5 * 60 * 1000,
             desc="common_usage_metrics_update_gauges",
-            server_name=self.server_name,
             func=self._update_gauges,
         )
 
synapse/module_api/__init__.py
CHANGED
@@ -43,6 +43,7 @@ from typing_extensions import Concatenate, ParamSpec
 
 from twisted.internet import defer
 from twisted.internet.interfaces import IDelayedCall
+from twisted.python.threadpool import ThreadPool
 from twisted.web.resource import Resource
 
 from synapse.api import errors
@@ -79,6 +80,7 @@ from synapse.http.servlet import parse_json_object_from_request
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import (
     defer_to_thread,
+    defer_to_threadpool,
     make_deferred_yieldable,
     run_in_background,
 )
@@ -275,7 +277,15 @@ def run_as_background_process(
     # function instead.
     stub_server_name = "synapse_module_running_from_unknown_server"
 
-    return _run_as_background_process(
+    # Ignore the linter error here. Since this is leveraging the
+    # `run_as_background_process` function directly and we don't want to break the
+    # module api, we need to keep the function signature the same. This means we don't
+    # have access to the running `HomeServer` and cannot track this background process
+    # for cleanup during shutdown.
+    # This is not an issue during runtime and is only potentially problematic if the
+    # application cares about being able to garbage collect `HomeServer` instances
+    # during runtime.
+    return _run_as_background_process(  # type: ignore[untracked-background-process]
         desc,
         stub_server_name,
         func,
@@ -1402,7 +1412,7 @@ class ModuleApi:
 
         if self._hs.config.worker.run_background_tasks or run_on_all_instances:
             self._clock.looping_call(
-                self.run_as_background_process,
+                self._hs.run_as_background_process,
                 msec,
                 desc,
                 lambda: maybe_awaitable(f(*args, **kwargs)),
@@ -1460,7 +1470,7 @@ class ModuleApi:
         return self._clock.call_later(
             # convert ms to seconds as needed by call_later.
             msec * 0.001,
-            self.run_as_background_process,
+            self._hs.run_as_background_process,
             desc,
             lambda: maybe_awaitable(f(*args, **kwargs)),
         )
@@ -1701,8 +1711,8 @@ class ModuleApi:
         Note that the returned Deferred does not follow the synapse logcontext
         rules.
         """
-        return run_as_background_process(
-            desc, self._hs.hostname, func, *args, bg_start_span=bg_start_span, **kwargs
+        return self._hs.run_as_background_process(
+            desc, func, *args, bg_start_span=bg_start_span, **kwargs
         )
 
     async def defer_to_thread(
@@ -1725,6 +1735,33 @@ class ModuleApi:
         """
         return await defer_to_thread(self._hs.get_reactor(), f, *args, **kwargs)
 
+    async def defer_to_threadpool(
+        self,
+        threadpool: ThreadPool,
+        f: Callable[P, T],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> T:
+        """Runs the given function in a separate thread from the given thread pool.
+
+        Allows specifying a custom thread pool instead of using the default Synapse
+        one. To use the default Synapse threadpool, use `defer_to_thread` instead.
+
+        Added in Synapse v1.140.0.
+
+        Args:
+            threadpool: The thread pool to use.
+            f: The function to run.
+            args: The function's arguments.
+            kwargs: The function's keyword arguments.
+
+        Returns:
+            The return value of the function once ran in a thread.
+        """
+        return await defer_to_threadpool(
+            self._hs.get_reactor(), threadpool, f, *args, **kwargs
+        )
+
     async def check_username(self, username: str) -> None:
         """Checks if the provided username uses the grammar defined in the Matrix
         specification, and is already being used by an existing user.