matrix-synapse 1.139.2__cp39-abi3-manylinux_2_28_aarch64.whl → 1.140.0rc1__cp39-abi3-manylinux_2_28_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrix-synapse might be problematic. Click here for more details.
- {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/METADATA +5 -3
- {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/RECORD +157 -154
- synapse/_scripts/generate_workers_map.py +6 -1
- synapse/_scripts/synapse_port_db.py +0 -2
- synapse/_scripts/update_synapse_database.py +1 -6
- synapse/api/auth/base.py +1 -3
- synapse/api/auth/mas.py +6 -8
- synapse/api/auth/msc3861_delegated.py +6 -8
- synapse/api/errors.py +3 -0
- synapse/app/_base.py +101 -39
- synapse/app/admin_cmd.py +2 -4
- synapse/app/appservice.py +1 -1
- synapse/app/client_reader.py +1 -1
- synapse/app/event_creator.py +1 -1
- synapse/app/federation_reader.py +1 -1
- synapse/app/federation_sender.py +1 -1
- synapse/app/frontend_proxy.py +1 -1
- synapse/app/generic_worker.py +17 -11
- synapse/app/homeserver.py +85 -47
- synapse/app/media_repository.py +1 -1
- synapse/app/phone_stats_home.py +16 -14
- synapse/app/pusher.py +1 -1
- synapse/app/synchrotron.py +1 -1
- synapse/app/user_dir.py +1 -1
- synapse/appservice/__init__.py +29 -2
- synapse/appservice/scheduler.py +8 -8
- synapse/config/_base.py +32 -14
- synapse/config/_base.pyi +5 -3
- synapse/config/experimental.py +3 -0
- synapse/config/homeserver.py +27 -1
- synapse/config/logger.py +3 -4
- synapse/config/matrixrtc.py +67 -0
- synapse/crypto/keyring.py +18 -4
- synapse/events/auto_accept_invites.py +0 -1
- synapse/federation/federation_client.py +39 -0
- synapse/federation/federation_server.py +1 -1
- synapse/federation/send_queue.py +3 -0
- synapse/federation/sender/__init__.py +24 -8
- synapse/federation/sender/per_destination_queue.py +31 -8
- synapse/federation/sender/transaction_manager.py +12 -0
- synapse/federation/transport/client.py +29 -0
- synapse/handlers/account_validity.py +2 -4
- synapse/handlers/appservice.py +5 -7
- synapse/handlers/deactivate_account.py +2 -3
- synapse/handlers/delayed_events.py +10 -13
- synapse/handlers/device.py +14 -14
- synapse/handlers/e2e_keys.py +4 -3
- synapse/handlers/federation.py +7 -11
- synapse/handlers/federation_event.py +5 -6
- synapse/handlers/message.py +16 -10
- synapse/handlers/pagination.py +3 -7
- synapse/handlers/presence.py +21 -25
- synapse/handlers/profile.py +1 -1
- synapse/handlers/read_marker.py +3 -1
- synapse/handlers/register.py +8 -1
- synapse/handlers/room.py +13 -4
- synapse/handlers/room_member.py +11 -7
- synapse/handlers/room_policy.py +96 -2
- synapse/handlers/sso.py +1 -1
- synapse/handlers/stats.py +5 -3
- synapse/handlers/sync.py +20 -13
- synapse/handlers/typing.py +5 -10
- synapse/handlers/user_directory.py +12 -11
- synapse/handlers/worker_lock.py +19 -15
- synapse/http/client.py +18 -13
- synapse/http/federation/matrix_federation_agent.py +6 -1
- synapse/http/federation/well_known_resolver.py +3 -1
- synapse/http/matrixfederationclient.py +50 -11
- synapse/http/proxy.py +2 -2
- synapse/http/server.py +36 -2
- synapse/http/site.py +109 -17
- synapse/logging/context.py +165 -63
- synapse/logging/opentracing.py +30 -6
- synapse/logging/scopecontextmanager.py +161 -0
- synapse/media/_base.py +2 -1
- synapse/media/media_repository.py +20 -6
- synapse/media/url_previewer.py +5 -6
- synapse/metrics/_gc.py +3 -1
- synapse/metrics/background_process_metrics.py +128 -24
- synapse/metrics/common_usage_metrics.py +3 -5
- synapse/module_api/__init__.py +42 -5
- synapse/notifier.py +10 -3
- synapse/push/emailpusher.py +5 -4
- synapse/push/httppusher.py +6 -6
- synapse/push/pusherpool.py +3 -8
- synapse/replication/http/devices.py +0 -41
- synapse/replication/tcp/client.py +8 -5
- synapse/replication/tcp/handler.py +2 -3
- synapse/replication/tcp/protocol.py +14 -7
- synapse/replication/tcp/redis.py +16 -11
- synapse/replication/tcp/resource.py +5 -4
- synapse/replication/tcp/streams/__init__.py +2 -0
- synapse/res/providers.json +6 -5
- synapse/rest/__init__.py +2 -0
- synapse/rest/admin/__init__.py +4 -0
- synapse/rest/admin/events.py +69 -0
- synapse/rest/admin/media.py +70 -2
- synapse/rest/client/matrixrtc.py +52 -0
- synapse/rest/client/push_rule.py +1 -1
- synapse/rest/client/room.py +2 -3
- synapse/rest/client/sync.py +1 -0
- synapse/rest/client/transactions.py +1 -1
- synapse/server.py +271 -38
- synapse/server_notices/server_notices_manager.py +1 -0
- synapse/state/__init__.py +4 -1
- synapse/storage/_base.py +1 -1
- synapse/storage/background_updates.py +8 -3
- synapse/storage/controllers/persist_events.py +4 -3
- synapse/storage/controllers/purge_events.py +2 -3
- synapse/storage/controllers/state.py +5 -5
- synapse/storage/database.py +12 -7
- synapse/storage/databases/main/__init__.py +7 -2
- synapse/storage/databases/main/cache.py +4 -3
- synapse/storage/databases/main/censor_events.py +1 -1
- synapse/storage/databases/main/client_ips.py +9 -8
- synapse/storage/databases/main/deviceinbox.py +7 -6
- synapse/storage/databases/main/devices.py +4 -4
- synapse/storage/databases/main/end_to_end_keys.py +6 -3
- synapse/storage/databases/main/event_federation.py +7 -6
- synapse/storage/databases/main/event_push_actions.py +13 -13
- synapse/storage/databases/main/events_bg_updates.py +1 -1
- synapse/storage/databases/main/events_worker.py +6 -8
- synapse/storage/databases/main/lock.py +17 -13
- synapse/storage/databases/main/media_repository.py +2 -2
- synapse/storage/databases/main/metrics.py +6 -6
- synapse/storage/databases/main/monthly_active_users.py +3 -4
- synapse/storage/databases/main/receipts.py +1 -1
- synapse/storage/databases/main/registration.py +18 -19
- synapse/storage/databases/main/roommember.py +1 -1
- synapse/storage/databases/main/session.py +3 -3
- synapse/storage/databases/main/sliding_sync.py +2 -2
- synapse/storage/databases/main/transactions.py +3 -3
- synapse/storage/databases/state/store.py +2 -0
- synapse/synapse_rust/http_client.pyi +4 -0
- synapse/synapse_rust.abi3.so +0 -0
- synapse/util/async_helpers.py +36 -24
- synapse/util/batching_queue.py +16 -6
- synapse/util/caches/__init__.py +1 -1
- synapse/util/caches/deferred_cache.py +4 -0
- synapse/util/caches/descriptors.py +14 -2
- synapse/util/caches/dictionary_cache.py +6 -1
- synapse/util/caches/expiringcache.py +16 -5
- synapse/util/caches/lrucache.py +14 -26
- synapse/util/caches/response_cache.py +11 -1
- synapse/util/clock.py +215 -39
- synapse/util/constants.py +2 -0
- synapse/util/daemonize.py +5 -1
- synapse/util/distributor.py +9 -5
- synapse/util/metrics.py +35 -6
- synapse/util/ratelimitutils.py +4 -1
- synapse/util/retryutils.py +7 -4
- synapse/util/task_scheduler.py +11 -14
- synapse/logging/filter.py +0 -38
- {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/AUTHORS.rst +0 -0
- {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-AGPL-3.0 +0 -0
- {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/LICENSE-COMMERCIAL +0 -0
- {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/WHEEL +0 -0
- {matrix_synapse-1.139.2.dist-info → matrix_synapse-1.140.0rc1.dist-info}/entry_points.txt +0 -0
synapse/logging/context.py
CHANGED
|
@@ -33,7 +33,6 @@ See doc/log_contexts.rst for details on how this works.
|
|
|
33
33
|
import logging
|
|
34
34
|
import threading
|
|
35
35
|
import typing
|
|
36
|
-
import warnings
|
|
37
36
|
from types import TracebackType
|
|
38
37
|
from typing import (
|
|
39
38
|
TYPE_CHECKING,
|
|
@@ -55,11 +54,29 @@ from typing_extensions import ParamSpec
|
|
|
55
54
|
from twisted.internet import defer, threads
|
|
56
55
|
from twisted.python.threadpool import ThreadPool
|
|
57
56
|
|
|
57
|
+
from synapse.logging.loggers import ExplicitlyConfiguredLogger
|
|
58
|
+
from synapse.util.stringutils import random_string
|
|
59
|
+
|
|
58
60
|
if TYPE_CHECKING:
|
|
61
|
+
from synapse.logging.scopecontextmanager import _LogContextScope
|
|
59
62
|
from synapse.types import ISynapseReactor
|
|
60
63
|
|
|
61
64
|
logger = logging.getLogger(__name__)
|
|
62
65
|
|
|
66
|
+
original_logger_class = logging.getLoggerClass()
|
|
67
|
+
logging.setLoggerClass(ExplicitlyConfiguredLogger)
|
|
68
|
+
logcontext_debug_logger = logging.getLogger("synapse.logging.context.debug")
|
|
69
|
+
"""
|
|
70
|
+
A logger for debugging when the logcontext switches.
|
|
71
|
+
|
|
72
|
+
Because this is very noisy and probably something only developers want to see when
|
|
73
|
+
debugging logcontext problems, we want people to explictly opt-in before seeing anything
|
|
74
|
+
in the logs. Requires explicitly setting `synapse.logging.context.debug` in the logging
|
|
75
|
+
configuration and does not inherit the log level from the parent logger.
|
|
76
|
+
"""
|
|
77
|
+
# Restore the original logger class
|
|
78
|
+
logging.setLoggerClass(original_logger_class)
|
|
79
|
+
|
|
63
80
|
try:
|
|
64
81
|
import resource
|
|
65
82
|
|
|
@@ -238,13 +255,22 @@ class _Sentinel:
|
|
|
238
255
|
we should always know which server the logs are coming from.
|
|
239
256
|
"""
|
|
240
257
|
|
|
241
|
-
__slots__ = [
|
|
258
|
+
__slots__ = [
|
|
259
|
+
"previous_context",
|
|
260
|
+
"finished",
|
|
261
|
+
"scope",
|
|
262
|
+
"server_name",
|
|
263
|
+
"request",
|
|
264
|
+
"tag",
|
|
265
|
+
]
|
|
242
266
|
|
|
243
267
|
def __init__(self) -> None:
|
|
244
268
|
# Minimal set for compatibility with LoggingContext
|
|
245
269
|
self.previous_context = None
|
|
246
270
|
self.finished = False
|
|
271
|
+
self.server_name = "unknown_server_from_sentinel_context"
|
|
247
272
|
self.request = None
|
|
273
|
+
self.scope = None
|
|
248
274
|
self.tag = None
|
|
249
275
|
|
|
250
276
|
def __str__(self) -> str:
|
|
@@ -282,14 +308,19 @@ class LoggingContext:
|
|
|
282
308
|
child to the parent
|
|
283
309
|
|
|
284
310
|
Args:
|
|
285
|
-
name: Name for the context for logging.
|
|
286
|
-
|
|
311
|
+
name: Name for the context for logging.
|
|
312
|
+
server_name: The name of the server this context is associated with
|
|
313
|
+
(`config.server.server_name` or `hs.hostname`)
|
|
287
314
|
parent_context (LoggingContext|None): The parent of the new context
|
|
315
|
+
request: Synapse Request Context object. Useful to associate all the logs
|
|
316
|
+
happening to a given request.
|
|
317
|
+
|
|
288
318
|
"""
|
|
289
319
|
|
|
290
320
|
__slots__ = [
|
|
291
321
|
"previous_context",
|
|
292
322
|
"name",
|
|
323
|
+
"server_name",
|
|
293
324
|
"parent_context",
|
|
294
325
|
"_resource_usage",
|
|
295
326
|
"usage_start",
|
|
@@ -297,11 +328,14 @@ class LoggingContext:
|
|
|
297
328
|
"finished",
|
|
298
329
|
"request",
|
|
299
330
|
"tag",
|
|
331
|
+
"scope",
|
|
300
332
|
]
|
|
301
333
|
|
|
302
334
|
def __init__(
|
|
303
335
|
self,
|
|
304
|
-
|
|
336
|
+
*,
|
|
337
|
+
name: str,
|
|
338
|
+
server_name: str,
|
|
305
339
|
parent_context: "Optional[LoggingContext]" = None,
|
|
306
340
|
request: Optional[ContextRequest] = None,
|
|
307
341
|
) -> None:
|
|
@@ -314,9 +348,12 @@ class LoggingContext:
|
|
|
314
348
|
# if the context is not currently active.
|
|
315
349
|
self.usage_start: Optional[resource.struct_rusage] = None
|
|
316
350
|
|
|
351
|
+
self.name = name
|
|
352
|
+
self.server_name = server_name
|
|
317
353
|
self.main_thread = get_thread_id()
|
|
318
354
|
self.request = None
|
|
319
355
|
self.tag = ""
|
|
356
|
+
self.scope: Optional["_LogContextScope"] = None
|
|
320
357
|
|
|
321
358
|
# keep track of whether we have hit the __exit__ block for this context
|
|
322
359
|
# (suggesting that the the thing that created the context thinks it should
|
|
@@ -325,69 +362,24 @@ class LoggingContext:
|
|
|
325
362
|
|
|
326
363
|
self.parent_context = parent_context
|
|
327
364
|
|
|
365
|
+
# Inherit some fields from the parent context
|
|
328
366
|
if self.parent_context is not None:
|
|
329
|
-
#
|
|
367
|
+
# which request this corresponds to
|
|
330
368
|
self.request = self.parent_context.request
|
|
331
369
|
|
|
370
|
+
# we also track the current scope:
|
|
371
|
+
self.scope = self.parent_context.scope
|
|
372
|
+
|
|
332
373
|
if request is not None:
|
|
333
374
|
# the request param overrides the request from the parent context
|
|
334
375
|
self.request = request
|
|
335
376
|
|
|
336
|
-
# if we don't have a `name`, but do have a parent context, use its name.
|
|
337
|
-
if self.parent_context and name is None:
|
|
338
|
-
name = str(self.parent_context)
|
|
339
|
-
if name is None:
|
|
340
|
-
raise ValueError(
|
|
341
|
-
"LoggingContext must be given either a name or a parent context"
|
|
342
|
-
)
|
|
343
|
-
self.name = name
|
|
344
|
-
|
|
345
377
|
def __str__(self) -> str:
|
|
346
378
|
return self.name
|
|
347
379
|
|
|
348
|
-
@classmethod
|
|
349
|
-
def current_context(cls) -> LoggingContextOrSentinel:
|
|
350
|
-
"""Get the current logging context from thread local storage
|
|
351
|
-
|
|
352
|
-
This exists for backwards compatibility. ``current_context()`` should be
|
|
353
|
-
called directly.
|
|
354
|
-
|
|
355
|
-
Returns:
|
|
356
|
-
The current logging context
|
|
357
|
-
"""
|
|
358
|
-
warnings.warn(
|
|
359
|
-
"synapse.logging.context.LoggingContext.current_context() is deprecated "
|
|
360
|
-
"in favor of synapse.logging.context.current_context().",
|
|
361
|
-
DeprecationWarning,
|
|
362
|
-
stacklevel=2,
|
|
363
|
-
)
|
|
364
|
-
return current_context()
|
|
365
|
-
|
|
366
|
-
@classmethod
|
|
367
|
-
def set_current_context(
|
|
368
|
-
cls, context: LoggingContextOrSentinel
|
|
369
|
-
) -> LoggingContextOrSentinel:
|
|
370
|
-
"""Set the current logging context in thread local storage
|
|
371
|
-
|
|
372
|
-
This exists for backwards compatibility. ``set_current_context()`` should be
|
|
373
|
-
called directly.
|
|
374
|
-
|
|
375
|
-
Args:
|
|
376
|
-
context: The context to activate.
|
|
377
|
-
|
|
378
|
-
Returns:
|
|
379
|
-
The context that was previously active
|
|
380
|
-
"""
|
|
381
|
-
warnings.warn(
|
|
382
|
-
"synapse.logging.context.LoggingContext.set_current_context() is deprecated "
|
|
383
|
-
"in favor of synapse.logging.context.set_current_context().",
|
|
384
|
-
DeprecationWarning,
|
|
385
|
-
stacklevel=2,
|
|
386
|
-
)
|
|
387
|
-
return set_current_context(context)
|
|
388
|
-
|
|
389
380
|
def __enter__(self) -> "LoggingContext":
|
|
390
381
|
"""Enters this logging context into thread local storage"""
|
|
382
|
+
logcontext_debug_logger.debug("LoggingContext(%s).__enter__", self.name)
|
|
391
383
|
old_context = set_current_context(self)
|
|
392
384
|
if self.previous_context != old_context:
|
|
393
385
|
logcontext_error(
|
|
@@ -410,6 +402,9 @@ class LoggingContext:
|
|
|
410
402
|
Returns:
|
|
411
403
|
None to avoid suppressing any exceptions that were thrown.
|
|
412
404
|
"""
|
|
405
|
+
logcontext_debug_logger.debug(
|
|
406
|
+
"LoggingContext(%s).__exit__ --> %s", self.name, self.previous_context
|
|
407
|
+
)
|
|
413
408
|
current = set_current_context(self.previous_context)
|
|
414
409
|
if current is not self:
|
|
415
410
|
if current is SENTINEL_CONTEXT:
|
|
@@ -588,7 +583,26 @@ class LoggingContextFilter(logging.Filter):
|
|
|
588
583
|
record.
|
|
589
584
|
"""
|
|
590
585
|
|
|
591
|
-
def __init__(
|
|
586
|
+
def __init__(
|
|
587
|
+
self,
|
|
588
|
+
# `request` is here for backwards compatibility since we previously recommended
|
|
589
|
+
# people manually configure `LoggingContextFilter` like the following.
|
|
590
|
+
#
|
|
591
|
+
# ```yaml
|
|
592
|
+
# filters:
|
|
593
|
+
# context:
|
|
594
|
+
# (): synapse.logging.context.LoggingContextFilter
|
|
595
|
+
# request: ""
|
|
596
|
+
# ```
|
|
597
|
+
#
|
|
598
|
+
# TODO: Since we now configure `LoggingContextFilter` automatically since #8051
|
|
599
|
+
# (2020-08-11), we could consider removing this useless parameter. This would
|
|
600
|
+
# require people to remove their own manual configuration of
|
|
601
|
+
# `LoggingContextFilter` as it would cause `TypeError: Filter.__init__() got an
|
|
602
|
+
# unexpected keyword argument 'request'` -> `ValueError: Unable to configure
|
|
603
|
+
# filter 'context'`
|
|
604
|
+
request: str = "",
|
|
605
|
+
):
|
|
592
606
|
self._default_request = request
|
|
593
607
|
|
|
594
608
|
def filter(self, record: logging.LogRecord) -> Literal[True]:
|
|
@@ -598,11 +612,13 @@ class LoggingContextFilter(logging.Filter):
|
|
|
598
612
|
"""
|
|
599
613
|
context = current_context()
|
|
600
614
|
record.request = self._default_request
|
|
615
|
+
record.server_name = "unknown_server_from_no_context"
|
|
601
616
|
|
|
602
617
|
# context should never be None, but if it somehow ends up being, then
|
|
603
618
|
# we end up in a death spiral of infinite loops, so let's check, for
|
|
604
619
|
# robustness' sake.
|
|
605
620
|
if context is not None:
|
|
621
|
+
record.server_name = context.server_name
|
|
606
622
|
# Logging is interested in the request ID. Note that for backwards
|
|
607
623
|
# compatibility this is stored as the "request" on the record.
|
|
608
624
|
record.request = str(context)
|
|
@@ -637,14 +653,21 @@ class PreserveLoggingContext:
|
|
|
637
653
|
reactor back to the code).
|
|
638
654
|
"""
|
|
639
655
|
|
|
640
|
-
__slots__ = ["_old_context", "_new_context"]
|
|
656
|
+
__slots__ = ["_old_context", "_new_context", "_instance_id"]
|
|
641
657
|
|
|
642
658
|
def __init__(
|
|
643
659
|
self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT
|
|
644
660
|
) -> None:
|
|
645
661
|
self._new_context = new_context
|
|
662
|
+
self._instance_id = random_string(5)
|
|
646
663
|
|
|
647
664
|
def __enter__(self) -> None:
|
|
665
|
+
logcontext_debug_logger.debug(
|
|
666
|
+
"PreserveLoggingContext(%s).__enter__ %s --> %s",
|
|
667
|
+
self._instance_id,
|
|
668
|
+
current_context(),
|
|
669
|
+
self._new_context,
|
|
670
|
+
)
|
|
648
671
|
self._old_context = set_current_context(self._new_context)
|
|
649
672
|
|
|
650
673
|
def __exit__(
|
|
@@ -653,6 +676,12 @@ class PreserveLoggingContext:
|
|
|
653
676
|
value: Optional[BaseException],
|
|
654
677
|
traceback: Optional[TracebackType],
|
|
655
678
|
) -> None:
|
|
679
|
+
logcontext_debug_logger.debug(
|
|
680
|
+
"PreserveLoggingContext(%s).__exit %s --> %s",
|
|
681
|
+
self._instance_id,
|
|
682
|
+
current_context(),
|
|
683
|
+
self._old_context,
|
|
684
|
+
)
|
|
656
685
|
context = set_current_context(self._old_context)
|
|
657
686
|
|
|
658
687
|
if context != self._new_context:
|
|
@@ -728,12 +757,15 @@ def nested_logging_context(suffix: str) -> LoggingContext:
|
|
|
728
757
|
"Starting nested logging context from sentinel context: metrics will be lost"
|
|
729
758
|
)
|
|
730
759
|
parent_context = None
|
|
760
|
+
server_name = "unknown_server_from_sentinel_context"
|
|
731
761
|
else:
|
|
732
762
|
assert isinstance(curr_context, LoggingContext)
|
|
733
763
|
parent_context = curr_context
|
|
764
|
+
server_name = parent_context.server_name
|
|
734
765
|
prefix = str(curr_context)
|
|
735
766
|
return LoggingContext(
|
|
736
|
-
prefix + "-" + suffix,
|
|
767
|
+
name=prefix + "-" + suffix,
|
|
768
|
+
server_name=server_name,
|
|
737
769
|
parent_context=parent_context,
|
|
738
770
|
)
|
|
739
771
|
|
|
@@ -829,7 +861,11 @@ def run_in_background(
|
|
|
829
861
|
Note that the returned Deferred does not follow the synapse logcontext
|
|
830
862
|
rules.
|
|
831
863
|
"""
|
|
864
|
+
instance_id = random_string(5)
|
|
832
865
|
calling_context = current_context()
|
|
866
|
+
logcontext_debug_logger.debug(
|
|
867
|
+
"run_in_background(%s): called with logcontext=%s", instance_id, calling_context
|
|
868
|
+
)
|
|
833
869
|
try:
|
|
834
870
|
# (kick off the task in the current context)
|
|
835
871
|
res = f(*args, **kwargs)
|
|
@@ -871,6 +907,11 @@ def run_in_background(
|
|
|
871
907
|
# to reset the logcontext to the sentinel logcontext as that would run
|
|
872
908
|
# immediately (remember our goal is to maintain the calling logcontext when we
|
|
873
909
|
# return).
|
|
910
|
+
logcontext_debug_logger.debug(
|
|
911
|
+
"run_in_background(%s): deferred already completed and the function should have maintained the logcontext %s",
|
|
912
|
+
instance_id,
|
|
913
|
+
calling_context,
|
|
914
|
+
)
|
|
874
915
|
return d
|
|
875
916
|
|
|
876
917
|
# Since the function we called may follow the Synapse logcontext rules (Rules for
|
|
@@ -881,6 +922,11 @@ def run_in_background(
|
|
|
881
922
|
#
|
|
882
923
|
# Our goal is to have the caller logcontext unchanged after firing off the
|
|
883
924
|
# background task and returning.
|
|
925
|
+
logcontext_debug_logger.debug(
|
|
926
|
+
"run_in_background(%s): restoring calling logcontext %s",
|
|
927
|
+
instance_id,
|
|
928
|
+
calling_context,
|
|
929
|
+
)
|
|
884
930
|
set_current_context(calling_context)
|
|
885
931
|
|
|
886
932
|
# If the function we called is playing nice and following the Synapse logcontext
|
|
@@ -896,7 +942,23 @@ def run_in_background(
|
|
|
896
942
|
# which is supposed to have a single entry and exit point. But
|
|
897
943
|
# by spawning off another deferred, we are effectively
|
|
898
944
|
# adding a new exit point.)
|
|
899
|
-
|
|
945
|
+
if logcontext_debug_logger.isEnabledFor(logging.DEBUG):
|
|
946
|
+
|
|
947
|
+
def _log_set_context_cb(
|
|
948
|
+
result: ResultT, context: LoggingContextOrSentinel
|
|
949
|
+
) -> ResultT:
|
|
950
|
+
logcontext_debug_logger.debug(
|
|
951
|
+
"run_in_background(%s): resetting logcontext to %s",
|
|
952
|
+
instance_id,
|
|
953
|
+
context,
|
|
954
|
+
)
|
|
955
|
+
set_current_context(context)
|
|
956
|
+
return result
|
|
957
|
+
|
|
958
|
+
d.addBoth(_log_set_context_cb, SENTINEL_CONTEXT)
|
|
959
|
+
else:
|
|
960
|
+
d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
|
|
961
|
+
|
|
900
962
|
return d
|
|
901
963
|
|
|
902
964
|
|
|
@@ -952,10 +1014,21 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]
|
|
|
952
1014
|
restores the old context once the awaitable completes (execution passes from the
|
|
953
1015
|
reactor back to the code).
|
|
954
1016
|
"""
|
|
1017
|
+
instance_id = random_string(5)
|
|
1018
|
+
logcontext_debug_logger.debug(
|
|
1019
|
+
"make_deferred_yieldable(%s): called with logcontext=%s",
|
|
1020
|
+
instance_id,
|
|
1021
|
+
current_context(),
|
|
1022
|
+
)
|
|
1023
|
+
|
|
955
1024
|
# The deferred has already completed
|
|
956
1025
|
if deferred.called and not deferred.paused:
|
|
957
1026
|
# it looks like this deferred is ready to run any callbacks we give it
|
|
958
1027
|
# immediately. We may as well optimise out the logcontext faffery.
|
|
1028
|
+
logcontext_debug_logger.debug(
|
|
1029
|
+
"make_deferred_yieldable(%s): deferred already completed and the function should have maintained the logcontext",
|
|
1030
|
+
instance_id,
|
|
1031
|
+
)
|
|
959
1032
|
return deferred
|
|
960
1033
|
|
|
961
1034
|
# Our goal is to have the caller logcontext unchanged after they yield/await the
|
|
@@ -967,8 +1040,31 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]
|
|
|
967
1040
|
# does) while the deferred runs in the reactor event loop, we reset the logcontext
|
|
968
1041
|
# and add a callback to the deferred to restore it so the caller's logcontext is
|
|
969
1042
|
# active when the deferred completes.
|
|
970
|
-
|
|
971
|
-
|
|
1043
|
+
|
|
1044
|
+
logcontext_debug_logger.debug(
|
|
1045
|
+
"make_deferred_yieldable(%s): resetting logcontext to %s",
|
|
1046
|
+
instance_id,
|
|
1047
|
+
SENTINEL_CONTEXT,
|
|
1048
|
+
)
|
|
1049
|
+
calling_context = set_current_context(SENTINEL_CONTEXT)
|
|
1050
|
+
|
|
1051
|
+
if logcontext_debug_logger.isEnabledFor(logging.DEBUG):
|
|
1052
|
+
|
|
1053
|
+
def _log_set_context_cb(
|
|
1054
|
+
result: ResultT, context: LoggingContextOrSentinel
|
|
1055
|
+
) -> ResultT:
|
|
1056
|
+
logcontext_debug_logger.debug(
|
|
1057
|
+
"make_deferred_yieldable(%s): restoring calling logcontext to %s",
|
|
1058
|
+
instance_id,
|
|
1059
|
+
context,
|
|
1060
|
+
)
|
|
1061
|
+
set_current_context(context)
|
|
1062
|
+
return result
|
|
1063
|
+
|
|
1064
|
+
deferred.addBoth(_log_set_context_cb, calling_context)
|
|
1065
|
+
else:
|
|
1066
|
+
deferred.addBoth(_set_context_cb, calling_context)
|
|
1067
|
+
|
|
972
1068
|
return deferred
|
|
973
1069
|
|
|
974
1070
|
|
|
@@ -1058,12 +1154,18 @@ def defer_to_threadpool(
|
|
|
1058
1154
|
"Calling defer_to_threadpool from sentinel context: metrics will be lost"
|
|
1059
1155
|
)
|
|
1060
1156
|
parent_context = None
|
|
1157
|
+
server_name = "unknown_server_from_sentinel_context"
|
|
1061
1158
|
else:
|
|
1062
1159
|
assert isinstance(curr_context, LoggingContext)
|
|
1063
1160
|
parent_context = curr_context
|
|
1161
|
+
server_name = parent_context.server_name
|
|
1064
1162
|
|
|
1065
1163
|
def g() -> R:
|
|
1066
|
-
with LoggingContext(
|
|
1164
|
+
with LoggingContext(
|
|
1165
|
+
name=str(curr_context),
|
|
1166
|
+
server_name=server_name,
|
|
1167
|
+
parent_context=parent_context,
|
|
1168
|
+
):
|
|
1067
1169
|
return f(*args, **kwargs)
|
|
1068
1170
|
|
|
1069
1171
|
return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
|
synapse/logging/opentracing.py
CHANGED
|
@@ -251,17 +251,18 @@ class _DummyTagNames:
|
|
|
251
251
|
try:
|
|
252
252
|
import opentracing
|
|
253
253
|
import opentracing.tags
|
|
254
|
-
from opentracing.scope_managers.contextvars import ContextVarsScopeManager
|
|
255
254
|
|
|
256
255
|
tags = opentracing.tags
|
|
257
256
|
except ImportError:
|
|
258
257
|
opentracing = None # type: ignore[assignment]
|
|
259
258
|
tags = _DummyTagNames # type: ignore[assignment]
|
|
260
|
-
ContextVarsScopeManager = None # type: ignore
|
|
261
259
|
try:
|
|
262
260
|
from jaeger_client import Config as JaegerConfig
|
|
261
|
+
|
|
262
|
+
from synapse.logging.scopecontextmanager import LogContextScopeManager
|
|
263
263
|
except ImportError:
|
|
264
264
|
JaegerConfig = None # type: ignore
|
|
265
|
+
LogContextScopeManager = None # type: ignore
|
|
265
266
|
|
|
266
267
|
|
|
267
268
|
try:
|
|
@@ -483,7 +484,7 @@ def init_tracer(hs: "HomeServer") -> None:
|
|
|
483
484
|
config = JaegerConfig(
|
|
484
485
|
config=jaeger_config,
|
|
485
486
|
service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
|
|
486
|
-
scope_manager=
|
|
487
|
+
scope_manager=LogContextScopeManager(),
|
|
487
488
|
metrics_factory=PrometheusMetricsFactory(),
|
|
488
489
|
)
|
|
489
490
|
|
|
@@ -576,7 +577,9 @@ def start_active_span_follows_from(
|
|
|
576
577
|
operation_name: str,
|
|
577
578
|
contexts: Collection,
|
|
578
579
|
child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
|
|
580
|
+
tags: Optional[Dict[str, str]] = None,
|
|
579
581
|
start_time: Optional[float] = None,
|
|
582
|
+
ignore_active_span: bool = False,
|
|
580
583
|
*,
|
|
581
584
|
inherit_force_tracing: bool = False,
|
|
582
585
|
tracer: Optional["opentracing.Tracer"] = None,
|
|
@@ -591,9 +594,16 @@ def start_active_span_follows_from(
|
|
|
591
594
|
span will be the parent. (If there is no currently active span, the first
|
|
592
595
|
span in `contexts` will be the parent.)
|
|
593
596
|
|
|
597
|
+
tags: an optional dictionary of span tags. The caller gives up ownership of that
|
|
598
|
+
dictionary, because the :class:`Tracer` may use it as-is to avoid extra data
|
|
599
|
+
copying.
|
|
600
|
+
|
|
594
601
|
start_time: optional override for the start time of the created span. Seconds
|
|
595
602
|
since the epoch.
|
|
596
603
|
|
|
604
|
+
ignore_active_span: an explicit flag that ignores the current active
|
|
605
|
+
scope and creates a root span.
|
|
606
|
+
|
|
597
607
|
inherit_force_tracing: if set, and any of the previous contexts have had tracing
|
|
598
608
|
forced, the new span will also have tracing forced.
|
|
599
609
|
tracer: override the opentracing tracer. By default the global tracer is used.
|
|
@@ -606,7 +616,9 @@ def start_active_span_follows_from(
|
|
|
606
616
|
operation_name,
|
|
607
617
|
child_of=child_of,
|
|
608
618
|
references=references,
|
|
619
|
+
tags=tags,
|
|
609
620
|
start_time=start_time,
|
|
621
|
+
ignore_active_span=ignore_active_span,
|
|
610
622
|
tracer=tracer,
|
|
611
623
|
)
|
|
612
624
|
|
|
@@ -672,9 +684,21 @@ def start_active_span_from_edu(
|
|
|
672
684
|
|
|
673
685
|
# Opentracing setters for tags, logs, etc
|
|
674
686
|
@only_if_tracing
|
|
675
|
-
def active_span(
|
|
676
|
-
|
|
677
|
-
|
|
687
|
+
def active_span(
|
|
688
|
+
*,
|
|
689
|
+
tracer: Optional["opentracing.Tracer"] = None,
|
|
690
|
+
) -> Optional["opentracing.Span"]:
|
|
691
|
+
"""
|
|
692
|
+
Get the currently active span, if any
|
|
693
|
+
|
|
694
|
+
Args:
|
|
695
|
+
tracer: override the opentracing tracer. By default the global tracer is used.
|
|
696
|
+
"""
|
|
697
|
+
if tracer is None:
|
|
698
|
+
# use the global tracer by default
|
|
699
|
+
tracer = opentracing.tracer
|
|
700
|
+
|
|
701
|
+
return tracer.active_span
|
|
678
702
|
|
|
679
703
|
|
|
680
704
|
@ensure_active_span("set a tag")
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
#
|
|
2
|
+
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
|
3
|
+
#
|
|
4
|
+
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
|
5
|
+
# Copyright (C) 2023 New Vector, Ltd
|
|
6
|
+
#
|
|
7
|
+
# This program is free software: you can redistribute it and/or modify
|
|
8
|
+
# it under the terms of the GNU Affero General Public License as
|
|
9
|
+
# published by the Free Software Foundation, either version 3 of the
|
|
10
|
+
# License, or (at your option) any later version.
|
|
11
|
+
#
|
|
12
|
+
# See the GNU Affero General Public License for more details:
|
|
13
|
+
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
|
14
|
+
#
|
|
15
|
+
# Originally licensed under the Apache License, Version 2.0:
|
|
16
|
+
# <http://www.apache.org/licenses/LICENSE-2.0>.
|
|
17
|
+
#
|
|
18
|
+
# [This file includes modifications made by New Vector Limited]
|
|
19
|
+
#
|
|
20
|
+
#
|
|
21
|
+
|
|
22
|
+
import logging
|
|
23
|
+
from typing import Optional
|
|
24
|
+
|
|
25
|
+
from opentracing import Scope, ScopeManager, Span
|
|
26
|
+
|
|
27
|
+
from synapse.logging.context import (
|
|
28
|
+
LoggingContext,
|
|
29
|
+
current_context,
|
|
30
|
+
nested_logging_context,
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
logger = logging.getLogger(__name__)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class LogContextScopeManager(ScopeManager):
    """An opentracing ScopeManager backed by synapse's native log contexts.

    Tracking the active scope through the log context means the basic
    opentracing API keeps working across twisted deferreds.

    It would be nice just to use opentracing's ContextVarsScopeManager,
    but currently that doesn't work due to
    https://twistedmatrix.com/trac/ticket/10301.
    """

    def __init__(self) -> None:
        pass

    @property
    def active(self) -> Optional[Scope]:
        """The currently active Scope, or None if there is none.

        A non-null active Scope's wrapped Span becomes the implicit parent
        of any Span created via Tracer.start_active_span().
        """
        return current_context().scope

    def activate(self, span: Span, finish_on_close: bool) -> Scope:
        """Make *span* the active span.

        Args:
            span: the span that should become active.
            finish_on_close: whether the span should be automatically
                finished when Scope.close() is called.

        Returns:
            A Scope controlling the active period of *span*. It is a
            programming error to neglect to call Scope.close() on the
            returned instance.
        """
        ctx = current_context()

        if not ctx:
            logger.error("Tried to activate scope outside of loggingcontext")
            return Scope(None, span)  # type: ignore[arg-type]

        if ctx.scope is None:
            # if there is no span currently associated with the current logcontext, we
            # just store the scope in it.
            #
            # This feels a bit dubious, but it does hack around a problem where a
            # span outlasts its parent logcontext (which would otherwise lead to
            # "Re-starting finished log context" errors).
            enter_logcontext = False
        else:
            # start a new logging context as a child of the existing one.
            # Doing so -- rather than updating the existing logcontext -- means that
            # creating several concurrent spans under the same logcontext works
            # correctly.
            ctx = nested_logging_context("")
            enter_logcontext = True

        scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
        ctx.scope = scope
        if enter_logcontext:
            ctx.__enter__()

        return scope
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class _LogContextScope(Scope):
    """A custom opentracing scope, associated with a LoggingContext.

    When the scope is closed, the logcontext's active scope is reset to
    None, and -- if enter_logcontext was set -- the logcontext is exited
    as well.
    """

    def __init__(
        self,
        manager: LogContextScopeManager,
        span: Span,
        logcontext: LoggingContext,
        enter_logcontext: bool,
        finish_on_close: bool,
    ):
        """
        Args:
            manager: the manager that is responsible for this scope.
            span: the opentracing span whose local lifetime this scope
                represents.
            logcontext: the log context to which this scope is attached.
            enter_logcontext: if True, the log context will be exited when
                this scope is finished.
            finish_on_close: if True, finish the span when this scope is
                closed.
        """
        super().__init__(manager, span)
        self.logcontext = logcontext
        self._finish_on_close = finish_on_close
        self._enter_logcontext = enter_logcontext

    def __str__(self) -> str:
        return f"Scope<{self.span}>"

    def close(self) -> None:
        # Closing out of order is a bug in the caller; log it, but still
        # run the normal teardown below.
        currently_active = self.manager.active
        if currently_active is not self:
            logger.error(
                "Closing scope %s which is not the currently-active one %s",
                self,
                currently_active,
            )

        if self._finish_on_close:
            self.span.finish()

        self.logcontext.scope = None

        if self._enter_logcontext:
            self.logcontext.__exit__(None, None, None)
|
synapse/media/_base.py
CHANGED
|
@@ -704,6 +704,7 @@ class ThreadedFileSender:
|
|
|
704
704
|
|
|
705
705
|
def __init__(self, hs: "HomeServer") -> None:
|
|
706
706
|
self.reactor = hs.get_reactor()
|
|
707
|
+
self.clock = hs.get_clock()
|
|
707
708
|
self.thread_pool = hs.get_media_sender_thread_pool()
|
|
708
709
|
|
|
709
710
|
self.file: Optional[BinaryIO] = None
|
|
@@ -712,7 +713,7 @@ class ThreadedFileSender:
|
|
|
712
713
|
|
|
713
714
|
# Signals if the thread should keep reading/sending data. Set means
|
|
714
715
|
# continue, clear means pause.
|
|
715
|
-
self.wakeup_event = DeferredEvent(self.
|
|
716
|
+
self.wakeup_event = DeferredEvent(self.clock)
|
|
716
717
|
|
|
717
718
|
# Signals if the thread should terminate, e.g. because the consumer has
|
|
718
719
|
# gone away.
|