matrix-synapse 1.143.0rc2 (matrix_synapse-1.143.0rc2-cp310-abi3-manylinux_2_28_aarch64.whl)
This diff lists the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
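For readers who want to sanity-check a manifest like the one below, here is a minimal sketch (not the diff tool's actual implementation) of how a per-file line-count listing can be derived from a wheel using only the Python standard library. The filename argument and the convention that binary members report 0 lines are assumptions based on the entries in this listing; counting `"\n"` characters is an approximation that undercounts files lacking a trailing newline.

```python
import zipfile

def wheel_manifest(path: str) -> None:
    """Print a rough per-file line-count manifest for a wheel archive."""
    with zipfile.ZipFile(path) as whl:
        for name in whl.namelist():
            data = whl.read(name)
            try:
                # Approximate line count for text members by counting newlines.
                lines = data.decode("utf-8").count("\n")
            except UnicodeDecodeError:
                # Binary members (e.g. .so files, images) report 0,
                # matching entries such as synapse/synapse_rust.abi3.so +0.
                lines = 0
            print(f"- {name} +{lines} -0")

# Hypothetical local path; the wheel must be downloaded first.
wheel_manifest("matrix_synapse-1.143.0rc2-cp310-abi3-manylinux_2_28_aarch64.whl")
```

Since this is a new release (no prior version in the diff), every entry shows only additions (`-0` removals).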
- matrix_synapse-1.143.0rc2.dist-info/AUTHORS.rst +51 -0
- matrix_synapse-1.143.0rc2.dist-info/LICENSE-AGPL-3.0 +661 -0
- matrix_synapse-1.143.0rc2.dist-info/LICENSE-COMMERCIAL +6 -0
- matrix_synapse-1.143.0rc2.dist-info/METADATA +385 -0
- matrix_synapse-1.143.0rc2.dist-info/RECORD +1058 -0
- matrix_synapse-1.143.0rc2.dist-info/WHEEL +4 -0
- matrix_synapse-1.143.0rc2.dist-info/entry_points.txt +14 -0
- synapse/__init__.py +97 -0
- synapse/_scripts/__init__.py +0 -0
- synapse/_scripts/export_signing_key.py +109 -0
- synapse/_scripts/generate_config.py +83 -0
- synapse/_scripts/generate_log_config.py +56 -0
- synapse/_scripts/generate_signing_key.py +55 -0
- synapse/_scripts/generate_workers_map.py +318 -0
- synapse/_scripts/hash_password.py +95 -0
- synapse/_scripts/move_remote_media_to_new_store.py +128 -0
- synapse/_scripts/register_new_matrix_user.py +402 -0
- synapse/_scripts/review_recent_signups.py +212 -0
- synapse/_scripts/synapse_port_db.py +1604 -0
- synapse/_scripts/synctl.py +365 -0
- synapse/_scripts/update_synapse_database.py +130 -0
- synapse/api/__init__.py +20 -0
- synapse/api/auth/__init__.py +207 -0
- synapse/api/auth/base.py +406 -0
- synapse/api/auth/internal.py +299 -0
- synapse/api/auth/mas.py +436 -0
- synapse/api/auth/msc3861_delegated.py +617 -0
- synapse/api/auth_blocking.py +144 -0
- synapse/api/constants.py +362 -0
- synapse/api/errors.py +907 -0
- synapse/api/filtering.py +537 -0
- synapse/api/presence.py +102 -0
- synapse/api/ratelimiting.py +480 -0
- synapse/api/room_versions.py +535 -0
- synapse/api/urls.py +118 -0
- synapse/app/__init__.py +60 -0
- synapse/app/_base.py +862 -0
- synapse/app/admin_cmd.py +388 -0
- synapse/app/appservice.py +30 -0
- synapse/app/client_reader.py +30 -0
- synapse/app/complement_fork_starter.py +206 -0
- synapse/app/event_creator.py +29 -0
- synapse/app/federation_reader.py +30 -0
- synapse/app/federation_sender.py +30 -0
- synapse/app/frontend_proxy.py +30 -0
- synapse/app/generic_worker.py +474 -0
- synapse/app/homeserver.py +505 -0
- synapse/app/media_repository.py +30 -0
- synapse/app/phone_stats_home.py +296 -0
- synapse/app/pusher.py +30 -0
- synapse/app/synchrotron.py +30 -0
- synapse/app/user_dir.py +31 -0
- synapse/appservice/__init__.py +458 -0
- synapse/appservice/api.py +567 -0
- synapse/appservice/scheduler.py +564 -0
- synapse/config/__init__.py +27 -0
- synapse/config/__main__.py +62 -0
- synapse/config/_base.py +1106 -0
- synapse/config/_base.pyi +215 -0
- synapse/config/_util.py +99 -0
- synapse/config/account_validity.py +116 -0
- synapse/config/api.py +141 -0
- synapse/config/appservice.py +210 -0
- synapse/config/auth.py +80 -0
- synapse/config/auto_accept_invites.py +43 -0
- synapse/config/background_updates.py +44 -0
- synapse/config/cache.py +231 -0
- synapse/config/captcha.py +90 -0
- synapse/config/cas.py +116 -0
- synapse/config/consent.py +73 -0
- synapse/config/database.py +184 -0
- synapse/config/emailconfig.py +367 -0
- synapse/config/experimental.py +595 -0
- synapse/config/federation.py +114 -0
- synapse/config/homeserver.py +141 -0
- synapse/config/jwt.py +55 -0
- synapse/config/key.py +447 -0
- synapse/config/logger.py +390 -0
- synapse/config/mas.py +192 -0
- synapse/config/matrixrtc.py +66 -0
- synapse/config/metrics.py +84 -0
- synapse/config/modules.py +40 -0
- synapse/config/oembed.py +185 -0
- synapse/config/oidc.py +509 -0
- synapse/config/password_auth_providers.py +82 -0
- synapse/config/push.py +64 -0
- synapse/config/ratelimiting.py +254 -0
- synapse/config/redis.py +74 -0
- synapse/config/registration.py +296 -0
- synapse/config/repository.py +311 -0
- synapse/config/retention.py +162 -0
- synapse/config/room.py +88 -0
- synapse/config/room_directory.py +165 -0
- synapse/config/saml2.py +251 -0
- synapse/config/server.py +1170 -0
- synapse/config/server_notices.py +84 -0
- synapse/config/spam_checker.py +66 -0
- synapse/config/sso.py +121 -0
- synapse/config/stats.py +54 -0
- synapse/config/third_party_event_rules.py +40 -0
- synapse/config/tls.py +192 -0
- synapse/config/tracer.py +71 -0
- synapse/config/user_directory.py +47 -0
- synapse/config/user_types.py +42 -0
- synapse/config/voip.py +59 -0
- synapse/config/workers.py +642 -0
- synapse/crypto/__init__.py +20 -0
- synapse/crypto/context_factory.py +278 -0
- synapse/crypto/event_signing.py +194 -0
- synapse/crypto/keyring.py +931 -0
- synapse/event_auth.py +1266 -0
- synapse/events/__init__.py +667 -0
- synapse/events/auto_accept_invites.py +216 -0
- synapse/events/builder.py +387 -0
- synapse/events/presence_router.py +243 -0
- synapse/events/snapshot.py +559 -0
- synapse/events/utils.py +924 -0
- synapse/events/validator.py +305 -0
- synapse/federation/__init__.py +22 -0
- synapse/federation/federation_base.py +382 -0
- synapse/federation/federation_client.py +2132 -0
- synapse/federation/federation_server.py +1540 -0
- synapse/federation/persistence.py +70 -0
- synapse/federation/send_queue.py +531 -0
- synapse/federation/sender/__init__.py +1164 -0
- synapse/federation/sender/per_destination_queue.py +886 -0
- synapse/federation/sender/transaction_manager.py +210 -0
- synapse/federation/transport/__init__.py +28 -0
- synapse/federation/transport/client.py +1199 -0
- synapse/federation/transport/server/__init__.py +334 -0
- synapse/federation/transport/server/_base.py +429 -0
- synapse/federation/transport/server/federation.py +910 -0
- synapse/federation/units.py +133 -0
- synapse/handlers/__init__.py +20 -0
- synapse/handlers/account.py +162 -0
- synapse/handlers/account_data.py +360 -0
- synapse/handlers/account_validity.py +361 -0
- synapse/handlers/admin.py +615 -0
- synapse/handlers/appservice.py +989 -0
- synapse/handlers/auth.py +2481 -0
- synapse/handlers/cas.py +413 -0
- synapse/handlers/deactivate_account.py +363 -0
- synapse/handlers/delayed_events.py +599 -0
- synapse/handlers/device.py +1870 -0
- synapse/handlers/devicemessage.py +399 -0
- synapse/handlers/directory.py +545 -0
- synapse/handlers/e2e_keys.py +1834 -0
- synapse/handlers/e2e_room_keys.py +455 -0
- synapse/handlers/event_auth.py +390 -0
- synapse/handlers/events.py +201 -0
- synapse/handlers/federation.py +2039 -0
- synapse/handlers/federation_event.py +2419 -0
- synapse/handlers/identity.py +812 -0
- synapse/handlers/initial_sync.py +528 -0
- synapse/handlers/jwt.py +120 -0
- synapse/handlers/message.py +2347 -0
- synapse/handlers/oidc.py +1801 -0
- synapse/handlers/pagination.py +768 -0
- synapse/handlers/password_policy.py +102 -0
- synapse/handlers/presence.py +2633 -0
- synapse/handlers/profile.py +655 -0
- synapse/handlers/push_rules.py +164 -0
- synapse/handlers/read_marker.py +79 -0
- synapse/handlers/receipts.py +351 -0
- synapse/handlers/register.py +1059 -0
- synapse/handlers/relations.py +623 -0
- synapse/handlers/reports.py +98 -0
- synapse/handlers/room.py +2448 -0
- synapse/handlers/room_list.py +632 -0
- synapse/handlers/room_member.py +2365 -0
- synapse/handlers/room_member_worker.py +146 -0
- synapse/handlers/room_policy.py +186 -0
- synapse/handlers/room_summary.py +1057 -0
- synapse/handlers/saml.py +524 -0
- synapse/handlers/search.py +723 -0
- synapse/handlers/send_email.py +209 -0
- synapse/handlers/set_password.py +71 -0
- synapse/handlers/sliding_sync/__init__.py +1701 -0
- synapse/handlers/sliding_sync/extensions.py +969 -0
- synapse/handlers/sliding_sync/room_lists.py +2262 -0
- synapse/handlers/sliding_sync/store.py +128 -0
- synapse/handlers/sso.py +1291 -0
- synapse/handlers/state_deltas.py +82 -0
- synapse/handlers/stats.py +321 -0
- synapse/handlers/sync.py +3106 -0
- synapse/handlers/thread_subscriptions.py +190 -0
- synapse/handlers/typing.py +606 -0
- synapse/handlers/ui_auth/__init__.py +48 -0
- synapse/handlers/ui_auth/checkers.py +332 -0
- synapse/handlers/user_directory.py +783 -0
- synapse/handlers/worker_lock.py +371 -0
- synapse/http/__init__.py +105 -0
- synapse/http/additional_resource.py +62 -0
- synapse/http/client.py +1373 -0
- synapse/http/connectproxyclient.py +316 -0
- synapse/http/federation/__init__.py +19 -0
- synapse/http/federation/matrix_federation_agent.py +490 -0
- synapse/http/federation/srv_resolver.py +196 -0
- synapse/http/federation/well_known_resolver.py +367 -0
- synapse/http/matrixfederationclient.py +1873 -0
- synapse/http/proxy.py +290 -0
- synapse/http/proxyagent.py +497 -0
- synapse/http/replicationagent.py +202 -0
- synapse/http/request_metrics.py +309 -0
- synapse/http/server.py +1110 -0
- synapse/http/servlet.py +1018 -0
- synapse/http/site.py +825 -0
- synapse/http/types.py +27 -0
- synapse/logging/__init__.py +31 -0
- synapse/logging/_remote.py +261 -0
- synapse/logging/_terse_json.py +95 -0
- synapse/logging/context.py +1209 -0
- synapse/logging/formatter.py +62 -0
- synapse/logging/handlers.py +99 -0
- synapse/logging/loggers.py +25 -0
- synapse/logging/opentracing.py +1132 -0
- synapse/logging/scopecontextmanager.py +160 -0
- synapse/media/_base.py +830 -0
- synapse/media/filepath.py +417 -0
- synapse/media/media_repository.py +1580 -0
- synapse/media/media_storage.py +702 -0
- synapse/media/oembed.py +277 -0
- synapse/media/preview_html.py +556 -0
- synapse/media/storage_provider.py +195 -0
- synapse/media/thumbnailer.py +833 -0
- synapse/media/url_previewer.py +875 -0
- synapse/metrics/__init__.py +748 -0
- synapse/metrics/_gc.py +219 -0
- synapse/metrics/_reactor_metrics.py +171 -0
- synapse/metrics/_types.py +38 -0
- synapse/metrics/background_process_metrics.py +555 -0
- synapse/metrics/common_usage_metrics.py +94 -0
- synapse/metrics/jemalloc.py +248 -0
- synapse/module_api/__init__.py +2131 -0
- synapse/module_api/callbacks/__init__.py +50 -0
- synapse/module_api/callbacks/account_validity_callbacks.py +106 -0
- synapse/module_api/callbacks/media_repository_callbacks.py +157 -0
- synapse/module_api/callbacks/ratelimit_callbacks.py +78 -0
- synapse/module_api/callbacks/spamchecker_callbacks.py +991 -0
- synapse/module_api/callbacks/third_party_event_rules_callbacks.py +592 -0
- synapse/module_api/errors.py +42 -0
- synapse/notifier.py +970 -0
- synapse/push/__init__.py +212 -0
- synapse/push/bulk_push_rule_evaluator.py +635 -0
- synapse/push/clientformat.py +126 -0
- synapse/push/emailpusher.py +333 -0
- synapse/push/httppusher.py +564 -0
- synapse/push/mailer.py +1010 -0
- synapse/push/presentable_names.py +216 -0
- synapse/push/push_tools.py +114 -0
- synapse/push/push_types.py +141 -0
- synapse/push/pusher.py +87 -0
- synapse/push/pusherpool.py +501 -0
- synapse/push/rulekinds.py +33 -0
- synapse/py.typed +0 -0
- synapse/replication/__init__.py +20 -0
- synapse/replication/http/__init__.py +68 -0
- synapse/replication/http/_base.py +468 -0
- synapse/replication/http/account_data.py +297 -0
- synapse/replication/http/deactivate_account.py +81 -0
- synapse/replication/http/delayed_events.py +62 -0
- synapse/replication/http/devices.py +254 -0
- synapse/replication/http/federation.py +334 -0
- synapse/replication/http/login.py +106 -0
- synapse/replication/http/membership.py +364 -0
- synapse/replication/http/presence.py +133 -0
- synapse/replication/http/push.py +156 -0
- synapse/replication/http/register.py +172 -0
- synapse/replication/http/send_events.py +182 -0
- synapse/replication/http/state.py +82 -0
- synapse/replication/http/streams.py +101 -0
- synapse/replication/tcp/__init__.py +56 -0
- synapse/replication/tcp/client.py +552 -0
- synapse/replication/tcp/commands.py +569 -0
- synapse/replication/tcp/context.py +41 -0
- synapse/replication/tcp/external_cache.py +156 -0
- synapse/replication/tcp/handler.py +922 -0
- synapse/replication/tcp/protocol.py +608 -0
- synapse/replication/tcp/redis.py +509 -0
- synapse/replication/tcp/resource.py +348 -0
- synapse/replication/tcp/streams/__init__.py +96 -0
- synapse/replication/tcp/streams/_base.py +765 -0
- synapse/replication/tcp/streams/events.py +287 -0
- synapse/replication/tcp/streams/federation.py +92 -0
- synapse/replication/tcp/streams/partial_state.py +80 -0
- synapse/res/providers.json +29 -0
- synapse/res/templates/_base.html +29 -0
- synapse/res/templates/account_previously_renewed.html +6 -0
- synapse/res/templates/account_renewed.html +6 -0
- synapse/res/templates/add_threepid.html +8 -0
- synapse/res/templates/add_threepid.txt +6 -0
- synapse/res/templates/add_threepid_failure.html +7 -0
- synapse/res/templates/add_threepid_success.html +6 -0
- synapse/res/templates/already_in_use.html +12 -0
- synapse/res/templates/already_in_use.txt +10 -0
- synapse/res/templates/auth_success.html +21 -0
- synapse/res/templates/invalid_token.html +6 -0
- synapse/res/templates/mail-Element.css +7 -0
- synapse/res/templates/mail-Vector.css +7 -0
- synapse/res/templates/mail-expiry.css +4 -0
- synapse/res/templates/mail.css +156 -0
- synapse/res/templates/notice_expiry.html +46 -0
- synapse/res/templates/notice_expiry.txt +7 -0
- synapse/res/templates/notif.html +51 -0
- synapse/res/templates/notif.txt +22 -0
- synapse/res/templates/notif_mail.html +59 -0
- synapse/res/templates/notif_mail.txt +10 -0
- synapse/res/templates/password_reset.html +10 -0
- synapse/res/templates/password_reset.txt +7 -0
- synapse/res/templates/password_reset_confirmation.html +15 -0
- synapse/res/templates/password_reset_failure.html +7 -0
- synapse/res/templates/password_reset_success.html +6 -0
- synapse/res/templates/recaptcha.html +42 -0
- synapse/res/templates/registration.html +12 -0
- synapse/res/templates/registration.txt +10 -0
- synapse/res/templates/registration_failure.html +6 -0
- synapse/res/templates/registration_success.html +6 -0
- synapse/res/templates/registration_token.html +18 -0
- synapse/res/templates/room.html +33 -0
- synapse/res/templates/room.txt +9 -0
- synapse/res/templates/sso.css +129 -0
- synapse/res/templates/sso_account_deactivated.html +25 -0
- synapse/res/templates/sso_auth_account_details.html +186 -0
- synapse/res/templates/sso_auth_account_details.js +116 -0
- synapse/res/templates/sso_auth_bad_user.html +26 -0
- synapse/res/templates/sso_auth_confirm.html +27 -0
- synapse/res/templates/sso_auth_success.html +26 -0
- synapse/res/templates/sso_error.html +71 -0
- synapse/res/templates/sso_footer.html +19 -0
- synapse/res/templates/sso_login_idp_picker.html +60 -0
- synapse/res/templates/sso_new_user_consent.html +30 -0
- synapse/res/templates/sso_partial_profile.html +19 -0
- synapse/res/templates/sso_redirect_confirm.html +39 -0
- synapse/res/templates/style.css +33 -0
- synapse/res/templates/terms.html +27 -0
- synapse/rest/__init__.py +197 -0
- synapse/rest/admin/__init__.py +390 -0
- synapse/rest/admin/_base.py +72 -0
- synapse/rest/admin/background_updates.py +171 -0
- synapse/rest/admin/devices.py +221 -0
- synapse/rest/admin/event_reports.py +173 -0
- synapse/rest/admin/events.py +69 -0
- synapse/rest/admin/experimental_features.py +137 -0
- synapse/rest/admin/federation.py +243 -0
- synapse/rest/admin/media.py +540 -0
- synapse/rest/admin/registration_tokens.py +358 -0
- synapse/rest/admin/rooms.py +1061 -0
- synapse/rest/admin/scheduled_tasks.py +70 -0
- synapse/rest/admin/server_notice_servlet.py +132 -0
- synapse/rest/admin/statistics.py +132 -0
- synapse/rest/admin/username_available.py +58 -0
- synapse/rest/admin/users.py +1606 -0
- synapse/rest/client/__init__.py +20 -0
- synapse/rest/client/_base.py +113 -0
- synapse/rest/client/account.py +930 -0
- synapse/rest/client/account_data.py +319 -0
- synapse/rest/client/account_validity.py +103 -0
- synapse/rest/client/appservice_ping.py +125 -0
- synapse/rest/client/auth.py +218 -0
- synapse/rest/client/auth_metadata.py +122 -0
- synapse/rest/client/capabilities.py +121 -0
- synapse/rest/client/delayed_events.py +165 -0
- synapse/rest/client/devices.py +587 -0
- synapse/rest/client/directory.py +211 -0
- synapse/rest/client/events.py +116 -0
- synapse/rest/client/filter.py +112 -0
- synapse/rest/client/initial_sync.py +65 -0
- synapse/rest/client/keys.py +678 -0
- synapse/rest/client/knock.py +104 -0
- synapse/rest/client/login.py +750 -0
- synapse/rest/client/login_token_request.py +127 -0
- synapse/rest/client/logout.py +93 -0
- synapse/rest/client/matrixrtc.py +52 -0
- synapse/rest/client/media.py +285 -0
- synapse/rest/client/mutual_rooms.py +93 -0
- synapse/rest/client/notifications.py +137 -0
- synapse/rest/client/openid.py +109 -0
- synapse/rest/client/password_policy.py +69 -0
- synapse/rest/client/presence.py +131 -0
- synapse/rest/client/profile.py +291 -0
- synapse/rest/client/push_rule.py +331 -0
- synapse/rest/client/pusher.py +181 -0
- synapse/rest/client/read_marker.py +104 -0
- synapse/rest/client/receipts.py +165 -0
- synapse/rest/client/register.py +1067 -0
- synapse/rest/client/relations.py +138 -0
- synapse/rest/client/rendezvous.py +76 -0
- synapse/rest/client/reporting.py +207 -0
- synapse/rest/client/room.py +1669 -0
- synapse/rest/client/room_keys.py +426 -0
- synapse/rest/client/room_upgrade_rest_servlet.py +112 -0
- synapse/rest/client/sendtodevice.py +85 -0
- synapse/rest/client/sync.py +1131 -0
- synapse/rest/client/tags.py +129 -0
- synapse/rest/client/thirdparty.py +130 -0
- synapse/rest/client/thread_subscriptions.py +247 -0
- synapse/rest/client/tokenrefresh.py +52 -0
- synapse/rest/client/transactions.py +149 -0
- synapse/rest/client/user_directory.py +90 -0
- synapse/rest/client/versions.py +191 -0
- synapse/rest/client/voip.py +88 -0
- synapse/rest/consent/__init__.py +0 -0
- synapse/rest/consent/consent_resource.py +210 -0
- synapse/rest/health.py +38 -0
- synapse/rest/key/__init__.py +20 -0
- synapse/rest/key/v2/__init__.py +40 -0
- synapse/rest/key/v2/local_key_resource.py +125 -0
- synapse/rest/key/v2/remote_key_resource.py +302 -0
- synapse/rest/media/__init__.py +0 -0
- synapse/rest/media/config_resource.py +53 -0
- synapse/rest/media/create_resource.py +90 -0
- synapse/rest/media/download_resource.py +110 -0
- synapse/rest/media/media_repository_resource.py +113 -0
- synapse/rest/media/preview_url_resource.py +77 -0
- synapse/rest/media/thumbnail_resource.py +142 -0
- synapse/rest/media/upload_resource.py +187 -0
- synapse/rest/media/v1/__init__.py +39 -0
- synapse/rest/media/v1/_base.py +23 -0
- synapse/rest/media/v1/media_storage.py +23 -0
- synapse/rest/media/v1/storage_provider.py +23 -0
- synapse/rest/synapse/__init__.py +20 -0
- synapse/rest/synapse/client/__init__.py +93 -0
- synapse/rest/synapse/client/federation_whitelist.py +66 -0
- synapse/rest/synapse/client/jwks.py +77 -0
- synapse/rest/synapse/client/new_user_consent.py +115 -0
- synapse/rest/synapse/client/oidc/__init__.py +45 -0
- synapse/rest/synapse/client/oidc/backchannel_logout_resource.py +42 -0
- synapse/rest/synapse/client/oidc/callback_resource.py +48 -0
- synapse/rest/synapse/client/password_reset.py +129 -0
- synapse/rest/synapse/client/pick_idp.py +107 -0
- synapse/rest/synapse/client/pick_username.py +153 -0
- synapse/rest/synapse/client/rendezvous.py +58 -0
- synapse/rest/synapse/client/saml2/__init__.py +42 -0
- synapse/rest/synapse/client/saml2/metadata_resource.py +46 -0
- synapse/rest/synapse/client/saml2/response_resource.py +52 -0
- synapse/rest/synapse/client/sso_register.py +56 -0
- synapse/rest/synapse/client/unsubscribe.py +88 -0
- synapse/rest/synapse/mas/__init__.py +71 -0
- synapse/rest/synapse/mas/_base.py +55 -0
- synapse/rest/synapse/mas/devices.py +239 -0
- synapse/rest/synapse/mas/users.py +469 -0
- synapse/rest/well_known.py +148 -0
- synapse/server.py +1257 -0
- synapse/server_notices/__init__.py +0 -0
- synapse/server_notices/consent_server_notices.py +136 -0
- synapse/server_notices/resource_limits_server_notices.py +215 -0
- synapse/server_notices/server_notices_manager.py +388 -0
- synapse/server_notices/server_notices_sender.py +67 -0
- synapse/server_notices/worker_server_notices_sender.py +46 -0
- synapse/spam_checker_api/__init__.py +31 -0
- synapse/state/__init__.py +1022 -0
- synapse/state/v1.py +369 -0
- synapse/state/v2.py +984 -0
- synapse/static/client/login/index.html +47 -0
- synapse/static/client/login/js/jquery-3.4.1.min.js +2 -0
- synapse/static/client/login/js/login.js +291 -0
- synapse/static/client/login/spinner.gif +0 -0
- synapse/static/client/login/style.css +79 -0
- synapse/static/index.html +63 -0
- synapse/storage/__init__.py +43 -0
- synapse/storage/_base.py +245 -0
- synapse/storage/admin_client_config.py +25 -0
- synapse/storage/background_updates.py +1188 -0
- synapse/storage/controllers/__init__.py +57 -0
- synapse/storage/controllers/persist_events.py +1237 -0
- synapse/storage/controllers/purge_events.py +455 -0
- synapse/storage/controllers/state.py +950 -0
- synapse/storage/controllers/stats.py +119 -0
- synapse/storage/database.py +2719 -0
- synapse/storage/databases/__init__.py +175 -0
- synapse/storage/databases/main/__init__.py +420 -0
- synapse/storage/databases/main/account_data.py +1059 -0
- synapse/storage/databases/main/appservice.py +473 -0
- synapse/storage/databases/main/cache.py +911 -0
- synapse/storage/databases/main/censor_events.py +225 -0
- synapse/storage/databases/main/client_ips.py +815 -0
- synapse/storage/databases/main/delayed_events.py +562 -0
- synapse/storage/databases/main/deviceinbox.py +1271 -0
- synapse/storage/databases/main/devices.py +2578 -0
- synapse/storage/databases/main/directory.py +212 -0
- synapse/storage/databases/main/e2e_room_keys.py +689 -0
- synapse/storage/databases/main/end_to_end_keys.py +1894 -0
- synapse/storage/databases/main/event_federation.py +2508 -0
- synapse/storage/databases/main/event_push_actions.py +1933 -0
- synapse/storage/databases/main/events.py +3765 -0
- synapse/storage/databases/main/events_bg_updates.py +2910 -0
- synapse/storage/databases/main/events_forward_extremities.py +126 -0
- synapse/storage/databases/main/events_worker.py +2786 -0
- synapse/storage/databases/main/experimental_features.py +130 -0
- synapse/storage/databases/main/filtering.py +231 -0
- synapse/storage/databases/main/keys.py +291 -0
- synapse/storage/databases/main/lock.py +553 -0
- synapse/storage/databases/main/media_repository.py +1068 -0
- synapse/storage/databases/main/metrics.py +460 -0
- synapse/storage/databases/main/monthly_active_users.py +443 -0
- synapse/storage/databases/main/openid.py +60 -0
- synapse/storage/databases/main/presence.py +509 -0
- synapse/storage/databases/main/profile.py +539 -0
- synapse/storage/databases/main/purge_events.py +521 -0
- synapse/storage/databases/main/push_rule.py +970 -0
- synapse/storage/databases/main/pusher.py +793 -0
- synapse/storage/databases/main/receipts.py +1341 -0
- synapse/storage/databases/main/registration.py +3072 -0
- synapse/storage/databases/main/rejections.py +37 -0
- synapse/storage/databases/main/relations.py +1116 -0
- synapse/storage/databases/main/room.py +2779 -0
- synapse/storage/databases/main/roommember.py +2110 -0
- synapse/storage/databases/main/search.py +939 -0
- synapse/storage/databases/main/session.py +151 -0
- synapse/storage/databases/main/signatures.py +94 -0
- synapse/storage/databases/main/sliding_sync.py +603 -0
- synapse/storage/databases/main/state.py +1002 -0
- synapse/storage/databases/main/state_deltas.py +329 -0
- synapse/storage/databases/main/stats.py +789 -0
- synapse/storage/databases/main/stream.py +2577 -0
- synapse/storage/databases/main/tags.py +360 -0
- synapse/storage/databases/main/task_scheduler.py +225 -0
- synapse/storage/databases/main/thread_subscriptions.py +589 -0
- synapse/storage/databases/main/transactions.py +675 -0
- synapse/storage/databases/main/ui_auth.py +420 -0
- synapse/storage/databases/main/user_directory.py +1330 -0
- synapse/storage/databases/main/user_erasure_store.py +117 -0
- synapse/storage/databases/state/__init__.py +22 -0
- synapse/storage/databases/state/bg_updates.py +497 -0
- synapse/storage/databases/state/deletion.py +557 -0
- synapse/storage/databases/state/store.py +948 -0
- synapse/storage/engines/__init__.py +70 -0
- synapse/storage/engines/_base.py +154 -0
- synapse/storage/engines/postgres.py +261 -0
- synapse/storage/engines/sqlite.py +199 -0
- synapse/storage/invite_rule.py +112 -0
- synapse/storage/keys.py +40 -0
- synapse/storage/prepare_database.py +730 -0
- synapse/storage/push_rule.py +28 -0
- synapse/storage/roommember.py +88 -0
- synapse/storage/schema/README.md +4 -0
- synapse/storage/schema/__init__.py +186 -0
- synapse/storage/schema/common/delta/25/00background_updates.sql +40 -0
- synapse/storage/schema/common/delta/35/00background_updates_add_col.sql +36 -0
- synapse/storage/schema/common/delta/58/00background_update_ordering.sql +38 -0
- synapse/storage/schema/common/full_schemas/72/full.sql.postgres +8 -0
- synapse/storage/schema/common/full_schemas/72/full.sql.sqlite +6 -0
- synapse/storage/schema/common/schema_version.sql +60 -0
- synapse/storage/schema/main/delta/12/v12.sql +82 -0
- synapse/storage/schema/main/delta/13/v13.sql +38 -0
- synapse/storage/schema/main/delta/14/v14.sql +42 -0
- synapse/storage/schema/main/delta/15/appservice_txns.sql +50 -0
- synapse/storage/schema/main/delta/15/presence_indices.sql +2 -0
- synapse/storage/schema/main/delta/15/v15.sql +24 -0
- synapse/storage/schema/main/delta/16/events_order_index.sql +4 -0
- synapse/storage/schema/main/delta/16/remote_media_cache_index.sql +2 -0
- synapse/storage/schema/main/delta/16/remove_duplicates.sql +9 -0
- synapse/storage/schema/main/delta/16/room_alias_index.sql +3 -0
- synapse/storage/schema/main/delta/16/unique_constraints.sql +72 -0
- synapse/storage/schema/main/delta/16/users.sql +56 -0
- synapse/storage/schema/main/delta/17/drop_indexes.sql +37 -0
- synapse/storage/schema/main/delta/17/server_keys.sql +43 -0
- synapse/storage/schema/main/delta/17/user_threepids.sql +9 -0
- synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql +51 -0
- synapse/storage/schema/main/delta/19/event_index.sql +38 -0
- synapse/storage/schema/main/delta/20/dummy.sql +1 -0
- synapse/storage/schema/main/delta/20/pushers.py +93 -0
- synapse/storage/schema/main/delta/21/end_to_end_keys.sql +53 -0
- synapse/storage/schema/main/delta/21/receipts.sql +57 -0
- synapse/storage/schema/main/delta/22/receipts_index.sql +41 -0
- synapse/storage/schema/main/delta/22/user_threepids_unique.sql +19 -0
- synapse/storage/schema/main/delta/24/stats_reporting.sql +37 -0
- synapse/storage/schema/main/delta/25/fts.py +81 -0
- synapse/storage/schema/main/delta/25/guest_access.sql +44 -0
- synapse/storage/schema/main/delta/25/history_visibility.sql +44 -0
- synapse/storage/schema/main/delta/25/tags.sql +57 -0
- synapse/storage/schema/main/delta/26/account_data.sql +36 -0
- synapse/storage/schema/main/delta/27/account_data.sql +55 -0
- synapse/storage/schema/main/delta/27/forgotten_memberships.sql +45 -0
- synapse/storage/schema/main/delta/27/ts.py +61 -0
- synapse/storage/schema/main/delta/28/event_push_actions.sql +46 -0
- synapse/storage/schema/main/delta/28/events_room_stream.sql +39 -0
- synapse/storage/schema/main/delta/28/public_roms_index.sql +39 -0
- synapse/storage/schema/main/delta/28/receipts_user_id_index.sql +41 -0
- synapse/storage/schema/main/delta/28/upgrade_times.sql +40 -0
- synapse/storage/schema/main/delta/28/users_is_guest.sql +41 -0
- synapse/storage/schema/main/delta/29/push_actions.sql +54 -0
- synapse/storage/schema/main/delta/30/alias_creator.sql +35 -0
- synapse/storage/schema/main/delta/30/as_users.py +82 -0
- synapse/storage/schema/main/delta/30/deleted_pushers.sql +44 -0
- synapse/storage/schema/main/delta/30/presence_stream.sql +49 -0
- synapse/storage/schema/main/delta/30/public_rooms.sql +42 -0
- synapse/storage/schema/main/delta/30/push_rule_stream.sql +57 -0
- synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql +43 -0
- synapse/storage/schema/main/delta/31/invites.sql +61 -0
- synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql +46 -0
- synapse/storage/schema/main/delta/31/pushers_0.py +92 -0
- synapse/storage/schema/main/delta/31/pushers_index.sql +41 -0
- synapse/storage/schema/main/delta/31/search_update.py +65 -0
- synapse/storage/schema/main/delta/32/events.sql +35 -0
- synapse/storage/schema/main/delta/32/openid.sql +9 -0
- synapse/storage/schema/main/delta/32/pusher_throttle.sql +42 -0
- synapse/storage/schema/main/delta/32/remove_indices.sql +52 -0
- synapse/storage/schema/main/delta/32/reports.sql +44 -0
- synapse/storage/schema/main/delta/33/access_tokens_device_index.sql +36 -0
- synapse/storage/schema/main/delta/33/devices.sql +40 -0
- synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql +38 -0
- synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql +39 -0
- synapse/storage/schema/main/delta/33/event_fields.py +61 -0
- synapse/storage/schema/main/delta/33/remote_media_ts.py +43 -0
- synapse/storage/schema/main/delta/33/user_ips_index.sql +36 -0
- synapse/storage/schema/main/delta/34/appservice_stream.sql +42 -0
- synapse/storage/schema/main/delta/34/cache_stream.py +50 -0
- synapse/storage/schema/main/delta/34/device_inbox.sql +43 -0
- synapse/storage/schema/main/delta/34/push_display_name_rename.sql +39 -0
- synapse/storage/schema/main/delta/34/received_txn_purge.py +36 -0
- synapse/storage/schema/main/delta/35/contains_url.sql +36 -0
- synapse/storage/schema/main/delta/35/device_outbox.sql +58 -0
- synapse/storage/schema/main/delta/35/device_stream_id.sql +40 -0
- synapse/storage/schema/main/delta/35/event_push_actions_index.sql +36 -0
- synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql +52 -0
- synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql +56 -0
- synapse/storage/schema/main/delta/36/readd_public_rooms.sql +45 -0
- synapse/storage/schema/main/delta/37/remove_auth_idx.py +89 -0
- synapse/storage/schema/main/delta/37/user_threepids.sql +71 -0
- synapse/storage/schema/main/delta/38/postgres_fts_gist.sql +38 -0
- synapse/storage/schema/main/delta/39/appservice_room_list.sql +48 -0
- synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql +35 -0
- synapse/storage/schema/main/delta/39/event_push_index.sql +36 -0
- synapse/storage/schema/main/delta/39/federation_out_position.sql +41 -0
- synapse/storage/schema/main/delta/39/membership_profile.sql +39 -0
- synapse/storage/schema/main/delta/40/current_state_idx.sql +36 -0
- synapse/storage/schema/main/delta/40/device_inbox.sql +40 -0
- synapse/storage/schema/main/delta/40/device_list_streams.sql +79 -0
- synapse/storage/schema/main/delta/40/event_push_summary.sql +57 -0
- synapse/storage/schema/main/delta/40/pushers.sql +58 -0
- synapse/storage/schema/main/delta/41/device_list_stream_idx.sql +36 -0
- synapse/storage/schema/main/delta/41/device_outbound_index.sql +35 -0
- synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql +36 -0
- synapse/storage/schema/main/delta/41/ratelimit.sql +41 -0
- synapse/storage/schema/main/delta/42/current_state_delta.sql +48 -0
- synapse/storage/schema/main/delta/42/device_list_last_id.sql +52 -0
- synapse/storage/schema/main/delta/42/event_auth_state_only.sql +36 -0
- synapse/storage/schema/main/delta/42/user_dir.py +88 -0
- synapse/storage/schema/main/delta/43/blocked_rooms.sql +40 -0
- synapse/storage/schema/main/delta/43/quarantine_media.sql +36 -0
- synapse/storage/schema/main/delta/43/url_cache.sql +35 -0
- synapse/storage/schema/main/delta/43/user_share.sql +52 -0
- synapse/storage/schema/main/delta/44/expire_url_cache.sql +60 -0
- synapse/storage/schema/main/delta/45/group_server.sql +186 -0
- synapse/storage/schema/main/delta/45/profile_cache.sql +47 -0
- synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql +36 -0
- synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql +54 -0
- synapse/storage/schema/main/delta/46/group_server.sql +51 -0
- synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql +43 -0
- synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql +54 -0
- synapse/storage/schema/main/delta/46/user_dir_typos.sql +43 -0
- synapse/storage/schema/main/delta/47/last_access_media.sql +35 -0
- synapse/storage/schema/main/delta/47/postgres_fts_gin.sql +36 -0
- synapse/storage/schema/main/delta/47/push_actions_staging.sql +47 -0
- synapse/storage/schema/main/delta/48/add_user_consent.sql +37 -0
- synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql +36 -0
- synapse/storage/schema/main/delta/48/deactivated_users.sql +44 -0
- synapse/storage/schema/main/delta/48/group_unique_indexes.py +67 -0
- synapse/storage/schema/main/delta/48/groups_joinable.sql +41 -0
- synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql +39 -0
- synapse/storage/schema/main/delta/49/add_user_daily_visits.sql +40 -0
- synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql +36 -0
- synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql +38 -0
- synapse/storage/schema/main/delta/50/erasure_store.sql +40 -0
- synapse/storage/schema/main/delta/50/make_event_content_nullable.py +102 -0
- synapse/storage/schema/main/delta/51/e2e_room_keys.sql +58 -0
- synapse/storage/schema/main/delta/51/monthly_active_users.sql +46 -0
- synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql +38 -0
- synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql +55 -0
- synapse/storage/schema/main/delta/52/e2e_room_keys.sql +72 -0
- synapse/storage/schema/main/delta/53/add_user_type_to_users.sql +38 -0
- synapse/storage/schema/main/delta/53/drop_sent_transactions.sql +35 -0
- synapse/storage/schema/main/delta/53/event_format_version.sql +35 -0
- synapse/storage/schema/main/delta/53/user_dir_populate.sql +49 -0
- synapse/storage/schema/main/delta/53/user_ips_index.sql +49 -0
- synapse/storage/schema/main/delta/53/user_share.sql +63 -0
- synapse/storage/schema/main/delta/53/user_threepid_id.sql +48 -0
- synapse/storage/schema/main/delta/53/users_in_public_rooms.sql +47 -0
- synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql +49 -0
- synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql +42 -0
- synapse/storage/schema/main/delta/54/delete_forward_extremities.sql +42 -0
- synapse/storage/schema/main/delta/54/drop_legacy_tables.sql +49 -0
- synapse/storage/schema/main/delta/54/drop_presence_list.sql +35 -0
- synapse/storage/schema/main/delta/54/relations.sql +46 -0
- synapse/storage/schema/main/delta/54/stats.sql +99 -0
- synapse/storage/schema/main/delta/54/stats2.sql +47 -0
- synapse/storage/schema/main/delta/55/access_token_expiry.sql +37 -0
- synapse/storage/schema/main/delta/55/track_threepid_validations.sql +50 -0
- synapse/storage/schema/main/delta/55/users_alter_deactivated.sql +38 -0
- synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql +39 -0
- synapse/storage/schema/main/delta/56/current_state_events_membership.sql +41 -0
- synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql +43 -0
- synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql +44 -0
- synapse/storage/schema/main/delta/56/destinations_failure_ts.sql +44 -0
- synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres +18 -0
- synapse/storage/schema/main/delta/56/device_stream_id_insert.sql +39 -0
- synapse/storage/schema/main/delta/56/devices_last_seen.sql +43 -0
- synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql +39 -0
- synapse/storage/schema/main/delta/56/event_expiry.sql +40 -0
- synapse/storage/schema/main/delta/56/event_labels.sql +49 -0
- synapse/storage/schema/main/delta/56/event_labels_background_update.sql +36 -0
- synapse/storage/schema/main/delta/56/fix_room_keys_index.sql +37 -0
- synapse/storage/schema/main/delta/56/hidden_devices.sql +37 -0
- synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite +42 -0
- synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql +48 -0
- synapse/storage/schema/main/delta/56/public_room_list_idx.sql +35 -0
- synapse/storage/schema/main/delta/56/redaction_censor.sql +35 -0
- synapse/storage/schema/main/delta/56/redaction_censor2.sql +41 -0
- synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres +25 -0
- synapse/storage/schema/main/delta/56/redaction_censor4.sql +35 -0
- synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql +38 -0
- synapse/storage/schema/main/delta/56/room_key_etag.sql +36 -0
- synapse/storage/schema/main/delta/56/room_membership_idx.sql +37 -0
- synapse/storage/schema/main/delta/56/room_retention.sql +52 -0
- synapse/storage/schema/main/delta/56/signing_keys.sql +75 -0
- synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql +41 -0
- synapse/storage/schema/main/delta/56/stats_separated.sql +175 -0
- synapse/storage/schema/main/delta/56/unique_user_filter_index.py +46 -0
- synapse/storage/schema/main/delta/56/user_external_ids.sql +43 -0
- synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql +36 -0
- synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql +41 -0
- synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql +44 -0
- synapse/storage/schema/main/delta/57/local_current_membership.py +111 -0
- synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql +40 -0
- synapse/storage/schema/main/delta/57/rooms_version_column.sql +43 -0
- synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres +35 -0
- synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite +22 -0
- synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres +39 -0
- synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite +23 -0
- synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql +41 -0
- synapse/storage/schema/main/delta/58/03persist_ui_auth.sql +55 -0
- synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres +30 -0
- synapse/storage/schema/main/delta/58/06dlols_unique_idx.py +83 -0
- synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres +33 -0
- synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite +44 -0
- synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql +44 -0
- synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres +18 -0
- synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite +18 -0
- synapse/storage/schema/main/delta/58/09shadow_ban.sql +37 -0
- synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql +47 -0
- synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql +41 -0
- synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql +41 -0
- synapse/storage/schema/main/delta/58/11dehydration.sql +39 -0
- synapse/storage/schema/main/delta/58/11fallback.sql +43 -0
- synapse/storage/schema/main/delta/58/11user_id_seq.py +38 -0
- synapse/storage/schema/main/delta/58/12room_stats.sql +51 -0
- synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql +36 -0
- synapse/storage/schema/main/delta/58/14events_instance_name.sql +35 -0
- synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres +28 -0
- synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql +61 -0
- synapse/storage/schema/main/delta/58/15unread_count.sql +45 -0
- synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql +41 -0
- synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql +40 -0
- synapse/storage/schema/main/delta/58/18stream_positions.sql +41 -0
- synapse/storage/schema/main/delta/58/19instance_map.sql.postgres +25 -0
- synapse/storage/schema/main/delta/58/19txn_id.sql +59 -0
- synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql +36 -0
- synapse/storage/schema/main/delta/58/20user_daily_visits.sql +37 -0
- synapse/storage/schema/main/delta/58/21as_device_stream.sql +36 -0
- synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql +1 -0
- synapse/storage/schema/main/delta/58/22puppet_token.sql +36 -0
- synapse/storage/schema/main/delta/58/22users_have_local_media.sql +2 -0
- synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql +36 -0
- synapse/storage/schema/main/delta/58/24drop_event_json_index.sql +38 -0
- synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql +36 -0
- synapse/storage/schema/main/delta/58/26access_token_last_validated.sql +37 -0
- synapse/storage/schema/main/delta/58/27local_invites.sql +37 -0
- synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres +16 -0
- synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite +62 -0
- synapse/storage/schema/main/delta/59/01ignored_user.py +85 -0
- synapse/storage/schema/main/delta/59/02shard_send_to_device.sql +37 -0
- synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres +25 -0
- synapse/storage/schema/main/delta/59/04_event_auth_chains.sql +71 -0
- synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres +16 -0
- synapse/storage/schema/main/delta/59/04drop_account_data.sql +36 -0
- synapse/storage/schema/main/delta/59/05cache_invalidation.sql +36 -0
- synapse/storage/schema/main/delta/59/06chain_cover_index.sql +36 -0
- synapse/storage/schema/main/delta/59/06shard_account_data.sql +39 -0
- synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres +32 -0
- synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql +37 -0
- synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql +39 -0
- synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql +39 -0
- synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql +45 -0
- synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql +36 -0
- synapse/storage/schema/main/delta/59/11add_knock_members_to_stats.sql +39 -0
- synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres +22 -0
- synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql +37 -0
- synapse/storage/schema/main/delta/59/12presence_stream_instance.sql +37 -0
- synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres +20 -0
- synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql +53 -0
- synapse/storage/schema/main/delta/59/14refresh_tokens.sql +53 -0
- synapse/storage/schema/main/delta/59/15locks.sql +56 -0
- synapse/storage/schema/main/delta/59/16federation_inbound_staging.sql +51 -0
- synapse/storage/schema/main/delta/60/01recreate_stream_ordering.sql.postgres +45 -0
- synapse/storage/schema/main/delta/60/02change_stream_ordering_columns.sql.postgres +30 -0
- synapse/storage/schema/main/delta/61/01change_appservices_txns.sql.postgres +23 -0
- synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql +68 -0
- synapse/storage/schema/main/delta/61/02drop_redundant_room_depth_index.sql +37 -0
- synapse/storage/schema/main/delta/61/03recreate_min_depth.py +74 -0
- synapse/storage/schema/main/delta/62/01insertion_event_extremities.sql +43 -0
- synapse/storage/schema/main/delta/63/01create_registration_tokens.sql +42 -0
- synapse/storage/schema/main/delta/63/02delete_unlinked_email_pushers.sql +39 -0
- synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql +36 -0
- synapse/storage/schema/main/delta/63/03session_store.sql +42 -0
- synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql +37 -0
- synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.postgres +23 -0
- synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.sqlite +37 -0
- synapse/storage/schema/main/delta/65/01msc2716_insertion_event_edges.sql +38 -0
- synapse/storage/schema/main/delta/65/03remove_hidden_devices_from_device_inbox.sql +41 -0
- synapse/storage/schema/main/delta/65/04_local_group_updates.sql +37 -0
- synapse/storage/schema/main/delta/65/05_remove_room_stats_historical_and_user_stats_historical.sql +38 -0
- synapse/storage/schema/main/delta/65/06remove_deleted_devices_from_device_inbox.sql +53 -0
- synapse/storage/schema/main/delta/65/07_arbitrary_relations.sql +37 -0
- synapse/storage/schema/main/delta/65/08_device_inbox_background_updates.sql +37 -0
- synapse/storage/schema/main/delta/65/10_expirable_refresh_tokens.sql +47 -0
- synapse/storage/schema/main/delta/65/11_devices_auth_provider_session.sql +46 -0
- synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql +37 -0
- synapse/storage/schema/main/delta/68/01event_columns.sql +45 -0
- synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql +40 -0
- synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql +39 -0
- synapse/storage/schema/main/delta/68/04_refresh_tokens_index_next_token_id.sql +47 -0
- synapse/storage/schema/main/delta/68/04partial_state_rooms.sql +60 -0
- synapse/storage/schema/main/delta/68/05_delete_non_strings_from_event_search.sql.sqlite +22 -0
- synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py +80 -0
- synapse/storage/schema/main/delta/68/06_msc3202_add_device_list_appservice_stream_type.sql +42 -0
- synapse/storage/schema/main/delta/69/01as_txn_seq.py +54 -0
- synapse/storage/schema/main/delta/69/01device_list_oubound_by_room.sql +57 -0
- synapse/storage/schema/main/delta/69/02cache_invalidation_index.sql +37 -0
- synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql +39 -0
- synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.postgres +43 -0
- synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.sqlite +47 -0
- synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql +80 -0
- synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql +37 -0
- synapse/storage/schema/main/delta/72/01add_room_type_to_state_stats.sql +38 -0
- synapse/storage/schema/main/delta/72/01event_push_summary_receipt.sql +54 -0
- synapse/storage/schema/main/delta/72/02event_push_actions_index.sql +38 -0
- synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py +57 -0
- synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql +36 -0
- synapse/storage/schema/main/delta/72/03remove_groups.sql +50 -0
- synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres +17 -0
- synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite +40 -0
- synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql +38 -0
- synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql +38 -0
- synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql +35 -0
- synapse/storage/schema/main/delta/72/06thread_notifications.sql +49 -0
- synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py +67 -0
- synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres +30 -0
- synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite +70 -0
- synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres +23 -0
- synapse/storage/schema/main/delta/72/08thread_receipts.sql +39 -0
- synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite +56 -0
- synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql +48 -0
- synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql +35 -0
- synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql +41 -0
- synapse/storage/schema/main/delta/73/03pusher_device_id.sql +39 -0
- synapse/storage/schema/main/delta/73/03users_approved_column.sql +39 -0
- synapse/storage/schema/main/delta/73/04partial_join_details.sql +42 -0
- synapse/storage/schema/main/delta/73/04pending_device_list_updates.sql +47 -0
- synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres +22 -0
- synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite +24 -0
- synapse/storage/schema/main/delta/73/06thread_notifications_thread_id_idx.sql +42 -0
- synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.postgres +23 -0
- synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.sqlite +76 -0
- synapse/storage/schema/main/delta/73/09partial_joined_via_destination.sql +37 -0
- synapse/storage/schema/main/delta/73/09threads_table.sql +49 -0
- synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py +71 -0
- synapse/storage/schema/main/delta/73/10login_tokens.sql +54 -0
- synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres +33 -0
- synapse/storage/schema/main/delta/73/12refactor_device_list_outbound_pokes.sql +72 -0
- synapse/storage/schema/main/delta/73/13add_device_lists_index.sql +39 -0
- synapse/storage/schema/main/delta/73/20_un_partial_stated_room_stream.sql +51 -0
- synapse/storage/schema/main/delta/73/21_un_partial_stated_room_stream_seq.sql.postgres +20 -0
- synapse/storage/schema/main/delta/73/22_rebuild_user_dir_stats.sql +48 -0
- synapse/storage/schema/main/delta/73/22_un_partial_stated_event_stream.sql +53 -0
- synapse/storage/schema/main/delta/73/23_fix_thread_index.sql +52 -0
- synapse/storage/schema/main/delta/73/23_un_partial_stated_room_stream_seq.sql.postgres +20 -0
- synapse/storage/schema/main/delta/73/24_events_jump_to_date_index.sql +36 -0
- synapse/storage/schema/main/delta/73/25drop_presence.sql +36 -0
- synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql +58 -0
- synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql +38 -0
- synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres +29 -0
- synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite +23 -0
- synapse/storage/schema/main/delta/74/03_room_membership_index.sql +38 -0
- synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql +36 -0
- synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py +87 -0
- synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql +72 -0
- synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres +52 -0
- synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql +39 -0
- synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql +39 -0
- synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql +46 -0
- synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql +43 -0
- synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres +16 -0
- synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres +16 -0
- synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql +35 -0
- synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql +35 -0
- synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql +67 -0
- synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite +102 -0
- synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres +27 -0
- synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres +27 -0
- synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres +29 -0
- synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql +39 -0
- synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py +99 -0
- synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py +100 -0
- synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py +72 -0
- synapse/storage/schema/main/delta/78/03event_extremities_constraints.py +65 -0
- synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py +32 -0
- synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres +102 -0
- synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite +72 -0
- synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py +70 -0
- synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres +69 -0
- synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite +65 -0
- synapse/storage/schema/main/delta/80/01_users_alter_locked.sql +35 -0
- synapse/storage/schema/main/delta/80/02_read_write_locks_unlogged.sql.postgres +30 -0
- synapse/storage/schema/main/delta/80/02_scheduled_tasks.sql +47 -0
- synapse/storage/schema/main/delta/80/03_read_write_locks_triggers.sql.postgres +37 -0
- synapse/storage/schema/main/delta/80/04_read_write_locks_deadlock.sql.postgres +71 -0
- synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql +35 -0
- synapse/storage/schema/main/delta/82/04_add_indices_for_purging_rooms.sql +39 -0
- synapse/storage/schema/main/delta/82/05gaps.sql +44 -0
- synapse/storage/schema/main/delta/83/01_drop_old_tables.sql +43 -0
- synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite +17 -0
- synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql +34 -0
- synapse/storage/schema/main/delta/83/06_event_push_summary_room.sql +36 -0
- synapse/storage/schema/main/delta/84/01_auth_links_stats.sql.postgres +20 -0
- synapse/storage/schema/main/delta/84/02_auth_links_index.sql +16 -0
- synapse/storage/schema/main/delta/84/03_auth_links_analyze.sql.postgres +16 -0
- synapse/storage/schema/main/delta/84/04_access_token_index.sql +15 -0
- synapse/storage/schema/main/delta/85/01_add_suspended.sql +14 -0
- synapse/storage/schema/main/delta/85/02_add_instance_names.sql +27 -0
- synapse/storage/schema/main/delta/85/03_new_sequences.sql.postgres +54 -0
- synapse/storage/schema/main/delta/85/04_cleanup_device_federation_outbox.sql +15 -0
- synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql +16 -0
- synapse/storage/schema/main/delta/85/06_add_room_reports.sql +20 -0
- synapse/storage/schema/main/delta/86/01_authenticate_media.sql +15 -0
- synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql +15 -0
- synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql +169 -0
- synapse/storage/schema/main/delta/87/02_per_connection_state.sql +81 -0
- synapse/storage/schema/main/delta/87/03_current_state_index.sql +19 -0
- synapse/storage/schema/main/delta/88/01_add_delayed_events.sql +43 -0
- synapse/storage/schema/main/delta/88/01_custom_profile_fields.sql +15 -0
- synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql +21 -0
- synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql +18 -0
- synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql +18 -0
- synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres +19 -0
- synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite +19 -0
- synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql +20 -0
- synapse/storage/schema/main/delta/88/06_events_received_ts_index.sql +17 -0
- synapse/storage/schema/main/delta/89/01_sliding_sync_membership_snapshot_index.sql +15 -0
- synapse/storage/schema/main/delta/90/01_add_column_participant_room_memberships_table.sql +16 -0
- synapse/storage/schema/main/delta/91/01_media_hash.sql +28 -0
- synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres +16 -0
- synapse/storage/schema/main/delta/92/01_remove_trigger.sql.sqlite +16 -0
- synapse/storage/schema/main/delta/92/02_remove_populate_participant_bg_update.sql +17 -0
- synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql +16 -0
- synapse/storage/schema/main/delta/92/04_thread_subscriptions.sql +59 -0
- synapse/storage/schema/main/delta/92/04_thread_subscriptions_seq.sql.postgres +19 -0
- synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql +17 -0
- synapse/storage/schema/main/delta/92/05_thread_subscriptions_comments.sql.postgres +18 -0
- synapse/storage/schema/main/delta/92/06_device_federation_inbox_index.sql +16 -0
- synapse/storage/schema/main/delta/92/06_threads_last_sent_stream_ordering_comments.sql.postgres +24 -0
- synapse/storage/schema/main/delta/92/07_add_user_reports.sql +22 -0
- synapse/storage/schema/main/delta/92/07_event_txn_id_device_id_txn_id2.sql +15 -0
- synapse/storage/schema/main/delta/92/08_room_ban_redactions.sql +21 -0
- synapse/storage/schema/main/delta/92/08_thread_subscriptions_seq_fixup.sql.postgres +19 -0
- synapse/storage/schema/main/delta/92/09_thread_subscriptions_update.sql +20 -0
- synapse/storage/schema/main/delta/92/09_thread_subscriptions_update.sql.postgres +18 -0
- synapse/storage/schema/main/delta/93/01_add_delayed_events.sql +15 -0
- synapse/storage/schema/main/full_schemas/72/full.sql.postgres +1344 -0
- synapse/storage/schema/main/full_schemas/72/full.sql.sqlite +646 -0
- synapse/storage/schema/state/delta/23/drop_state_index.sql +35 -0
- synapse/storage/schema/state/delta/32/remove_state_indices.sql +38 -0
- synapse/storage/schema/state/delta/35/add_state_index.sql +36 -0
- synapse/storage/schema/state/delta/35/state.sql +41 -0
- synapse/storage/schema/state/delta/35/state_dedupe.sql +36 -0
- synapse/storage/schema/state/delta/47/state_group_seq.py +38 -0
- synapse/storage/schema/state/delta/56/state_group_room_idx.sql +36 -0
- synapse/storage/schema/state/delta/61/02state_groups_state_n_distinct.sql.postgres +34 -0
- synapse/storage/schema/state/delta/70/08_state_group_edges_unique.sql +36 -0
- synapse/storage/schema/state/delta/89/01_state_groups_deletion.sql +39 -0
- synapse/storage/schema/state/delta/90/02_delete_unreferenced_state_groups.sql +16 -0
- synapse/storage/schema/state/delta/90/03_remove_old_deletion_bg_update.sql +15 -0
- synapse/storage/schema/state/full_schemas/72/full.sql.postgres +30 -0
- synapse/storage/schema/state/full_schemas/72/full.sql.sqlite +20 -0
- synapse/storage/types.py +183 -0
- synapse/storage/util/__init__.py +20 -0
- synapse/storage/util/id_generators.py +928 -0
- synapse/storage/util/partial_state_events_tracker.py +194 -0
- synapse/storage/util/sequence.py +315 -0
- synapse/streams/__init__.py +43 -0
- synapse/streams/config.py +91 -0
- synapse/streams/events.py +203 -0
- synapse/synapse_rust/__init__.pyi +3 -0
- synapse/synapse_rust/acl.pyi +20 -0
- synapse/synapse_rust/events.pyi +136 -0
- synapse/synapse_rust/http_client.pyi +32 -0
- synapse/synapse_rust/push.pyi +86 -0
- synapse/synapse_rust/rendezvous.pyi +30 -0
- synapse/synapse_rust/segmenter.pyi +1 -0
- synapse/synapse_rust.abi3.so +0 -0
- synapse/types/__init__.py +1600 -0
- synapse/types/handlers/__init__.py +93 -0
- synapse/types/handlers/policy_server.py +16 -0
- synapse/types/handlers/sliding_sync.py +908 -0
- synapse/types/rest/__init__.py +25 -0
- synapse/types/rest/client/__init__.py +413 -0
- synapse/types/state.py +634 -0
- synapse/types/storage/__init__.py +66 -0
- synapse/util/__init__.py +169 -0
- synapse/util/async_helpers.py +1045 -0
- synapse/util/background_queue.py +142 -0
- synapse/util/batching_queue.py +202 -0
- synapse/util/caches/__init__.py +300 -0
- synapse/util/caches/cached_call.py +143 -0
- synapse/util/caches/deferred_cache.py +530 -0
- synapse/util/caches/descriptors.py +692 -0
- synapse/util/caches/dictionary_cache.py +346 -0
- synapse/util/caches/expiringcache.py +249 -0
- synapse/util/caches/lrucache.py +975 -0
- synapse/util/caches/response_cache.py +322 -0
- synapse/util/caches/stream_change_cache.py +370 -0
- synapse/util/caches/treecache.py +189 -0
- synapse/util/caches/ttlcache.py +197 -0
- synapse/util/cancellation.py +63 -0
- synapse/util/check_dependencies.py +335 -0
- synapse/util/clock.py +567 -0
- synapse/util/constants.py +22 -0
- synapse/util/daemonize.py +165 -0
- synapse/util/distributor.py +157 -0
- synapse/util/events.py +134 -0
- synapse/util/file_consumer.py +164 -0
- synapse/util/frozenutils.py +57 -0
- synapse/util/gai_resolver.py +178 -0
- synapse/util/hash.py +38 -0
- synapse/util/httpresourcetree.py +108 -0
- synapse/util/iterutils.py +189 -0
- synapse/util/json.py +56 -0
- synapse/util/linked_list.py +156 -0
- synapse/util/logcontext.py +46 -0
- synapse/util/logformatter.py +28 -0
- synapse/util/macaroons.py +325 -0
- synapse/util/manhole.py +191 -0
- synapse/util/metrics.py +339 -0
- synapse/util/module_loader.py +116 -0
- synapse/util/msisdn.py +51 -0
- synapse/util/patch_inline_callbacks.py +250 -0
- synapse/util/pydantic_models.py +63 -0
- synapse/util/ratelimitutils.py +419 -0
- synapse/util/retryutils.py +339 -0
- synapse/util/rlimit.py +42 -0
- synapse/util/rust.py +133 -0
- synapse/util/sentinel.py +21 -0
- synapse/util/stringutils.py +293 -0
- synapse/util/task_scheduler.py +493 -0
- synapse/util/templates.py +126 -0
- synapse/util/threepids.py +123 -0
- synapse/util/wheel_timer.py +112 -0
- synapse/visibility.py +835 -0
@@ -0,0 +1,2910 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2019-2022 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#

import logging
from typing import TYPE_CHECKING, cast

import attr

from synapse.api.constants import (
    MAX_DEPTH,
    EventContentFields,
    Membership,
    RelationTypes,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase, make_event_from_dict
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
    make_tuple_comparison_clause,
)
from synapse.storage.databases.main.events import (
    SLIDING_SYNC_RELEVANT_STATE_SET,
    PersistEventsStore,
    SlidingSyncMembershipInfoWithEventPos,
    SlidingSyncMembershipSnapshotSharedInsertValues,
    SlidingSyncStateInsertValues,
)
from synapse.storage.databases.main.events_worker import (
    DatabaseCorruptionError,
    InvalidEventError,
)
from synapse.storage.databases.main.state_deltas import StateDeltasStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
from synapse.types import JsonDict, RoomStreamToken, StateMap, StrCollection
from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
from synapse.types.state import StateFilter
from synapse.types.storage import _BackgroundUpdates
from synapse.util.iterutils import batch_iter
from synapse.util.json import json_encoder

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


_REPLACE_STREAM_ORDERING_SQL_COMMANDS = (
    # there should be no leftover rows without a stream_ordering2, but just in case...
    "UPDATE events SET stream_ordering2 = stream_ordering WHERE stream_ordering2 IS NULL",
    # now we can drop the rule and switch the columns
    "DROP RULE populate_stream_ordering2 ON events",
    "ALTER TABLE events DROP COLUMN stream_ordering",
    "ALTER TABLE events RENAME COLUMN stream_ordering2 TO stream_ordering",
    # ... and finally, rename the indexes into place for consistency with sqlite
    "ALTER INDEX event_contains_url_index2 RENAME TO event_contains_url_index",
    "ALTER INDEX events_order_room2 RENAME TO events_order_room",
    "ALTER INDEX events_room_stream2 RENAME TO events_room_stream",
    "ALTER INDEX events_ts2 RENAME TO events_ts",
)
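For context, the commands above are plain Postgres DDL that must run strictly in order once `stream_ordering2` is fully populated. A minimal sketch of applying such a command list, assuming a direct psycopg2 connection purely for illustration (Synapse actually runs these through its own database pool, not like this):

# Illustrative only: apply a sequence of DDL commands one at a time.
# psycopg2 and the `dsn` parameter are assumptions made for this sketch.
import psycopg2

def apply_commands(dsn: str, commands: tuple[str, ...]) -> None:
    conn = psycopg2.connect(dsn)
    try:
        with conn:  # one transaction: commit on success, roll back on error
            with conn.cursor() as cur:
                for command in commands:
                    cur.execute(command)
    finally:
        conn.close()

Postgres supports transactional DDL, so the column swap and index renames either all land or none do.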
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _CalculateChainCover:
    """Return value for _calculate_chain_cover_txn."""

    # The last room_id/depth/stream processed.
    room_id: str
    depth: int
    stream: int

    # Number of rows processed
    processed_count: int

    # Map from room_id to last depth/stream processed for each room that we have
    # processed all events for (i.e. the rooms we can flip the
    # `has_auth_chain_index` for)
    finished_room_map: dict[str, tuple[int, int]]


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _JoinedRoomStreamOrderingUpdate:
    """
    Intermediate container class used in `SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE`
    """

    # The most recent event stream_ordering for the room
    most_recent_event_stream_ordering: int
    # The most recent event `bump_stamp` for the room
    most_recent_bump_stamp: int | None


class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME,
            self._background_reindex_origin_server_ts,
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
            self._background_reindex_fields_sender,
        )

        self.db_pool.updates.register_background_index_update(
            "event_contains_url_index",
            index_name="event_contains_url_index",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering"],
            where_clause="contains_url = true AND outlier = false",
        )

        # an event_id index on event_search is useful for the purge_history
        # api. Plus it means we get to enforce some integrity with a UNIQUE
        # clause
        self.db_pool.updates.register_background_index_update(
            "event_search_event_id_idx",
            index_name="event_search_event_id_idx",
            table="event_search",
            columns=["event_id"],
            unique=True,
            psql_only=True,
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES,
            self._cleanup_extremities_bg_update,
        )

        self.db_pool.updates.register_background_update_handler(
            "redactions_received_ts", self._redactions_received_ts
        )

        # This index gets deleted in `event_fix_redactions_bytes` update
        self.db_pool.updates.register_background_index_update(
            "event_fix_redactions_bytes_create_index",
            index_name="redactions_censored_redacts",
            table="redactions",
            columns=["redacts"],
            where_clause="have_censored",
        )

        self.db_pool.updates.register_background_update_handler(
            "event_fix_redactions_bytes", self._event_fix_redactions_bytes
        )

        self.db_pool.updates.register_background_update_handler(
            "event_store_labels", self._event_store_labels
        )

        self.db_pool.updates.register_background_index_update(
            "redactions_have_censored_ts_idx",
            index_name="redactions_have_censored_ts",
            table="redactions",
            columns=["received_ts"],
            where_clause="NOT have_censored",
        )

        self.db_pool.updates.register_background_index_update(
            "users_have_local_media",
            index_name="users_have_local_media",
            table="local_media_repository",
            columns=["user_id", "created_ts"],
        )

        self.db_pool.updates.register_background_update_handler(
            "rejected_events_metadata",
            self._rejected_events_metadata,
        )

        self.db_pool.updates.register_background_update_handler(
            "chain_cover",
            self._chain_cover_index,
        )

        self.db_pool.updates.register_background_update_handler(
            "purged_chain_cover",
            self._purged_chain_cover_index,
        )

        self.db_pool.updates.register_background_update_handler(
            "event_arbitrary_relations",
            self._event_arbitrary_relations,
        )

        ################################################################################

        # bg updates for replacing stream_ordering with a BIGINT
        # (these only run on postgres.)

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
            self._background_populate_stream_ordering2,
        )
        # CREATE UNIQUE INDEX events_stream_ordering ON events(stream_ordering2);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2,
            index_name="events_stream_ordering",
            table="events",
            columns=["stream_ordering2"],
            unique=True,
        )
        # CREATE INDEX event_contains_url_index ON events(room_id, topological_ordering, stream_ordering) WHERE contains_url = true AND outlier = false;
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_CONTAINS_URL,
            index_name="event_contains_url_index2",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering2"],
            where_clause="contains_url = true AND outlier = false",
        )
        # CREATE INDEX events_order_room ON events(room_id, topological_ordering, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_ORDER,
            index_name="events_order_room2",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering2"],
        )
        # CREATE INDEX events_room_stream ON events(room_id, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_STREAM,
            index_name="events_room_stream2",
            table="events",
            columns=["room_id", "stream_ordering2"],
        )
        # CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_TS,
            index_name="events_ts2",
            table="events",
            columns=["origin_server_ts", "stream_ordering2"],
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN,
            self._background_replace_stream_ordering_column,
        )

        ################################################################################

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENT_EDGES_DROP_INVALID_ROWS,
            self._background_drop_invalid_event_edges_rows,
        )

        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.EVENT_EDGES_REPLACE_INDEX,
            index_name="event_edges_event_id_prev_event_id_idx",
            table="event_edges",
            columns=["event_id", "prev_event_id"],
            unique=True,
            # the old index which just covered event_id is now redundant.
            replaces_index="ev_edges_id",
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS,
            self._background_events_populate_state_key_rejections,
        )

        # Add an index that would be useful for jumping to date using
        # get_event_id_for_timestamp.
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.EVENTS_JUMP_TO_DATE_INDEX,
            index_name="events_jump_to_date_idx",
            table="events",
            columns=["room_id", "origin_server_ts"],
            where_clause="NOT outlier",
        )

        # These indices are needed to validate the foreign key constraint
        # when events are deleted.
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.CURRENT_STATE_EVENTS_STREAM_ORDERING_INDEX_UPDATE_NAME,
            index_name="current_state_events_stream_ordering_idx",
            table="current_state_events",
            columns=["event_stream_ordering"],
        )
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.ROOM_MEMBERSHIPS_STREAM_ORDERING_INDEX_UPDATE_NAME,
            index_name="room_memberships_stream_ordering_idx",
            table="room_memberships",
            columns=["event_stream_ordering"],
        )
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.LOCAL_CURRENT_MEMBERSHIP_STREAM_ORDERING_INDEX_UPDATE_NAME,
            index_name="local_current_membership_stream_ordering_idx",
            table="local_current_membership",
            columns=["event_stream_ordering"],
        )

        # Handle background updates for Sliding Sync tables
        #
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
            self._sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update,
        )
        # Add some background updates to populate the sliding sync tables
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
            self._sliding_sync_joined_rooms_bg_update,
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
            self._sliding_sync_membership_snapshots_bg_update,
        )
        # Add a background update to fix data integrity issue in the
        # `sliding_sync_membership_snapshots` -> `forgotten` column
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE,
            self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update,
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, self.fixup_max_depth_cap_bg_update
        )

        # We want this to run on the main database at startup before we start processing
        # events.
        #
        # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
        # foreground update for
        # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
        # https://github.com/element-hq/synapse/issues/17623)
        with db_conn.cursor(txn_name="resolve_sliding_sync") as txn:
            _resolve_stale_data_in_sliding_sync_tables(
                txn=txn,
            )
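Every handler registered above follows the same contract: an async callable receives the `progress` dict persisted in the `background_updates` table plus a `batch_size`, does one batch of work, records new progress, and returns the number of items processed; a falsy return is the signal to call `_end_background_update` so the update never runs again. A stripped-down sketch of that shape, where `MY_UPDATE_NAME` and `_do_one_batch` are hypothetical placeholders rather than Synapse names:

# Hedged sketch of the handler contract used throughout this class.
MY_UPDATE_NAME = "my_background_update"

async def _my_bg_update(self, progress: JsonDict, batch_size: int) -> int:
    last_seen = progress.get("last_seen", "")

    def _txn(txn: LoggingTransaction) -> int:
        rows = _do_one_batch(txn, last_seen, batch_size)  # hypothetical helper
        if not rows:
            return 0
        # Persist how far we got so the update is resumable across restarts.
        self.db_pool.updates._background_update_progress_txn(
            txn, MY_UPDATE_NAME, {"last_seen": rows[-1]}
        )
        return len(rows)

    count = await self.db_pool.runInteraction(MY_UPDATE_NAME, _txn)
    if not count:
        # No more work: mark the update finished.
        await self.db_pool.updates._end_background_update(MY_UPDATE_NAME)
    return count

The two reindex handlers that follow are concrete instances of exactly this pattern.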
    async def _background_reindex_fields_sender(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)

        def reindex_txn(txn: LoggingTransaction) -> int:
            sql = (
                "SELECT stream_ordering, event_id, json FROM events"
                " INNER JOIN event_json USING (event_id)"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            )

            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            min_stream_id = rows[-1][0]

            update_rows = []
            for row in rows:
                try:
                    event_id = row[1]
                    event_json = db_to_json(row[2])
                    sender = event_json["sender"]
                    content = event_json["content"]

                    contains_url = "url" in content
                    if contains_url:
                        contains_url &= isinstance(content["url"], str)
                except (KeyError, AttributeError):
                    # If the event is missing a necessary field then
                    # skip over it.
                    continue

                update_rows.append((sender, contains_url, event_id))

            sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?"

            txn.execute_batch(sql, update_rows)

            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows),
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
            )

            return len(rows)

        result = await self.db_pool.runInteraction(
            _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
        )

        if not result:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
            )

        return result

    async def _background_reindex_origin_server_ts(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)

        def reindex_search_txn(txn: LoggingTransaction) -> int:
            sql = (
                "SELECT stream_ordering, event_id FROM events"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            )

            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            min_stream_id = rows[-1][0]
            event_ids = [row[1] for row in rows]

            rows_to_update = []

            chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]
            for chunk in chunks:
                ev_rows = cast(
                    list[tuple[str, str]],
                    self.db_pool.simple_select_many_txn(
                        txn,
                        table="event_json",
                        column="event_id",
                        iterable=chunk,
                        retcols=["event_id", "json"],
                        keyvalues={},
                    ),
                )

                for event_id, json in ev_rows:
                    event_json = db_to_json(json)
                    try:
                        origin_server_ts = event_json["origin_server_ts"]
                    except (KeyError, AttributeError):
                        # If the event is missing a necessary field then
                        # skip over it.
                        continue

                    rows_to_update.append((origin_server_ts, event_id))

            sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"

            txn.execute_batch(sql, rows_to_update)

            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows_to_update),
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, progress
            )

            return len(rows_to_update)

        result = await self.db_pool.runInteraction(
            _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
        )

        if not result:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME
            )

        return result
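Both reindex handlers page through `events` in descending `stream_ordering` order, so `max_stream_id_exclusive` shrinks toward `target_min_stream_id_inclusive` on every pass, and `_background_reindex_origin_server_ts` additionally slices its event IDs into sub-batches of 100 before querying `event_json`. That slicing is plain list chunking; a self-contained illustration:

# Plain-Python illustration of the 100-at-a-time chunking used above.
event_ids = [f"$event{i}" for i in range(250)]

chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]

assert [len(c) for c in chunks] == [100, 100, 50]
# Each chunk is then handed to simple_select_many_txn, which expands it
# into an `event_id IN (...)` clause of bounded size.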
    async def _cleanup_extremities_bg_update(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Background update to clean out extremities that should have been
        deleted previously.

        Mainly used to deal with the aftermath of https://github.com/matrix-org/synapse/issues/5269.
        """

        # This works by first copying all existing forward extremities into the
        # `_extremities_to_check` table at start up, and then checking each
        # event in that table whether we have any descendants that are not
        # soft-failed/rejected. If that is the case then we delete that event
        # from the forward extremities table.
        #
        # For efficiency, we do this in batches by recursively pulling out all
        # descendants of a batch until we find the non soft-failed/rejected
        # events, i.e. the set of descendants whose chain of prev events back
        # to the batch of extremities are all soft-failed or rejected.
        # Typically, we won't find any such events as extremities will rarely
        # have any descendants, but if they do then we should delete those
        # extremities.

        def _cleanup_extremities_bg_update_txn(txn: LoggingTransaction) -> int:
            # The set of extremity event IDs that we're checking this round
            original_set = set()

            # A dict[str, set[str]] of event ID to their prev events.
            graph: dict[str, set[str]] = {}

            # The set of descendants of the original set that are not rejected
            # nor soft-failed. Ancestors of these events should be removed
            # from the forward extremities table.
            non_rejected_leaves = set()

            # Set of event IDs that have been soft failed, and for which we
            # should check if they have descendants which haven't been soft
            # failed.
            soft_failed_events_to_lookup = set()

            # First, we get `batch_size` events from the table, pulling out
            # their successor events, if any, and the successor events'
            # rejection status.
            txn.execute(
                """SELECT prev_event_id, event_id, internal_metadata,
                    rejections.event_id IS NOT NULL, events.outlier
                FROM (
                    SELECT event_id AS prev_event_id
                    FROM _extremities_to_check
                    LIMIT ?
                ) AS f
                LEFT JOIN event_edges USING (prev_event_id)
                LEFT JOIN events USING (event_id)
                LEFT JOIN event_json USING (event_id)
                LEFT JOIN rejections USING (event_id)
                """,
                (batch_size,),
            )

            for prev_event_id, event_id, metadata, rejected, outlier in txn:
                original_set.add(prev_event_id)

                if not event_id or outlier:
                    # Common case where the forward extremity doesn't have any
                    # descendants.
                    continue

                graph.setdefault(event_id, set()).add(prev_event_id)

                soft_failed = False
                if metadata:
                    soft_failed = db_to_json(metadata).get("soft_failed")

                if soft_failed or rejected:
                    soft_failed_events_to_lookup.add(event_id)
                else:
                    non_rejected_leaves.add(event_id)

            # Now we recursively check all the soft-failed descendants we
            # found above in the same way, until we have nothing left to
            # check.
            while soft_failed_events_to_lookup:
                # We only want to do 100 at a time, so we split given list
                # into two.
                batch = list(soft_failed_events_to_lookup)
                to_check, to_defer = batch[:100], batch[100:]
                soft_failed_events_to_lookup = set(to_defer)

                sql = """SELECT prev_event_id, event_id, internal_metadata,
                    rejections.event_id IS NOT NULL
                    FROM event_edges
                    INNER JOIN events USING (event_id)
                    INNER JOIN event_json USING (event_id)
                    LEFT JOIN rejections USING (event_id)
                    WHERE
                        NOT events.outlier
                        AND
                """
                clause, args = make_in_list_sql_clause(
                    self.database_engine, "prev_event_id", to_check
                )
                txn.execute(sql + clause, list(args))

                for prev_event_id, event_id, metadata, rejected in txn:
                    if event_id in graph:
                        # Already handled this event previously, but we still
                        # want to record the edge.
                        graph[event_id].add(prev_event_id)
                        continue

                    graph[event_id] = {prev_event_id}

                    soft_failed = db_to_json(metadata).get("soft_failed")
                    if soft_failed or rejected:
                        soft_failed_events_to_lookup.add(event_id)
                    else:
                        non_rejected_leaves.add(event_id)

            # We have a set of non-soft-failed descendants, so we recurse up
            # the graph to find all ancestors and add them to the set of event
            # IDs that we can delete from forward extremities table.
            to_delete = set()
            while non_rejected_leaves:
                event_id = non_rejected_leaves.pop()
                prev_event_ids = graph.get(event_id, set())
                non_rejected_leaves.update(prev_event_ids)
                to_delete.update(prev_event_ids)

            to_delete.intersection_update(original_set)

            deleted = self.db_pool.simple_delete_many_txn(
                txn=txn,
                table="event_forward_extremities",
                column="event_id",
                values=to_delete,
                keyvalues={},
            )

            logger.info(
                "Deleted %d forward extremities of %d checked, to clean up matrix-org/synapse#5269",
                deleted,
                len(original_set),
            )

            if deleted:
                # We now need to invalidate the caches of these rooms
                rows = cast(
                    list[tuple[str]],
                    self.db_pool.simple_select_many_txn(
                        txn,
                        table="events",
                        column="event_id",
                        iterable=to_delete,
                        keyvalues={},
                        retcols=("room_id",),
                    ),
                )
                room_ids = {row[0] for row in rows}
                for room_id in room_ids:
                    txn.call_after(
                        self.get_latest_event_ids_in_room.invalidate,  # type: ignore[attr-defined]
                        (room_id,),
                    )

            self.db_pool.simple_delete_many_txn(
                txn=txn,
                table="_extremities_to_check",
                column="event_id",
                values=original_set,
                keyvalues={},
            )

            return len(original_set)

        num_handled = await self.db_pool.runInteraction(
            "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn
        )

        if not num_handled:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES
            )

            def _drop_table_txn(txn: LoggingTransaction) -> None:
                txn.execute("DROP TABLE _extremities_to_check")

            await self.db_pool.runInteraction(
                "_cleanup_extremities_bg_update_drop_table", _drop_table_txn
            )

        return num_handled
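The deletion step in `_cleanup_extremities_bg_update_txn` is a plain breadth-first walk: starting from the descendants that survived (neither rejected nor soft-failed), it follows `prev_event` edges back up, collects every ancestor, and then intersects with the extremities being checked. The walk in isolation, run against a toy graph:

# Isolated version of the ancestor walk from the transaction above.
# graph maps event_id -> set of prev_event_ids (edges point backwards).
graph = {
    "$b": {"$extremity"},  # $b is a surviving descendant of $extremity
    "$c": {"$b"},
}
non_rejected_leaves = {"$c"}
original_set = {"$extremity", "$other_extremity"}

to_delete = set()
while non_rejected_leaves:
    event_id = non_rejected_leaves.pop()
    prev_event_ids = graph.get(event_id, set())
    non_rejected_leaves.update(prev_event_ids)
    to_delete.update(prev_event_ids)

# Only extremities we actually started from may be deleted; $other_extremity
# had no surviving descendants and is left alone.
to_delete.intersection_update(original_set)
assert to_delete == {"$extremity"}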
    async def _redactions_received_ts(self, progress: JsonDict, batch_size: int) -> int:
        """Handles filling out the `received_ts` column in redactions."""
        last_event_id = progress.get("last_event_id", "")

        def _redactions_received_ts_txn(txn: LoggingTransaction) -> int:
            # Fetch the set of event IDs that we want to update
            sql = """
                SELECT event_id FROM redactions
                WHERE event_id > ?
                ORDER BY event_id ASC
                LIMIT ?
            """

            txn.execute(sql, (last_event_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            (upper_event_id,) = rows[-1]

            # Update the redactions with the received_ts.
            #
            # Note: Not all events have an associated received_ts, so we
            # fallback to using origin_server_ts. If we for some reason don't
            # have an origin_server_ts, lets just use the current timestamp.
            #
            # We don't want to leave it null, as then we'll never try and
            # censor those redactions.
            sql = """
                UPDATE redactions
                SET received_ts = (
                    SELECT COALESCE(received_ts, origin_server_ts, ?) FROM events
                    WHERE events.event_id = redactions.event_id
                )
                WHERE ? <= event_id AND event_id <= ?
            """

            txn.execute(sql, (self.clock.time_msec(), last_event_id, upper_event_id))

            self.db_pool.updates._background_update_progress_txn(
                txn, "redactions_received_ts", {"last_event_id": upper_event_id}
            )

            return len(rows)

        count = await self.db_pool.runInteraction(
            "_redactions_received_ts", _redactions_received_ts_txn
        )

        if not count:
            await self.db_pool.updates._end_background_update("redactions_received_ts")

        return count

    async def _event_fix_redactions_bytes(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Undoes hex encoded censored redacted event JSON."""

        def _event_fix_redactions_bytes_txn(txn: LoggingTransaction) -> None:
            # This update is quite fast due to new index.
            txn.execute(
                """
                UPDATE event_json
                SET
                    json = convert_from(json::bytea, 'utf8')
                FROM redactions
                WHERE
                    redactions.have_censored
                    AND event_json.event_id = redactions.redacts
                    AND json NOT LIKE '{%';
                """
            )

            txn.execute("DROP INDEX redactions_censored_redacts")

        await self.db_pool.runInteraction(
            "_event_fix_redactions_bytes", _event_fix_redactions_bytes_txn
        )

        await self.db_pool.updates._end_background_update("event_fix_redactions_bytes")

        return 1

    async def _event_store_labels(self, progress: JsonDict, batch_size: int) -> int:
        """Background update handler which will store labels for existing events."""
        last_event_id = progress.get("last_event_id", "")

        def _event_store_labels_txn(txn: LoggingTransaction) -> int:
            txn.execute(
                """
                SELECT event_id, json FROM event_json
                LEFT JOIN event_labels USING (event_id)
                WHERE event_id > ? AND label IS NULL
                ORDER BY event_id LIMIT ?
                """,
                (last_event_id, batch_size),
            )

            results = list(txn)

            nbrows = 0
            last_row_event_id = ""
            for event_id, event_json_raw in results:
                try:
                    event_json = db_to_json(event_json_raw)

                    self.db_pool.simple_insert_many_txn(
                        txn=txn,
                        table="event_labels",
                        keys=("event_id", "label", "room_id", "topological_ordering"),
                        values=[
                            (
                                event_id,
                                label,
                                event_json["room_id"],
                                event_json["depth"],
                            )
                            for label in event_json["content"].get(
                                EventContentFields.LABELS, []
                            )
                            if isinstance(label, str)
                        ],
                    )
                except Exception as e:
                    logger.warning(
                        "Unable to load event %s (no labels will be imported): %s",
                        event_id,
                        e,
                    )

                nbrows += 1
                last_row_event_id = event_id

            self.db_pool.updates._background_update_progress_txn(
                txn, "event_store_labels", {"last_event_id": last_row_event_id}
            )

            return nbrows

        num_rows = await self.db_pool.runInteraction(
            desc="event_store_labels", func=_event_store_labels_txn
        )

        if not num_rows:
            await self.db_pool.updates._end_background_update("event_store_labels")

        return num_rows

    async def _rejected_events_metadata(self, progress: dict, batch_size: int) -> int:
        """Adds rejected events to the `state_events` and `event_auth` metadata
        tables.
        """

        last_event_id = progress.get("last_event_id", "")

        def get_rejected_events(
            txn: Cursor,
        ) -> list[tuple[str, str, JsonDict, bool, bool]]:
            # Fetch rejected event json, their room version and whether we have
            # inserted them into the state_events or auth_events tables.
            #
            # Note we can assume that events that don't have a corresponding
            # room version are V1 rooms.
            sql = """
                SELECT DISTINCT
                    event_id,
                    COALESCE(room_version, '1'),
                    json,
                    state_events.event_id IS NOT NULL,
                    event_auth.event_id IS NOT NULL
                FROM rejections
                INNER JOIN event_json USING (event_id)
                LEFT JOIN rooms USING (room_id)
                LEFT JOIN state_events USING (event_id)
                LEFT JOIN event_auth USING (event_id)
                WHERE event_id > ?
                ORDER BY event_id
                LIMIT ?
            """

            txn.execute(
                sql,
                (
                    last_event_id,
                    batch_size,
                ),
            )

            return cast(
                list[tuple[str, str, JsonDict, bool, bool]],
                [(row[0], row[1], db_to_json(row[2]), row[3], row[4]) for row in txn],
            )

        results = await self.db_pool.runInteraction(
            desc="_rejected_events_metadata_get", func=get_rejected_events
        )

        if not results:
            await self.db_pool.updates._end_background_update(
                "rejected_events_metadata"
            )
            return 0

        state_events = []
        auth_events = []
        for event_id, room_version, event_json, has_state, has_event_auth in results:
            last_event_id = event_id

            if has_state and has_event_auth:
                continue

            room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version)
            if not room_version_obj:
                # We no longer support this room version, so we just ignore the
                # events entirely.
                logger.info(
                    "Ignoring event with unknown room version %r: %r",
                    room_version,
                    event_id,
                )
                continue

            event = make_event_from_dict(event_json, room_version_obj)

            if not event.is_state():
                continue

            if not has_state:
                state_events.append(
                    (event.event_id, event.room_id, event.type, event.state_key)
                )

            if not has_event_auth:
                # Old, dodgy, events may have duplicate auth events, which we
                # need to deduplicate as we have a unique constraint.
                for auth_id in set(event.auth_event_ids()):
                    auth_events.append((event.event_id, event.room_id, auth_id))

        if state_events:
            await self.db_pool.simple_insert_many(
                table="state_events",
                keys=("event_id", "room_id", "type", "state_key"),
                values=state_events,
                desc="_rejected_events_metadata_state_events",
            )

        if auth_events:
            await self.db_pool.simple_insert_many(
                table="event_auth",
                keys=("event_id", "room_id", "auth_id"),
                values=auth_events,
                desc="_rejected_events_metadata_event_auth",
            )

        await self.db_pool.updates._background_update_progress(
            "rejected_events_metadata", {"last_event_id": last_event_id}
        )

        if len(results) < batch_size:
            await self.db_pool.updates._end_background_update(
                "rejected_events_metadata"
            )

        return len(results)

    async def _chain_cover_index(self, progress: dict, batch_size: int) -> int:
        """A background updates that iterates over all rooms and generates the
        chain cover index for them.
        """

        current_room_id = progress.get("current_room_id", "")

        # Where we've processed up to in the room, defaults to the start of the
        # room.
        last_depth = progress.get("last_depth", -1)
        last_stream = progress.get("last_stream", -1)

        result = await self.db_pool.runInteraction(
            "_chain_cover_index",
            self._calculate_chain_cover_txn,
            current_room_id,
            last_depth,
            last_stream,
            batch_size,
            single_room=False,
        )

        finished = result.processed_count == 0

        total_rows_processed = result.processed_count
        current_room_id = result.room_id
        last_depth = result.depth
        last_stream = result.stream

        for room_id, (depth, stream) in result.finished_room_map.items():
            # If we've done all the events in the room we flip the
            # `has_auth_chain_index` in the DB. Note that its possible for
            # further events to be persisted between the above and setting the
            # flag without having the chain cover calculated for them. This is
            # fine as a) the code gracefully handles these cases and b) we'll
            # calculate them below.

            await self.db_pool.simple_update(
                table="rooms",
                keyvalues={"room_id": room_id},
                updatevalues={"has_auth_chain_index": True},
                desc="_chain_cover_index",
            )

            # Handle any events that might have raced with us flipping the
            # bit above.
            result = await self.db_pool.runInteraction(
                "_chain_cover_index",
                self._calculate_chain_cover_txn,
                room_id,
                depth,
                stream,
                batch_size=None,
                single_room=True,
            )

            total_rows_processed += result.processed_count

        if finished:
            await self.db_pool.updates._end_background_update("chain_cover")
            return total_rows_processed

        await self.db_pool.updates._background_update_progress(
            "chain_cover",
            {
                "current_room_id": current_room_id,
                "last_depth": last_depth,
                "last_stream": last_stream,
            },
        )

        return total_rows_processed

    def _calculate_chain_cover_txn(
        self,
        txn: LoggingTransaction,
        last_room_id: str,
        last_depth: int,
        last_stream: int,
        batch_size: int | None,
        single_room: bool,
    ) -> _CalculateChainCover:
        """Calculate the chain cover for `batch_size` events, ordered by
        `(room_id, depth, stream)`.

        Args:
            txn,
            last_room_id, last_depth, last_stream: The `(room_id, depth, stream)`
                tuple to fetch results after.
            batch_size: The maximum number of events to process. If None then
                no limit.
            single_room: Whether to calculate the index for just the given
                room.
        """

        # Get the next set of events in the room (that we haven't already
        # computed chain cover for). We do this in topological order.

        # We want to do a `(topological_ordering, stream_ordering) > (?,?)`
        # comparison, but that is not supported on older SQLite versions
        tuple_clause, tuple_args = make_tuple_comparison_clause(
            [
                ("events.room_id", last_room_id),
                ("topological_ordering", last_depth),
                ("stream_ordering", last_stream),
            ],
        )

        extra_clause = ""
        if single_room:
            extra_clause = "AND events.room_id = ?"
            tuple_args.append(last_room_id)

        sql = """
            SELECT
                event_id, state_events.type, state_events.state_key,
                topological_ordering, stream_ordering,
                events.room_id
            FROM events
            INNER JOIN state_events USING (event_id)
            LEFT JOIN event_auth_chains USING (event_id)
            LEFT JOIN event_auth_chain_to_calculate USING (event_id)
            WHERE event_auth_chains.event_id IS NULL
                AND event_auth_chain_to_calculate.event_id IS NULL
                AND %(tuple_cmp)s
                %(extra)s
            ORDER BY events.room_id, topological_ordering, stream_ordering
            %(limit)s
        """ % {
            "tuple_cmp": tuple_clause,
            "limit": "LIMIT ?" if batch_size is not None else "",
            "extra": extra_clause,
        }

        if batch_size is not None:
            tuple_args.append(batch_size)

        txn.execute(sql, tuple_args)
        rows = txn.fetchall()

        # Put the results in the necessary format for
        # `_add_chain_cover_index`
        event_to_room_id = {row[0]: row[5] for row in rows}
        event_to_types = {row[0]: (row[1], row[2]) for row in rows}

        # Calculate the new last position we've processed up to.
        new_last_depth: int = rows[-1][3] if rows else last_depth
        new_last_stream: int = rows[-1][4] if rows else last_stream
        new_last_room_id: str = rows[-1][5] if rows else ""

        # Map from room_id to last depth/stream_ordering processed for the room,
        # excluding the last room (which we're likely still processing). We also
        # need to include the room passed in if it's not included in the result
        # set (as we then know we've processed all events in said room).
        #
        # This is the set of rooms that we can now safely flip the
        # `has_auth_chain_index` bit for.
        finished_rooms = {
            row[5]: (row[3], row[4]) for row in rows if row[5] != new_last_room_id
        }
        if last_room_id not in finished_rooms and last_room_id != new_last_room_id:
            finished_rooms[last_room_id] = (last_depth, last_stream)

        count = len(rows)

        # We also need to fetch the auth events for them.
        auth_events = cast(
            list[tuple[str, str]],
            self.db_pool.simple_select_many_txn(
                txn,
                table="event_auth",
                column="event_id",
                iterable=event_to_room_id,
                keyvalues={},
                retcols=("event_id", "auth_id"),
            ),
        )

        event_to_auth_chain: dict[str, list[str]] = {}
        for event_id, auth_id in auth_events:
            event_to_auth_chain.setdefault(event_id, []).append(auth_id)

        # Calculate and persist the chain cover index for this set of events.
        #
        # Annoyingly we need to gut wrench into the persit event store so that
        # we can reuse the function to calculate the chain cover for rooms.
        PersistEventsStore._add_chain_cover_index(
            txn,
            self.db_pool,
            self.event_chain_id_gen,
            event_to_room_id,
            event_to_types,
            cast(dict[str, StrCollection], event_to_auth_chain),
        )

        return _CalculateChainCover(
            room_id=new_last_room_id,
            depth=new_last_depth,
            stream=new_last_stream,
            processed_count=count,
            finished_room_map=finished_rooms,
        )
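`make_tuple_comparison_clause` exists because `_calculate_chain_cover_txn` needs keyset pagination over `(room_id, topological_ordering, stream_ordering)`, and row-value comparisons like `(a, b) > (?, ?)` were not available on older SQLite versions. The standard expansion into scalar comparisons, shown as a hedged re-implementation rather than Synapse's actual helper:

# Sketch of expanding (a, b, c) > (?, ?, ?) into ANDs/ORs of scalar
# comparisons; Synapse's make_tuple_comparison_clause has the same effect.
def tuple_gt_clause(keys: list[tuple[str, int | str]]) -> tuple[str, list]:
    clauses = []
    args: list = []
    for i, (column, _) in enumerate(keys):
        # Columns before position i are pinned equal; column i is strictly greater.
        parts = [f"{keys[j][0]} = ?" for j in range(i)] + [f"{column} > ?"]
        clauses.append("(" + " AND ".join(parts) + ")")
        args.extend(value for _, value in keys[: i + 1])
    return "(" + " OR ".join(clauses) + ")", args

clause, args = tuple_gt_clause(
    [("events.room_id", "!r:x"), ("topological_ordering", 3), ("stream_ordering", 7)]
)
# clause: ((events.room_id > ?) OR (events.room_id = ? AND topological_ordering > ?)
#          OR (events.room_id = ? AND topological_ordering = ? AND stream_ordering > ?))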
    async def _purged_chain_cover_index(self, progress: dict, batch_size: int) -> int:
        """
        A background updates that iterates over the chain cover and deletes the
        chain cover for events that have been purged.

        This may be due to fully purging a room or via setting a retention policy.
        """
        current_event_id = progress.get("current_event_id", "")

        def purged_chain_cover_txn(txn: LoggingTransaction) -> int:
            # The event ID from events will be null if the chain ID / sequence
            # number points to a purged event.
            sql = """
                SELECT event_id, chain_id, sequence_number, e.event_id IS NOT NULL
                FROM event_auth_chains
                LEFT JOIN events AS e USING (event_id)
                WHERE event_id > ? ORDER BY event_auth_chains.event_id ASC LIMIT ?
            """
            txn.execute(sql, (current_event_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            # The event IDs and chain IDs / sequence numbers where the event has
            # been purged.
            unreferenced_event_ids = []
            unreferenced_chain_id_tuples = []
            event_id = ""
            for event_id, chain_id, sequence_number, has_event in rows:
                if not has_event:
                    unreferenced_event_ids.append((event_id,))
                    unreferenced_chain_id_tuples.append((chain_id, sequence_number))

            # Delete the unreferenced auth chains from event_auth_chain_links and
            # event_auth_chains.
            txn.executemany(
                """
                DELETE FROM event_auth_chains WHERE event_id = ?
                """,
                unreferenced_event_ids,
            )
            # We should also delete matching target_*, but there is no index on
            # target_chain_id. Hopefully any purged events are due to a room
            # being fully purged and they will be removed from the origin_*
            # searches.
            txn.executemany(
                """
                DELETE FROM event_auth_chain_links WHERE
                    origin_chain_id = ? AND origin_sequence_number = ?
                """,
                unreferenced_chain_id_tuples,
            )

            progress = {
                "current_event_id": event_id,
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, "purged_chain_cover", progress
            )

            return len(rows)

        result = await self.db_pool.runInteraction(
            "_purged_chain_cover_index",
            purged_chain_cover_txn,
        )

        if not result:
            await self.db_pool.updates._end_background_update("purged_chain_cover")

        return result

    async def _event_arbitrary_relations(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Background update handler which will store previously unknown relations for existing events."""
        last_event_id = progress.get("last_event_id", "")

        def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int:
            # Fetch events and then filter based on whether the event has a
            # relation or not.
            txn.execute(
                """
                SELECT event_id, json FROM event_json
                WHERE event_id > ?
                ORDER BY event_id LIMIT ?
                """,
                (last_event_id, batch_size),
            )

            results = list(txn)
            # (event_id, parent_id, rel_type) for each relation
            relations_to_insert: list[tuple[str, str, str, str]] = []
            for event_id, event_json_raw in results:
                try:
                    event_json = db_to_json(event_json_raw)
                except Exception as e:
                    logger.warning(
                        "Unable to load event %s (no relations will be updated): %s",
                        event_id,
                        e,
                    )
                    continue

                # If there's no relation, skip!
                relates_to = event_json["content"].get("m.relates_to")
                if not relates_to or not isinstance(relates_to, dict):
                    continue

                # If the relation type or parent event ID is not a string, skip it.
                #
                # Do not consider relation types that have existed for a long time,
                # since they will already be listed in the `event_relations` table.
                rel_type = relates_to.get("rel_type")
                if not isinstance(rel_type, str) or rel_type in (
                    RelationTypes.ANNOTATION,
                    RelationTypes.REFERENCE,
                    RelationTypes.REPLACE,
                ):
                    continue

                parent_id = relates_to.get("event_id")
                if not isinstance(parent_id, str):
                    continue

                room_id = event_json["room_id"]
                relations_to_insert.append((room_id, event_id, parent_id, rel_type))

            # Insert the missing data, note that we upsert here in case the event
            # has already been processed.
            if relations_to_insert:
                self.db_pool.simple_upsert_many_txn(
                    txn=txn,
                    table="event_relations",
                    key_names=("event_id",),
                    key_values=[(r[1],) for r in relations_to_insert],
                    value_names=("relates_to_id", "relation_type"),
                    value_values=[r[2:] for r in relations_to_insert],
                )

                # Iterate the parent IDs and invalidate caches.
                self._invalidate_cache_and_stream_bulk(  # type: ignore[attr-defined]
                    txn,
                    self.get_relations_for_event,  # type: ignore[attr-defined]
                    {
                        (
                            r[0],  # room_id
                            r[2],  # parent_id
                        )
                        for r in relations_to_insert
                    },
                )
                self._invalidate_cache_and_stream_bulk(  # type: ignore[attr-defined]
                    txn,
                    self.get_thread_summary,  # type: ignore[attr-defined]
                    {(r[1],) for r in relations_to_insert},
                )

            if results:
                latest_event_id = results[-1][0]
                self.db_pool.updates._background_update_progress_txn(
                    txn, "event_arbitrary_relations", {"last_event_id": latest_event_id}
                )

            return len(results)

        num_rows = await self.db_pool.runInteraction(
            desc="event_arbitrary_relations", func=_event_arbitrary_relations_txn
        )

        if not num_rows:
            await self.db_pool.updates._end_background_update(
                "event_arbitrary_relations"
            )

        return num_rows
|
1344
|
+
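# --- Illustrative sketch (not part of the shipped file) ---
# The validation above defends against arbitrary client-supplied JSON: the
# `m.relates_to` block must be a dict, and `rel_type`/`event_id` must be
# strings before a relation row is recorded. A condensed, standalone version
# of that filtering logic (KNOWN_REL_TYPES stands in for the RelationTypes
# constants and is an assumption of this sketch):
from typing import Any

KNOWN_REL_TYPES = {"m.annotation", "m.reference", "m.replace"}

def extract_new_relation(event_json: dict[str, Any]) -> tuple[str, str] | None:
    """Return (parent_id, rel_type) if the event carries a relation that
    would not already be in `event_relations`, else None."""
    relates_to = event_json.get("content", {}).get("m.relates_to")
    if not relates_to or not isinstance(relates_to, dict):
        return None
    rel_type = relates_to.get("rel_type")
    # Long-standing relation types were indexed at insertion time already.
    if not isinstance(rel_type, str) or rel_type in KNOWN_REL_TYPES:
        return None
    parent_id = relates_to.get("event_id")
    if not isinstance(parent_id, str):
        return None
    return parent_id, rel_type

assert extract_new_relation(
    {"content": {"m.relates_to": {"rel_type": "m.thread", "event_id": "$root"}}}
) == ("$root", "m.thread")
assert extract_new_relation({"content": {"m.relates_to": "bogus"}}) is None
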
    async def _background_populate_stream_ordering2(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Populate events.stream_ordering2, then replace stream_ordering

        This is to deal with the fact that stream_ordering was initially created as a
        32-bit integer field.
        """
        batch_size = max(batch_size, 1)

        def process(txn: LoggingTransaction) -> int:
            last_stream = progress.get("last_stream", -(1 << 31))
            txn.execute(
                """
                UPDATE events SET stream_ordering2=stream_ordering
                WHERE stream_ordering IN (
                    SELECT stream_ordering FROM events WHERE stream_ordering > ?
                    ORDER BY stream_ordering LIMIT ?
                )
                RETURNING stream_ordering;
                """,
                (last_stream, batch_size),
            )
            row_count = txn.rowcount
            if row_count == 0:
                return 0
            last_stream = max(row[0] for row in txn)
            logger.info("populated stream_ordering2 up to %i", last_stream)

            self.db_pool.updates._background_update_progress_txn(
                txn,
                _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
                {"last_stream": last_stream},
            )
            return row_count

        result = await self.db_pool.runInteraction(
            "_background_populate_stream_ordering2", process
        )

        if result != 0:
            return result

        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.POPULATE_STREAM_ORDERING2
        )
        return 0

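# --- Illustrative sketch (not part of the shipped file) ---
# The batching trick above: UPDATE the rows selected by a keyset predicate
# (`> last_stream`, ordered, LIMIT n) and use RETURNING to learn how far the
# batch got, so the next batch resumes from there. A runnable miniature
# (requires SQLite 3.35+ for RETURNING; the table is made up for illustration):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE ev (stream_ordering INTEGER PRIMARY KEY, s2 INTEGER)")
conn.executemany("INSERT INTO ev (stream_ordering) VALUES (?)", [(i,) for i in range(1, 8)])

last_stream, batch_size = 0, 3
while True:
    rows = conn.execute(
        """
        UPDATE ev SET s2 = stream_ordering
        WHERE stream_ordering IN (
            SELECT stream_ordering FROM ev WHERE stream_ordering > ?
            ORDER BY stream_ordering LIMIT ?
        )
        RETURNING stream_ordering
        """,
        (last_stream, batch_size),
    ).fetchall()
    if not rows:
        break  # nothing left to copy: the background update would end here
    last_stream = max(r[0] for r in rows)  # resume point for the next batch

assert conn.execute("SELECT COUNT(*) FROM ev WHERE s2 IS NULL").fetchone()[0] == 0
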
    async def _background_replace_stream_ordering_column(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Drop the old 'stream_ordering' column and rename 'stream_ordering2' into its place."""

        def process(txn: Cursor) -> None:
            for sql in _REPLACE_STREAM_ORDERING_SQL_COMMANDS:
                logger.info("completing stream_ordering migration: %s", sql)
                txn.execute(sql)

        # ANALYZE the new column to build stats on it, to encourage PostgreSQL to use the
        # indexes on it.
        await self.db_pool.runInteraction(
            "background_analyze_new_stream_ordering_column",
            lambda txn: txn.execute("ANALYZE events(stream_ordering2)"),
        )

        await self.db_pool.runInteraction(
            "_background_replace_stream_ordering_column", process
        )

        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN
        )

        return 0

    async def _background_drop_invalid_event_edges_rows(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Drop invalid rows from event_edges

        This only runs for postgres. For SQLite, it all happens synchronously.

        Firstly, drop any rows with is_state=True. These may have been added a long time
        ago, but they are no longer used.

        We also drop rows that do not correspond to entries in `events`, and add a
        foreign key.
        """

        last_event_id = progress.get("last_event_id", "")

        def drop_invalid_event_edges_txn(txn: LoggingTransaction) -> bool:
            """Returns True if we're done."""

            # first we need to find an endpoint.
            txn.execute(
                """
                SELECT event_id FROM event_edges
                WHERE event_id > ?
                ORDER BY event_id
                LIMIT 1 OFFSET ?
                """,
                (last_event_id, batch_size),
            )

            endpoint = None
            row = txn.fetchone()

            if row:
                endpoint = row[0]

            where_clause = "ee.event_id > ?"
            args = [last_event_id]
            if endpoint:
                where_clause += " AND ee.event_id <= ?"
                args.append(endpoint)

            # now delete any that:
            #   - have is_state=TRUE, or
            #   - do not correspond to a row in `events`
            txn.execute(
                f"""
                DELETE FROM event_edges
                WHERE event_id IN (
                    SELECT ee.event_id
                    FROM event_edges ee
                    LEFT JOIN events ev USING (event_id)
                    WHERE ({where_clause}) AND
                        (is_state OR ev.event_id IS NULL)
                )""",
                args,
            )

            logger.info(
                "cleaned up event_edges up to %s: removed %i/%i rows",
                endpoint,
                txn.rowcount,
                batch_size,
            )

            if endpoint is not None:
                self.db_pool.updates._background_update_progress_txn(
                    txn,
                    _BackgroundUpdates.EVENT_EDGES_DROP_INVALID_ROWS,
                    {"last_event_id": endpoint},
                )
                return False

            # if that was the final batch, we validate the foreign key.
            #
            # The constraint should have been in place and enforced for new rows since
            # before we started deleting invalid rows, so there's no chance for any
            # invalid rows to have snuck in the meantime. In other words, this really
            # ought to succeed.
            logger.info("cleaned up event_edges; enabling foreign key")
            txn.execute(
                "ALTER TABLE event_edges VALIDATE CONSTRAINT event_edges_event_id_fkey"
            )
            return True

        done = await self.db_pool.runInteraction(
            desc="drop_invalid_event_edges", func=drop_invalid_event_edges_txn
        )

        if done:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENT_EDGES_DROP_INVALID_ROWS
            )

        return batch_size

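# --- Illustrative sketch (not part of the shipped file) ---
# Both batch deletions above find a batch "endpoint" with
# `LIMIT 1 OFFSET batch_size`: instead of fetching batch_size rows, fetch the
# single key that sits just past the batch, then operate on the bounded key
# range. A tiny, runnable demonstration of the technique (table and data
# invented for illustration):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE items (id TEXT PRIMARY KEY)")
conn.executemany("INSERT INTO items VALUES (?)", [(f"id{i:02d}",) for i in range(10)])

def next_endpoint(last_id: str, batch_size: int) -> str | None:
    row = conn.execute(
        "SELECT id FROM items WHERE id > ? ORDER BY id LIMIT 1 OFFSET ?",
        (last_id, batch_size),
    ).fetchone()
    # None means fewer than batch_size rows remain past last_id: final batch.
    return row[0] if row else None

assert next_endpoint("", 4) == "id04"
assert next_endpoint("id07", 4) is None  # final batch reaches the end of the table
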
    async def _background_events_populate_state_key_rejections(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Back-populate `events.state_key` and `events.rejection_reason`"""

        min_stream_ordering_exclusive = progress["min_stream_ordering_exclusive"]
        max_stream_ordering_inclusive = progress["max_stream_ordering_inclusive"]

        def _populate_txn(txn: LoggingTransaction) -> bool:
            """Returns True if we're done."""

            # first we need to find an endpoint.
            # we need to find the final row in the batch of batch_size, which means
            # we need to skip over (batch_size-1) rows and get the next row.
            txn.execute(
                """
                SELECT stream_ordering FROM events
                WHERE stream_ordering > ? AND stream_ordering <= ?
                ORDER BY stream_ordering
                LIMIT 1 OFFSET ?
                """,
                (
                    min_stream_ordering_exclusive,
                    max_stream_ordering_inclusive,
                    batch_size - 1,
                ),
            )

            row = txn.fetchone()
            if row:
                endpoint = row[0]
            else:
                # if the query didn't return a row, we must be almost done. We just
                # need to go up to the recorded max_stream_ordering.
                endpoint = max_stream_ordering_inclusive

            where_clause = "stream_ordering > ? AND stream_ordering <= ?"
            args = [min_stream_ordering_exclusive, endpoint]

            # now do the updates.
            txn.execute(
                f"""
                UPDATE events
                SET state_key = (SELECT state_key FROM state_events se WHERE se.event_id = events.event_id),
                    rejection_reason = (SELECT reason FROM rejections rej WHERE rej.event_id = events.event_id)
                WHERE ({where_clause})
                """,
                args,
            )

            logger.info(
                "populated new `events` columns up to %i/%i: updated %i rows",
                endpoint,
                max_stream_ordering_inclusive,
                txn.rowcount,
            )

            if endpoint >= max_stream_ordering_inclusive:
                # we're done
                return True

            progress["min_stream_ordering_exclusive"] = endpoint
            self.db_pool.updates._background_update_progress_txn(
                txn,
                _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS,
                progress,
            )
            return False

        done = await self.db_pool.runInteraction(
            desc="events_populate_state_key_rejections", func=_populate_txn
        )

        if done:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS
            )

        return batch_size

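# --- Illustrative sketch (not part of the shipped file) ---
# The UPDATE above back-fills columns with correlated subqueries: each target
# row looks up its own value in a side table by event_id, and rows with no
# match get NULL. Runnable miniature (schema invented for illustration):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE events (event_id TEXT PRIMARY KEY, state_key TEXT);
    CREATE TABLE state_events (event_id TEXT PRIMARY KEY, state_key TEXT);
    INSERT INTO events (event_id) VALUES ('$a'), ('$b');
    INSERT INTO state_events VALUES ('$a', '');
    """
)
conn.execute(
    """
    UPDATE events
    SET state_key = (SELECT state_key FROM state_events se WHERE se.event_id = events.event_id)
    """
)
# '$a' is a state event and gets its state_key; '$b' is not and stays NULL.
assert conn.execute("SELECT state_key FROM events WHERE event_id = '$a'").fetchone()[0] == ""
assert conn.execute("SELECT state_key FROM events WHERE event_id = '$b'").fetchone()[0] is None
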
    async def _sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update(
        self, progress: JsonDict, _batch_size: int
    ) -> int:
        """
        Prefill `sliding_sync_joined_rooms_to_recalculate` table with all rooms we know about already.
        """

        def _txn(txn: LoggingTransaction) -> None:
            # We do this as one big bulk insert. This has been tested on a bigger
            # homeserver with ~10M rooms and took 60s. There is potential for this to
            # starve disk usage while this goes on.
            #
            # We upsert in case we have to run this multiple times.
            txn.execute(
                """
                INSERT INTO sliding_sync_joined_rooms_to_recalculate
                    (room_id)
                SELECT DISTINCT room_id FROM local_current_membership
                WHERE membership = 'join'
                ON CONFLICT (room_id)
                DO NOTHING;
                """,
            )

        await self.db_pool.runInteraction(
            "_sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update",
            _txn,
        )

        # Background update is done.
        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
        )
        return 0

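# --- Illustrative sketch (not part of the shipped file) ---
# `ON CONFLICT (room_id) DO NOTHING` is what makes the prefill safe to run
# more than once: re-running the INSERT ... SELECT leaves existing rows
# untouched instead of raising a unique-constraint error. Runnable miniature
# (tables invented for illustration; sqlite3 supports this clause from 3.24):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE queue (room_id TEXT PRIMARY KEY);
    CREATE TABLE membership (room_id TEXT, membership TEXT);
    INSERT INTO membership VALUES ('!a', 'join'), ('!a', 'join'), ('!b', 'leave');
    """
)
prefill = """
    INSERT INTO queue (room_id)
    SELECT DISTINCT room_id FROM membership WHERE membership = 'join'
    ON CONFLICT (room_id) DO NOTHING
"""
conn.execute(prefill)
conn.execute(prefill)  # idempotent: second run inserts nothing, raises nothing
assert conn.execute("SELECT COUNT(*) FROM queue").fetchone()[0] == 1
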
    async def _sliding_sync_joined_rooms_bg_update(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """
        Background update to populate the `sliding_sync_joined_rooms` table.
        """
        # We don't need to fetch any progress state because we just grab the next N
        # events in `sliding_sync_joined_rooms_to_recalculate`

        def _get_rooms_to_update_txn(txn: LoggingTransaction) -> list[tuple[str]]:
            """
            Returns:
                A list of room IDs to update along with the progress value
                (event_stream_ordering) indicating the continuation point in the
                `current_state_events` table for the next batch.
            """
            # Fetch the set of room IDs that we want to update
            #
            # We use the `current_state_events` table as the barometer for whether the
            # server is still participating in the room because if we're
            # `no_longer_in_room`, this table would be cleared out for the given
            # `room_id`.
            txn.execute(
                """
                SELECT room_id
                FROM sliding_sync_joined_rooms_to_recalculate
                LIMIT ?
                """,
                (batch_size,),
            )

            rooms_to_update_rows = cast(list[tuple[str]], txn.fetchall())

            return rooms_to_update_rows

        rooms_to_update = await self.db_pool.runInteraction(
            "_sliding_sync_joined_rooms_bg_update._get_rooms_to_update_txn",
            _get_rooms_to_update_txn,
        )

        if not rooms_to_update:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE
            )
            return 0

        # Map from room_id to insert/update state values in the `sliding_sync_joined_rooms` table.
        joined_room_updates: dict[str, SlidingSyncStateInsertValues] = {}
        # Map from room_id to stream_ordering/bump_stamp, etc values
        joined_room_stream_ordering_updates: dict[
            str, _JoinedRoomStreamOrderingUpdate
        ] = {}
        # As long as we get this value before we fetch the current state, we can use it
        # to check if something has changed since that point.
        most_recent_current_state_delta_stream_id = (
            await self.get_max_stream_id_in_current_state_deltas()
        )
        for (room_id,) in rooms_to_update:
            current_state_ids_map = await self.db_pool.runInteraction(
                "_sliding_sync_joined_rooms_bg_update._get_relevant_sliding_sync_current_state_event_ids_txn",
                PersistEventsStore._get_relevant_sliding_sync_current_state_event_ids_txn,
                room_id,
            )

            # If we're not joined to the room a) it doesn't belong in the
            # `sliding_sync_joined_rooms` table so we should skip and b) we won't have
            # any `current_state_events` for the room.
            if not current_state_ids_map:
                continue

            try:
                fetched_events = await self.get_events(current_state_ids_map.values())
            except (DatabaseCorruptionError, InvalidEventError) as e:
                logger.warning(
                    "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s",
                    room_id,
                    e,
                )
                continue

            current_state_map: StateMap[EventBase] = {
                state_key: fetched_events[event_id]
                for state_key, event_id in current_state_ids_map.items()
                # `get_events(...)` will filter out events for unknown room versions
                if event_id in fetched_events
            }

            # Even if we are joined to the room, this can happen for unknown room
            # versions (old room versions that aren't known anymore) since
            # `get_events(...)` will filter out events for unknown room versions
            if not current_state_map:
                continue

            state_insert_values = (
                PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
                    current_state_map
                )
            )
            # We should have some insert values for each room, even if they are `None`
            assert state_insert_values
            joined_room_updates[room_id] = state_insert_values

            # Figure out the stream_ordering of the latest event in the room
            most_recent_event_pos_results = await self.get_last_event_pos_in_room(
                room_id, event_types=None
            )
            assert most_recent_event_pos_results is not None, (
                f"We should not be seeing `None` here because the room ({room_id}) should at-least have a create event "
                + "given we pulled the room out of `current_state_events`"
            )
            most_recent_event_stream_ordering = most_recent_event_pos_results[1].stream

            # The `most_recent_event_stream_ordering` should be positive,
            # however there are (very rare) rooms where that is not the case in
            # the matrix.org database. It's not clear how they got into that
            # state, but it does mean that we cannot assert that the stream
            # ordering is indeed positive.

            # Figure out the latest `bump_stamp` in the room. This could be `None` for a
            # federated room you just joined where all of the events are still `outliers` or
            # backfilled history. In the Sliding Sync API, we default to the user's
            # membership event `stream_ordering` if we don't have a `bump_stamp` so
            # having it as `None` in this table is fine.
            bump_stamp_event_pos_results = await self.get_last_event_pos_in_room(
                room_id, event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
            )
            most_recent_bump_stamp = None
            if (
                bump_stamp_event_pos_results is not None
                and bump_stamp_event_pos_results[1].stream > 0
            ):
                most_recent_bump_stamp = bump_stamp_event_pos_results[1].stream

            joined_room_stream_ordering_updates[room_id] = (
                _JoinedRoomStreamOrderingUpdate(
                    most_recent_event_stream_ordering=most_recent_event_stream_ordering,
                    most_recent_bump_stamp=most_recent_bump_stamp,
                )
            )

        def _fill_table_txn(txn: LoggingTransaction) -> None:
            # Handle updating the `sliding_sync_joined_rooms` table
            #
            for (
                room_id,
                update_map,
            ) in joined_room_updates.items():
                joined_room_stream_ordering_update = (
                    joined_room_stream_ordering_updates[room_id]
                )
                event_stream_ordering = (
                    joined_room_stream_ordering_update.most_recent_event_stream_ordering
                )
                bump_stamp = joined_room_stream_ordering_update.most_recent_bump_stamp

                # Check if the current state has been updated since we gathered it.
                # We're being careful not to insert/overwrite with stale data.
                state_deltas_since_we_gathered_current_state = (
                    self.get_current_state_deltas_for_room_txn(
                        txn,
                        room_id,
                        from_token=RoomStreamToken(
                            stream=most_recent_current_state_delta_stream_id
                        ),
                        to_token=None,
                    )
                )
                for state_delta in state_deltas_since_we_gathered_current_state:
                    # We only need to check whether the state is relevant to the
                    # `sliding_sync_joined_rooms` table.
                    if (
                        state_delta.event_type,
                        state_delta.state_key,
                    ) in SLIDING_SYNC_RELEVANT_STATE_SET:
                        # Raising exception so we can just exit and try again. It would
                        # be hard to resolve this within the transaction because we need
                        # to get full events out that take redactions into account. We
                        # could add some retry logic here, but it's easier to just let
                        # the background update try again.
                        raise Exception(
                            "Current state was updated after we gathered it to update "
                            + "`sliding_sync_joined_rooms` in the background update. "
                            + "Raising exception so we can just try again."
                        )

                # Since we fully insert rows into `sliding_sync_joined_rooms`, we can
                # just do everything on insert and `ON CONFLICT DO NOTHING`.
                #
                self.db_pool.simple_upsert_txn(
                    txn,
                    table="sliding_sync_joined_rooms",
                    keyvalues={"room_id": room_id},
                    values={},
                    insertion_values={
                        **update_map,
                        # The reason we're only *inserting* (not *updating*) `event_stream_ordering`
                        # and `bump_stamp` is because if they are present, that means they are already
                        # up-to-date.
                        "event_stream_ordering": event_stream_ordering,
                        "bump_stamp": bump_stamp,
                    },
                )

            # Now that we've processed all the rooms, we can remove them from the
            # queue.
            #
            # Note: we need to remove all the rooms from the queue we pulled out
            # from the DB, not just the ones we've processed above. Otherwise
            # we'll simply keep pulling out the same rooms over and over again.
            self.db_pool.simple_delete_many_batch_txn(
                txn,
                table="sliding_sync_joined_rooms_to_recalculate",
                keys=("room_id",),
                values=rooms_to_update,
            )

        await self.db_pool.runInteraction(
            "sliding_sync_joined_rooms_bg_update", _fill_table_txn
        )

        return len(rooms_to_update)

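# --- Illustrative sketch (not part of the shipped file) ---
# The stale-data guard above is optimistic concurrency control: record a
# watermark, gather data outside the transaction, then re-check inside the
# transaction whether anything relevant changed past the watermark, and bail
# out by raising rather than writing stale rows. The shape of the pattern,
# reduced to an in-memory toy (all names invented for this sketch):
class StaleDataError(Exception):
    """Aborts the write; the background update simply runs again later."""

class ToyStore:
    def __init__(self) -> None:
        self.stream_id = 0              # stand-in for the state-delta stream
        self.snapshots: dict[str, int] = {}

    def write_snapshot(self, room_id: str, gathered_at: int, value: int) -> None:
        # Inside the "transaction": re-check the watermark before writing.
        if self.stream_id != gathered_at:
            raise StaleDataError(room_id)
        self.snapshots[room_id] = value

store = ToyStore()
watermark = store.stream_id     # 1. take the watermark
value = 42                      # 2. slow gathering happens here, outside the txn
store.stream_id += 1            # ...meanwhile a concurrent event advances the stream
try:
    store.write_snapshot("!room", watermark, value)
except StaleDataError:
    pass                        # 3. stale: drop the work and retry later
assert "!room" not in store.snapshots
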
    async def _sliding_sync_membership_snapshots_bg_update(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """
        Background update to populate the `sliding_sync_membership_snapshots` table.
        """
        # We do this in two phases: a) the initial phase where we go through all
        # room memberships, and then b) a second phase where we look at new
        # memberships (this is to handle the case where we downgrade and then
        # upgrade again).
        #
        # We have to do this as two phases (rather than just the second phase
        # where we iterate on event_stream_ordering), as the
        # `event_stream_ordering` column may have null values for old rows.
        # Therefore we first do the set of historic rooms and *then* look at any
        # new rows (which will have a non-null `event_stream_ordering`).
        initial_phase = progress.get("initial_phase")
        if initial_phase is None:
            # If this is the first run, store the current max stream position.
            # We know we will go through all memberships less than the current
            # max in the initial phase.
            progress = {
                "initial_phase": True,
                "last_event_stream_ordering": self.get_room_max_stream_ordering(),
            }
            await self.db_pool.updates._background_update_progress(
                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
                progress,
            )
            initial_phase = True

        last_room_id = progress.get("last_room_id", "")
        last_user_id = progress.get("last_user_id", "")
        last_event_stream_ordering = progress["last_event_stream_ordering"]

        def _find_memberships_to_update_txn(
            txn: LoggingTransaction,
        ) -> list[
            tuple[
                str,
                str | None,
                str | None,
                str,
                str,
                str,
                str,
                int,
                str | None,
                bool,
            ]
        ]:
            # Fetch the set of event IDs that we want to update
            #
            # We skip over rows which we've already handled, i.e. have a
            # matching row in `sliding_sync_membership_snapshots` with the same
            # room, user and event ID.
            #
            # We also ignore rooms that the user has left themselves (i.e. not
            # kicked). This is to avoid having to port lots of old rooms that we
            # will never send down sliding sync (as we exclude such rooms from
            # initial syncs).

            if initial_phase:
                # There are some old out-of-band memberships (before
                # https://github.com/matrix-org/synapse/issues/6983) where we don't have
                # the corresponding room stored in the `rooms` table. We use `LEFT JOIN
                # rooms AS r USING (room_id)` to find the rooms missing from `rooms` and
                # insert a row for them below.
                txn.execute(
                    """
                    SELECT
                        c.room_id,
                        r.room_id,
                        r.room_version,
                        c.user_id,
                        e.sender,
                        c.event_id,
                        c.membership,
                        e.stream_ordering,
                        e.instance_name,
                        e.outlier
                    FROM local_current_membership AS c
                    LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id)
                    INNER JOIN events AS e USING (event_id)
                    LEFT JOIN rooms AS r ON (c.room_id = r.room_id)
                    WHERE (c.room_id, c.user_id) > (?, ?)
                        AND (m.user_id IS NULL OR c.event_id != m.membership_event_id)
                    ORDER BY c.room_id ASC, c.user_id ASC
                    LIMIT ?
                    """,
                    (last_room_id, last_user_id, batch_size),
                )
            elif last_event_stream_ordering is not None:
                # It's important to sort by `event_stream_ordering` *ascending* (oldest to
                # newest) so that if we see that this background update is in progress and want
                # to start the catch-up process, we can safely assume that it will
                # eventually get to the rooms we want to catch-up on anyway (see
                # `_resolve_stale_data_in_sliding_sync_tables()`).
                #
                # `c.room_id` is duplicated to make it match what we're doing in the
                # `initial_phase`. But we can avoid doing the extra `rooms` table join
                # because we can assume all of these new events won't have this problem.
                txn.execute(
                    """
                    SELECT
                        c.room_id,
                        r.room_id,
                        r.room_version,
                        c.user_id,
                        e.sender,
                        c.event_id,
                        c.membership,
                        c.event_stream_ordering,
                        e.instance_name,
                        e.outlier
                    FROM local_current_membership AS c
                    LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id)
                    INNER JOIN events AS e USING (event_id)
                    LEFT JOIN rooms AS r ON (c.room_id = r.room_id)
                    WHERE c.event_stream_ordering > ?
                        AND (m.user_id IS NULL OR c.event_id != m.membership_event_id)
                    ORDER BY c.event_stream_ordering ASC
                    LIMIT ?
                    """,
                    (last_event_stream_ordering, batch_size),
                )
            else:
                raise Exception("last_event_stream_ordering should not be None")

            memberships_to_update_rows = cast(
                list[
                    tuple[
                        str,
                        str | None,
                        str | None,
                        str,
                        str,
                        str,
                        str,
                        int,
                        str | None,
                        bool,
                    ]
                ],
                txn.fetchall(),
            )

            return memberships_to_update_rows

        memberships_to_update_rows = await self.db_pool.runInteraction(
            "sliding_sync_membership_snapshots_bg_update._find_memberships_to_update_txn",
            _find_memberships_to_update_txn,
        )

        if not memberships_to_update_rows:
            if initial_phase:
                # Move onto the next phase.
                await self.db_pool.updates._background_update_progress(
                    _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
                    {
                        "initial_phase": False,
                        "last_event_stream_ordering": last_event_stream_ordering,
                    },
                )
                return 0
            else:
                # We've finished both phases, we're done.
                await self.db_pool.updates._end_background_update(
                    _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE
                )
                return 0

        def _find_previous_invite_or_knock_membership_txn(
            txn: LoggingTransaction, room_id: str, user_id: str, event_id: str
        ) -> tuple[str, str] | None:
            # Find the previous invite/knock event before the leave event
            #
            # Here are some notes on how we landed on this query:
            #
            # We're using `topological_ordering` instead of `stream_ordering` because
            # somehow it's possible to have your `leave` event backfilled with a
            # negative `stream_ordering` and your previous `invite` event with a
            # positive `stream_ordering` so we wouldn't have a chance of finding the
            # previous membership with a naive `event_stream_ordering < ?` comparison.
            #
            # Also be careful because `room_memberships.event_stream_ordering` is
            # nullable and not always filled in. You would need to join on `events` to
            # rely on `events.stream_ordering` instead. Even though the
            # `events.stream_ordering` also doesn't have a `NOT NULL` constraint, it
            # doesn't have any rows where this is the case (checked on `matrix.org`).
            # The fact the `events.stream_ordering` is a nullable column is a holdover
            # from a rename of the column.
            #
            # You might also consider using the `event_auth` table to find the previous
            # membership, but there are cases where somehow a membership event doesn't
            # point back to the previous membership event in the auth events (unknown
            # cause).
            txn.execute(
                """
                SELECT event_id, membership
                FROM room_memberships AS m
                INNER JOIN events AS e USING (room_id, event_id)
                WHERE
                    room_id = ?
                    AND m.user_id = ?
                    AND (m.membership = ? OR m.membership = ?)
                    AND e.event_id != ?
                ORDER BY e.topological_ordering DESC
                LIMIT 1
                """,
                (
                    room_id,
                    user_id,
                    # We look explicitly for `invite` and `knock` events instead of
                    # just their previous membership as someone could have been `invite`
                    # -> `ban` -> unbanned (`leave`) and we want to find the `invite`
                    # event where the stripped state is.
                    Membership.INVITE,
                    Membership.KNOCK,
                    event_id,
                ),
            )
            row = txn.fetchone()

            if row is None:
                # Generally we should have an invite or knock event for leaves
                # that are outliers, however this may not always be the case
                # (e.g. a local user got kicked but the kick event got pulled in
                # as an outlier).
                return None

            event_id, membership = row

            return event_id, membership

        # Map from (room_id, user_id) to ...
        to_insert_membership_snapshots: dict[
            tuple[str, str], SlidingSyncMembershipSnapshotSharedInsertValues
        ] = {}
        to_insert_membership_infos: dict[
            tuple[str, str], SlidingSyncMembershipInfoWithEventPos
        ] = {}
        for (
            room_id,
            room_id_from_rooms_table,
            room_version_id,
            user_id,
            sender,
            membership_event_id,
            membership,
            membership_event_stream_ordering,
            membership_event_instance_name,
            is_outlier,
        ) in memberships_to_update_rows:
            # We don't know how to handle `membership` values other than these. The
            # code below would need to be updated.
            assert membership in (
                Membership.JOIN,
                Membership.INVITE,
                Membership.KNOCK,
                Membership.LEAVE,
                Membership.BAN,
            )

            if (
                room_version_id is not None
                and room_version_id not in KNOWN_ROOM_VERSIONS
            ):
                # Ignore rooms with unknown room versions (these were
                # experimental rooms, that we no longer support).
                continue

            # There are some old out-of-band memberships (before
            # https://github.com/matrix-org/synapse/issues/6983) where we don't have the
            # corresponding room stored in the `rooms` table. We have a `FOREIGN KEY`
            # constraint on the `sliding_sync_membership_snapshots` table so we have to
            # fix-up these memberships by adding the room to the `rooms` table.
            if room_id_from_rooms_table is None:
                await self.db_pool.simple_insert(
                    table="rooms",
                    values={
                        "room_id": room_id,
                        # Only out-of-band memberships are missing from the `rooms`
                        # table so that is the only type of membership we're dealing
                        # with here. Since we don't calculate the "chain cover" for
                        # out-of-band memberships, we can just set this to `True` as if
                        # the user ever joins the room, we will end up calculating the
                        # "chain cover" anyway.
                        "has_auth_chain_index": True,
                    },
                )

            # Map of values to insert/update in the `sliding_sync_membership_snapshots` table
            sliding_sync_membership_snapshots_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {}
            if membership == Membership.JOIN:
                # If we're still joined, we can pull from current state.
                current_state_ids_map: StateMap[
                    str
                ] = await self.hs.get_storage_controllers().state.get_current_state_ids(
                    room_id,
                    state_filter=StateFilter.from_types(
                        SLIDING_SYNC_RELEVANT_STATE_SET
                    ),
                    # Partially-stated rooms should have all state events except for
                    # remote membership events so we don't need to wait at all because
                    # we only want some non-membership state
                    await_full_state=False,
                )
                # We're iterating over rooms that we are joined to so they should
                # have `current_state_events` and we should have some current state
                # for each room
                if current_state_ids_map:
                    try:
                        fetched_events = await self.get_events(
                            current_state_ids_map.values()
                        )
                    except (DatabaseCorruptionError, InvalidEventError) as e:
                        logger.warning(
                            "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s",
                            room_id,
                            e,
                        )
                        continue

                    current_state_map: StateMap[EventBase] = {
                        state_key: fetched_events[event_id]
                        for state_key, event_id in current_state_ids_map.items()
                        # `get_events(...)` will filter out events for unknown room versions
                        if event_id in fetched_events
                    }

                    # Can happen for unknown room versions (old room versions that aren't known
                    # anymore) since `get_events(...)` will filter out events for unknown room
                    # versions
                    if not current_state_map:
                        continue

                    state_insert_values = PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
                        current_state_map
                    )
                    sliding_sync_membership_snapshots_insert_map.update(
                        state_insert_values
                    )
                    # We should have some insert values for each room, even if they are `None`
                    assert sliding_sync_membership_snapshots_insert_map

                    # We have current state to work from
                    sliding_sync_membership_snapshots_insert_map["has_known_state"] = (
                        True
                    )
                else:
                    # Although we expect every room to have a create event (even
                    # past unknown room versions since we haven't supported one
                    # without it), there seem to be some corrupted rooms in
                    # practice that don't have the create event in the
                    # `current_state_events` table. The create event does exist
                    # in the events table though. We'll just say that we don't
                    # know the state for these rooms and continue on with our
                    # day.
                    sliding_sync_membership_snapshots_insert_map = {
                        "has_known_state": False,
                        "room_type": None,
                        "room_name": None,
                        "is_encrypted": False,
                    }
            elif membership in (Membership.INVITE, Membership.KNOCK) or (
                membership in (Membership.LEAVE, Membership.BAN) and is_outlier
            ):
                invite_or_knock_event_id = None
                invite_or_knock_membership = None

                # If the event is an `out_of_band_membership` (special case of
                # `outlier`), we never had historical state so we have to pull from
                # the stripped state on the previous invite/knock event. This gives
                # us a consistent view of the room state regardless of your
                # membership (i.e. the room shouldn't disappear if you're using the
                # `is_encrypted` filter and you leave).
                if membership in (Membership.LEAVE, Membership.BAN) and is_outlier:
                    previous_membership = await self.db_pool.runInteraction(
                        "sliding_sync_membership_snapshots_bg_update._find_previous_invite_or_knock_membership_txn",
                        _find_previous_invite_or_knock_membership_txn,
                        room_id,
                        user_id,
                        membership_event_id,
                    )
                    if previous_membership is not None:
                        (
                            invite_or_knock_event_id,
                            invite_or_knock_membership,
                        ) = previous_membership
                else:
                    invite_or_knock_event_id = membership_event_id
                    invite_or_knock_membership = membership

                if (
                    invite_or_knock_event_id is not None
                    and invite_or_knock_membership is not None
                ):
                    # Pull from the stripped state on the invite/knock event
                    invite_or_knock_event = await self.get_event(
                        invite_or_knock_event_id
                    )

                    raw_stripped_state_events = None
                    if invite_or_knock_membership == Membership.INVITE:
                        invite_room_state = invite_or_knock_event.unsigned.get(
                            "invite_room_state"
                        )
                        raw_stripped_state_events = invite_room_state
                    elif invite_or_knock_membership == Membership.KNOCK:
                        knock_room_state = invite_or_knock_event.unsigned.get(
                            "knock_room_state"
                        )
                        raw_stripped_state_events = knock_room_state

                    sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state(
                        raw_stripped_state_events
                    )
                else:
                    # We couldn't find any state for the membership, so we just have to
                    # leave it as empty.
                    sliding_sync_membership_snapshots_insert_map = {
                        "has_known_state": False,
                        "room_type": None,
                        "room_name": None,
                        "is_encrypted": False,
                    }

                # We should have some insert values for each room, even if no
                # stripped state is on the event because we still want to record
                # that we have no known state
                assert sliding_sync_membership_snapshots_insert_map
            elif membership in (Membership.LEAVE, Membership.BAN):
                # Pull from historical state
                state_ids_map = await self.hs.get_storage_controllers().state.get_state_ids_for_event(
                    membership_event_id,
                    state_filter=StateFilter.from_types(
                        SLIDING_SYNC_RELEVANT_STATE_SET
                    ),
                    # Partially-stated rooms should have all state events except for
                    # remote membership events so we don't need to wait at all because
                    # we only want some non-membership state
                    await_full_state=False,
                )

                try:
                    fetched_events = await self.get_events(state_ids_map.values())
                except (DatabaseCorruptionError, InvalidEventError) as e:
                    logger.warning(
                        "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s",
                        room_id,
                        e,
                    )
                    continue

                state_map: StateMap[EventBase] = {
                    state_key: fetched_events[event_id]
                    for state_key, event_id in state_ids_map.items()
                    # `get_events(...)` will filter out events for unknown room versions
                    if event_id in fetched_events
                }

                # Can happen for unknown room versions (old room versions that aren't known
                # anymore) since `get_events(...)` will filter out events for unknown room
                # versions
                if not state_map:
                    continue

                state_insert_values = (
                    PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
                        state_map
                    )
                )
                sliding_sync_membership_snapshots_insert_map.update(state_insert_values)
                # We should have some insert values for each room, even if they are `None`
                assert sliding_sync_membership_snapshots_insert_map

                # We have historical state to work from
                sliding_sync_membership_snapshots_insert_map["has_known_state"] = True
            else:
                # We don't know how to handle this type of membership yet
                #
                # FIXME: We should use `assert_never` here but for some reason
                # the exhaustive matching doesn't recognize the `Never` here.
                # assert_never(membership)
                raise AssertionError(
                    f"Unexpected membership {membership} ({membership_event_id}) that we don't know how to handle yet"
                )

            to_insert_membership_snapshots[(room_id, user_id)] = (
                sliding_sync_membership_snapshots_insert_map
            )
            to_insert_membership_infos[(room_id, user_id)] = (
                SlidingSyncMembershipInfoWithEventPos(
                    user_id=user_id,
                    sender=sender,
                    membership_event_id=membership_event_id,
                    membership=membership,
                    membership_event_stream_ordering=membership_event_stream_ordering,
                    # If instance_name is null we default to "master"
                    membership_event_instance_name=membership_event_instance_name
                    or "master",
                )
            )

        def _fill_table_txn(txn: LoggingTransaction) -> None:
            # Handle updating the `sliding_sync_membership_snapshots` table
            #
            for key, insert_map in to_insert_membership_snapshots.items():
                room_id, user_id = key
                membership_info = to_insert_membership_infos[key]
                sender = membership_info.sender
                membership_event_id = membership_info.membership_event_id
                membership = membership_info.membership
                membership_event_stream_ordering = (
                    membership_info.membership_event_stream_ordering
                )
                membership_event_instance_name = (
                    membership_info.membership_event_instance_name
                )

                # We don't need to upsert the state because we never partially
                # insert/update the snapshots and anything already there is up-to-date
                # EXCEPT for the `forgotten` field since that is updated out-of-band
                # from the membership changes.
                #
                # Even though we're only doing insertions, we're using
                # `simple_upsert_txn()` here to avoid unique violation errors that would
                # happen from `simple_insert_txn()`
                self.db_pool.simple_upsert_txn(
                    txn,
                    table="sliding_sync_membership_snapshots",
                    keyvalues={"room_id": room_id, "user_id": user_id},
                    values={},
                    insertion_values={
                        **insert_map,
                        "sender": sender,
                        "membership_event_id": membership_event_id,
                        "membership": membership,
                        "event_stream_ordering": membership_event_stream_ordering,
                        "event_instance_name": membership_event_instance_name,
                    },
                )
                # We need to find the `forgotten` value during the transaction because
                # we can't risk inserting stale data.
                if isinstance(txn.database_engine, PostgresEngine):
                    txn.execute(
                        """
                        UPDATE sliding_sync_membership_snapshots
                        SET
                            forgotten = m.forgotten
                        FROM room_memberships AS m
                        WHERE sliding_sync_membership_snapshots.room_id = ?
                            AND sliding_sync_membership_snapshots.user_id = ?
                            AND membership_event_id = ?
                            AND membership_event_id = m.event_id
                            AND m.event_id IS NOT NULL
                        """,
                        (
                            room_id,
                            user_id,
                            membership_event_id,
                        ),
                    )
                else:
                    # SQLite doesn't support UPDATE FROM before 3.33.0, so we do
                    # this via sub-selects.
                    txn.execute(
                        """
                        UPDATE sliding_sync_membership_snapshots
                        SET
                            forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?)
                        WHERE room_id = ? and user_id = ? AND membership_event_id = ?
                        """,
                        (
                            membership_event_id,
                            room_id,
                            user_id,
                            membership_event_id,
                        ),
                    )

        await self.db_pool.runInteraction(
            "sliding_sync_membership_snapshots_bg_update", _fill_table_txn
        )

        # Update the progress
        (
            room_id,
            _room_id_from_rooms_table,
            _room_version_id,
            user_id,
            _sender,
            _membership_event_id,
            _membership,
            membership_event_stream_ordering,
            _membership_event_instance_name,
            _is_outlier,
        ) = memberships_to_update_rows[-1]

        progress = {
            "initial_phase": initial_phase,
            "last_room_id": room_id,
            "last_user_id": user_id,
            "last_event_stream_ordering": last_event_stream_ordering,
        }
        if not initial_phase:
            progress["last_event_stream_ordering"] = membership_event_stream_ordering

        await self.db_pool.updates._background_update_progress(
            _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
            progress,
        )

        return len(memberships_to_update_rows)

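# --- Illustrative sketch (not part of the shipped file) ---
# The two-phase progress handling above reduces to a small driver: phase one
# walks a key range of historic rows (whose stream ordering may be NULL), and
# phase two walks stream ordering for rows written after the update started.
# A toy driver over in-memory progress dicts (all names invented for this
# sketch):
def run_to_completion(fetch_batch, save_progress) -> None:
    progress: dict = {"initial_phase": True, "last_key": ""}
    while True:
        batch = fetch_batch(progress)
        if batch:
            progress["last_key"] = batch[-1]  # resume point for the next batch
            save_progress(progress)
            continue
        if progress["initial_phase"]:
            # Historic rows exhausted: switch phases rather than finishing,
            # so rows that arrived mid-update are still picked up.
            progress = {"initial_phase": False, "last_key": ""}
            save_progress(progress)
            continue
        break  # both phases drained: the real code ends the background update

calls = []
data = iter([["a", "b"], [], ["c"], []])
run_to_completion(lambda p: next(data), calls.append)
assert calls[-1]["initial_phase"] is False
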
    async def _sliding_sync_membership_snapshots_fix_forgotten_column_bg_update(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """
        Background update to update the `sliding_sync_membership_snapshots` ->
        `forgotten` column to be in sync with the `room_memberships` table.

        Because of previously flawed code (now fixed), for any room that someone has
        forgotten and subsequently re-joined or had any new membership on, we need to
        update the column to match the `room_memberships` table, as it has fallen out
        of sync.
        """
        last_event_stream_ordering = progress.get(
            "last_event_stream_ordering", -(1 << 31)
        )

        def _txn(
            txn: LoggingTransaction,
        ) -> int:
            """
            Returns:
                The number of rows updated.
            """

            # To simplify things, we can just recheck any row in
            # `sliding_sync_membership_snapshots` with `forgotten=1`
            txn.execute(
                """
                SELECT
                    s.room_id,
                    s.user_id,
                    s.membership_event_id,
                    s.event_stream_ordering,
                    m.forgotten
                FROM sliding_sync_membership_snapshots AS s
                INNER JOIN room_memberships AS m ON (s.membership_event_id = m.event_id)
                WHERE s.event_stream_ordering > ?
                    AND s.forgotten = 1
                ORDER BY s.event_stream_ordering ASC
                LIMIT ?
                """,
                (last_event_stream_ordering, batch_size),
            )

            memberships_to_update_rows = cast(
                list[tuple[str, str, str, int, int]],
                txn.fetchall(),
            )
            if not memberships_to_update_rows:
                return 0

            # Assemble the values to update
            #
            # (room_id, user_id)
            key_values: list[tuple[str, str]] = []
            # (forgotten,)
            value_values: list[tuple[int]] = []
            for (
                room_id,
                user_id,
                _membership_event_id,
                _event_stream_ordering,
                forgotten,
            ) in memberships_to_update_rows:
                key_values.append(
                    (
                        room_id,
                        user_id,
                    )
                )
                value_values.append((forgotten,))

            # Update all of the rows in one go
            self.db_pool.simple_update_many_txn(
                txn,
                table="sliding_sync_membership_snapshots",
                key_names=("room_id", "user_id"),
                key_values=key_values,
                value_names=("forgotten",),
                value_values=value_values,
            )

            # Update the progress
            (
                _room_id,
                _user_id,
                _membership_event_id,
                event_stream_ordering,
                _forgotten,
            ) = memberships_to_update_rows[-1]
            self.db_pool.updates._background_update_progress_txn(
                txn,
                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE,
                {
                    "last_event_stream_ordering": event_stream_ordering,
                },
            )

            return len(memberships_to_update_rows)

        num_rows = await self.db_pool.runInteraction(
            "_sliding_sync_membership_snapshots_fix_forgotten_column_bg_update",
            _txn,
        )

        if not num_rows:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE
            )

        return num_rows

async def fixup_max_depth_cap_bg_update(
|
|
2581
|
+
self, progress: JsonDict, batch_size: int
|
|
2582
|
+
) -> int:
|
|
2583
|
+
"""Fixes the topological ordering for events that have a depth greater
|
|
2584
|
+
than MAX_DEPTH. This should fix /messages ordering oddities."""
|
|
2585
|
+
|
|
2586
|
+
room_id_bound = progress.get("room_id", "")
|
|
2587
|
+
|
|
2588
|
+
def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> tuple[bool, int]:
|
|
2589
|
+
txn.execute(
|
|
2590
|
+
"""
|
|
2591
|
+
SELECT room_id, room_version FROM rooms
|
|
2592
|
+
WHERE room_id > ?
|
|
2593
|
+
ORDER BY room_id
|
|
2594
|
+
LIMIT ?
|
|
2595
|
+
""",
|
|
2596
|
+
(room_id_bound, batch_size),
|
|
2597
|
+
)
|
|
2598
|
+
|
|
2599
|
+
# Find the next room ID to process, with a relevant room version.
|
|
2600
|
+
room_ids: list[str] = []
|
|
2601
|
+
max_room_id: str | None = None
|
|
2602
|
+
for room_id, room_version_str in txn:
|
|
2603
|
+
max_room_id = room_id
|
|
2604
|
+
|
|
2605
|
+
# We only want to process rooms with a known room version that
|
|
2606
|
+
# has strict canonical json validation enabled.
|
|
2607
|
+
room_version = KNOWN_ROOM_VERSIONS.get(room_version_str)
|
|
2608
|
+
if room_version and room_version.strict_canonicaljson:
|
|
2609
|
+
room_ids.append(room_id)
|
|
2610
|
+
|
|
2611
|
+
if max_room_id is None:
|
|
2612
|
+
# The query did not return any rooms, so we are done.
|
|
2613
|
+
return True, 0
|
|
2614
|
+
|
|
2615
|
+
# Update the progress to the last room ID we pulled from the DB,
|
|
2616
|
+
# this ensures we always make progress.
|
|
2617
|
+
self.db_pool.updates._background_update_progress_txn(
|
|
2618
|
+
txn,
|
|
2619
|
+
_BackgroundUpdates.FIXUP_MAX_DEPTH_CAP,
|
|
2620
|
+
progress={"room_id": max_room_id},
|
|
2621
|
+
)
|
|
2622
|
+
|
|
2623
|
+
if not room_ids:
|
|
2624
|
+
# There were no rooms in this batch that required the fix.
|
|
2625
|
+
return False, 0
|
|
2626
|
+
|
|
2627
|
+
clause, list_args = make_in_list_sql_clause(
|
|
2628
|
+
self.database_engine, "room_id", room_ids
|
|
2629
|
+
)
|
|
2630
|
+
sql = f"""
|
|
2631
|
+
UPDATE events SET topological_ordering = ?
|
|
2632
|
+
WHERE topological_ordering > ? AND {clause}
|
|
2633
|
+
"""
|
|
2634
|
+
args = [MAX_DEPTH, MAX_DEPTH]
|
|
2635
|
+
args.extend(list_args)
|
|
2636
|
+
txn.execute(sql, args)
|
|
2637
|
+
|
|
2638
|
+
return False, len(room_ids)
|
|
2639
|
+
|
|
2640
|
+
done, num_rooms = await self.db_pool.runInteraction(
|
|
2641
|
+
"redo_max_depth_bg_update", redo_max_depth_bg_update_txn
|
|
2642
|
+
)
|
|
2643
|
+
|
|
2644
|
+
if done:
|
|
2645
|
+
await self.db_pool.updates._end_background_update(
|
|
2646
|
+
_BackgroundUpdates.FIXUP_MAX_DEPTH_CAP
|
|
2647
|
+
)
|
|
2648
|
+
|
|
2649
|
+
return num_rooms
|
|
2650
|
+
|
|
2651
|
+
|
|
2652
|
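`fixup_max_depth_cap_bg_update` pages through rooms by room ID (keyset pagination on `room_id > ?`) and clamps any `topological_ordering` above `MAX_DEPTH` back down to `MAX_DEPTH`. A self-contained sqlite3 sketch of that clamping statement, with the `IN`-list built the way `make_in_list_sql_clause` would expand on SQLite; the `MAX_DEPTH` value is an assumption here (strict canonical JSON caps integers at 2**53 - 1) — the real constant lives in `synapse.api.constants`:

```python
import sqlite3

MAX_DEPTH = 2**53 - 1  # assumed value for this sketch

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (room_id TEXT, topological_ordering INTEGER)")
conn.executemany(
    "INSERT INTO events VALUES (?, ?)",
    [("!a:x", MAX_DEPTH + 10), ("!a:x", 5), ("!b:x", MAX_DEPTH + 1)],
)

room_ids = ["!a:x", "!b:x"]
# Equivalent of make_in_list_sql_clause on SQLite: "room_id IN (?, ?)".
placeholders = ", ".join("?" for _ in room_ids)
conn.execute(
    f"UPDATE events SET topological_ordering = ? "
    f"WHERE topological_ordering > ? AND room_id IN ({placeholders})",
    [MAX_DEPTH, MAX_DEPTH, *room_ids],
)
# The two over-depth rows are clamped to MAX_DEPTH; the row at depth 5 is untouched.
print(conn.execute("SELECT * FROM events").fetchall())
```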
+def _resolve_stale_data_in_sliding_sync_tables(
+    txn: LoggingTransaction,
+) -> None:
+    """
+    Clears stale/out-of-date entries from the
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+
+    This accounts for when someone downgrades their Synapse version and then upgrades it
+    again. This will ensure that we don't have any stale/out-of-date data in the
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables since any new
+    events sent in rooms would have also needed to be written to the sliding sync
+    tables. For example, a new event needs to bump `event_stream_ordering` in the
+    `sliding_sync_joined_rooms` table, as does state in the room changing (like the
+    room name). Similarly, someone's membership changing in a room affects
+    `sliding_sync_membership_snapshots`.
+
+    This way, if a row exists in the sliding sync tables, we are able to rely on it
+    (accurate data). And if a row doesn't exist, we use a fallback to get the same info
+    until the background updates fill in the rows or a new event comes in triggering it
+    to be fully inserted.
+
+    FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+    foreground update for
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+    https://github.com/element-hq/synapse/issues/17623)
+    """
+
+    _resolve_stale_data_in_sliding_sync_joined_rooms_table(txn)
+    _resolve_stale_data_in_sliding_sync_membership_snapshots_table(txn)
+
+
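Both helpers below share one detection idea: take the derived table's high-water mark (`MAX(event_stream_ordering)`) and ask the source stream for anything newer; any rooms that show up were written while the derived table was not being maintained (e.g. while downgraded). A rough sketch of that check — `find_stale_rooms` is illustrative, not a Synapse function:

```python
import sqlite3

def find_stale_rooms(txn: sqlite3.Cursor, derived_table: str) -> list[str]:
    # High-water mark of the derived table.
    txn.execute(f"SELECT MAX(event_stream_ordering) FROM {derived_table}")
    (watermark,) = txn.fetchone()
    if watermark is None:
        # Derived table is empty: handled separately (schedule the full
        # backfill instead of invalidating individual rows).
        return []
    # Anything in the source stream past the watermark arrived while the
    # derived table was not being maintained, so those rooms are stale.
    txn.execute(
        "SELECT room_id FROM events WHERE stream_ordering > ? GROUP BY room_id",
        (watermark,),
    )
    return [room_id for (room_id,) in txn.fetchall()]
```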
+def _resolve_stale_data_in_sliding_sync_joined_rooms_table(
+    txn: LoggingTransaction,
+) -> None:
+    """
+    Clears stale/out-of-date entries from the `sliding_sync_joined_rooms` table and
+    kicks off the background update to catch up with what we missed while Synapse was
+    downgraded.
+
+    See `_resolve_stale_data_in_sliding_sync_tables()` description above for more
+    context.
+    """
+
+    # Find the point when we stopped writing to the `sliding_sync_joined_rooms` table
+    txn.execute(
+        """
+        SELECT event_stream_ordering
+        FROM sliding_sync_joined_rooms
+        ORDER BY event_stream_ordering DESC
+        LIMIT 1
+        """,
+    )
+
+    # If we have nothing written to the `sliding_sync_joined_rooms` table, there is
+    # nothing to clean up
+    row = cast(tuple[int] | None, txn.fetchone())
+    max_stream_ordering_sliding_sync_joined_rooms_table = None
+    depends_on = None
+    if row is not None:
+        (max_stream_ordering_sliding_sync_joined_rooms_table,) = row
+
+        txn.execute(
+            """
+            SELECT room_id
+            FROM events
+            WHERE stream_ordering > ?
+            GROUP BY room_id
+            ORDER BY MAX(stream_ordering) ASC
+            """,
+            (max_stream_ordering_sliding_sync_joined_rooms_table,),
+        )
+
+        room_rows = txn.fetchall()
+        # No new events have been written to the `events` table since the last time we
+        # wrote to the `sliding_sync_joined_rooms` table, so there is nothing to clean
+        # up. This is the expected normal scenario for people who have not downgraded
+        # their Synapse version.
+        if not room_rows:
+            return
+
+        # 1000 is an arbitrary batch size with no testing
+        for chunk in batch_iter(room_rows, 1000):
+            # Handle updating the `sliding_sync_joined_rooms` table
+            #
+            # Clear out the stale data
+            DatabasePool.simple_delete_many_batch_txn(
+                txn,
+                table="sliding_sync_joined_rooms",
+                keys=("room_id",),
+                values=chunk,
+            )
+
+            # Update the `sliding_sync_joined_rooms_to_recalculate` table with the rooms
+            # that went stale and now need to be recalculated.
+            DatabasePool.simple_upsert_many_txn_native_upsert(
+                txn,
+                table="sliding_sync_joined_rooms_to_recalculate",
+                key_names=("room_id",),
+                key_values=chunk,
+                value_names=(),
+                # No value columns, therefore make a blank list so that the following
+                # zip() works correctly.
+                value_values=[() for x in range(len(chunk))],
+            )
+    else:
+        # Avoid adding the background updates when there is no data to run them on (if
+        # the homeserver has no rooms). The portdb script refuses to run with pending
+        # background updates and, since we potentially add them every time the server
+        # starts, we add this check to allow the script to breathe.
+        txn.execute("SELECT 1 FROM local_current_membership LIMIT 1")
+        row = txn.fetchone()
+        if row is None:
+            # There are no rooms, so don't schedule the bg update.
+            return
+
+        # Re-run the `sliding_sync_joined_rooms_to_recalculate` prefill if there is
+        # nothing in the `sliding_sync_joined_rooms` table
+        DatabasePool.simple_upsert_txn_native_upsert(
+            txn,
+            table="background_updates",
+            keyvalues={
+                "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+            },
+            values={},
+            # Only insert the row if it doesn't already exist. If it already exists,
+            # we're already working on it
+            insertion_values={
+                "progress_json": "{}",
+            },
+        )
+        depends_on = _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+
+    # Now kick off the background update to catch up with what we missed while Synapse
+    # was downgraded.
+    #
+    # We may need to catch up on everything if we have nothing written to the
+    # `sliding_sync_joined_rooms` table yet. This could happen if someone had zero rooms
+    # on their server (so the normal background update completed), downgraded Synapse
+    # versions, joined and created some new rooms, and upgraded again.
+    DatabasePool.simple_upsert_txn_native_upsert(
+        txn,
+        table="background_updates",
+        keyvalues={
+            "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE
+        },
+        values={},
+        # Only insert the row if it doesn't already exist. If it already exists, we will
+        # eventually fill in the rows we're trying to populate.
+        insertion_values={
+            # Empty progress is expected since it's not used for this background update.
+            "progress_json": "{}",
+            # Wait for the prefill to finish
+            "depends_on": depends_on,
+        },
+    )
+
+
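Note the scheduling trick: `simple_upsert_txn_native_upsert` is called with `values={}` and only `insertion_values`, so re-running it on every startup never overwrites the progress a run already in flight has recorded. Assuming it compiles down to an `ON CONFLICT DO NOTHING`-style statement (an assumption about the helper's internals, not confirmed by this diff), the behaviour looks like this sqlite3 sketch; the `update_name` string is illustrative:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE background_updates (update_name TEXT PRIMARY KEY, progress_json TEXT)"
)

def schedule(update_name: str) -> None:
    # values={} + insertion_values => insert the row only if absent.
    conn.execute(
        "INSERT INTO background_updates (update_name, progress_json) "
        "VALUES (?, '{}') ON CONFLICT (update_name) DO NOTHING",
        (update_name,),
    )

schedule("sliding_sync_joined_rooms_bg_update")
# An in-flight run records some progress...
conn.execute(
    "UPDATE background_updates SET progress_json = ?",
    ('{"last_event_stream_ordering": 42}',),
)
schedule("sliding_sync_joined_rooms_bg_update")  # restart: re-scheduling is a no-op
print(conn.execute("SELECT * FROM background_updates").fetchall())
# [('sliding_sync_joined_rooms_bg_update', '{"last_event_stream_ordering": 42}')]
```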
+def _resolve_stale_data_in_sliding_sync_membership_snapshots_table(
+    txn: LoggingTransaction,
+) -> None:
+    """
+    Clears stale/out-of-date entries from the `sliding_sync_membership_snapshots` table
+    and kicks off the background update to catch up with what we missed while Synapse
+    was downgraded.
+
+    See `_resolve_stale_data_in_sliding_sync_tables()` description above for more
+    context.
+    """
+
+    # Find the point when we stopped writing to the `sliding_sync_membership_snapshots` table
+    txn.execute(
+        """
+        SELECT event_stream_ordering
+        FROM sliding_sync_membership_snapshots
+        ORDER BY event_stream_ordering DESC
+        LIMIT 1
+        """,
+    )
+
+    # If we have nothing written to the `sliding_sync_membership_snapshots` table,
+    # there is nothing to clean up
+    row = cast(tuple[int] | None, txn.fetchone())
+    max_stream_ordering_sliding_sync_membership_snapshots_table = None
+    if row is not None:
+        (max_stream_ordering_sliding_sync_membership_snapshots_table,) = row
+
+        # XXX: Since `forgotten` is simply a flag on the `room_memberships` table that is
+        # set out-of-band, there is no way to tell whether it was set while Synapse was
+        # downgraded. The only thing the user can do is `/forget` again if they run into
+        # this.
+        #
+        # This only picks up changes to memberships.
+        txn.execute(
+            """
+            SELECT user_id, room_id
+            FROM local_current_membership
+            WHERE event_stream_ordering > ?
+            ORDER BY event_stream_ordering ASC
+            """,
+            (max_stream_ordering_sliding_sync_membership_snapshots_table,),
+        )
+
+        membership_rows = txn.fetchall()
+        # No new membership changes have been written since the last time we wrote to
+        # the `sliding_sync_membership_snapshots` table, so there is nothing to clean
+        # up. This is the expected normal scenario for people who have not downgraded
+        # their Synapse version.
+        if not membership_rows:
+            return
+
+        # 1000 is an arbitrary batch size with no testing
+        for chunk in batch_iter(membership_rows, 1000):
+            # Handle updating the `sliding_sync_membership_snapshots` table
+            #
+            # Clear out the stale data
+            DatabasePool.simple_delete_many_batch_txn(
+                txn,
+                table="sliding_sync_membership_snapshots",
+                keys=("user_id", "room_id"),
+                values=chunk,
+            )
+    else:
+        # Avoid adding the background updates when there is no data to run them on (if
+        # the homeserver has no rooms). The portdb script refuses to run with pending
+        # background updates and, since we potentially add them every time the server
+        # starts, we add this check to allow the script to breathe.
+        txn.execute("SELECT 1 FROM local_current_membership LIMIT 1")
+        row = txn.fetchone()
+        if row is None:
+            # There are no rooms, so don't schedule the bg update.
+            return
+
+    # Now kick off the background update to catch up with what we missed while Synapse
+    # was downgraded.
+    #
+    # We may need to catch up on everything if we have nothing written to the
+    # `sliding_sync_membership_snapshots` table yet. This could happen if someone had
+    # zero rooms on their server (so the normal background update completed), downgraded
+    # Synapse versions, joined and created some new rooms, and upgraded again.
+    progress_json: JsonDict = {}
+    if max_stream_ordering_sliding_sync_membership_snapshots_table is not None:
+        progress_json["initial_phase"] = False
+        progress_json["last_event_stream_ordering"] = (
+            max_stream_ordering_sliding_sync_membership_snapshots_table
+        )
+
+    DatabasePool.simple_upsert_txn_native_upsert(
+        txn,
+        table="background_updates",
+        keyvalues={
+            "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE
+        },
+        values={},
+        # Only insert the row if it doesn't already exist. If it already exists, we will
+        # eventually fill in the rows we're trying to populate.
+        insertion_values={
+            "progress_json": json_encoder.encode(progress_json),
+        },
+    )
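The seeded `progress_json` is what turns this from a full rebuild into an incremental catch-up: `initial_phase: False` plus a `last_event_stream_ordering` watermark tells the background update to skip the from-scratch phase and resume from that point. A sketch of how a consumer would interpret that progress blob — `next_action` is hypothetical, not the real update body:

```python
import json

def next_action(progress_json: str) -> str:
    progress = json.loads(progress_json)
    if progress.get("initial_phase", True):
        return "backfill all membership snapshots from scratch"
    watermark = progress["last_event_stream_ordering"]
    return f"catch up on membership changes with stream ordering > {watermark}"

print(next_action("{}"))  # fresh schedule: full backfill
print(next_action(json.dumps(
    {"initial_phase": False, "last_event_stream_ordering": 12345}
)))  # downgrade recovery: incremental catch-up from the watermark
```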