matrix_synapse-1.143.0-cp310-abi3-manylinux_2_28_aarch64.whl

This diff represents the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of matrix-synapse might be problematic.

Files changed (1058)
  1. matrix_synapse-1.143.0.dist-info/AUTHORS.rst +51 -0
  2. matrix_synapse-1.143.0.dist-info/LICENSE-AGPL-3.0 +661 -0
  3. matrix_synapse-1.143.0.dist-info/LICENSE-COMMERCIAL +6 -0
  4. matrix_synapse-1.143.0.dist-info/METADATA +385 -0
  5. matrix_synapse-1.143.0.dist-info/RECORD +1058 -0
  6. matrix_synapse-1.143.0.dist-info/WHEEL +4 -0
  7. matrix_synapse-1.143.0.dist-info/entry_points.txt +14 -0
  8. synapse/__init__.py +97 -0
  9. synapse/_scripts/__init__.py +0 -0
  10. synapse/_scripts/export_signing_key.py +109 -0
  11. synapse/_scripts/generate_config.py +83 -0
  12. synapse/_scripts/generate_log_config.py +56 -0
  13. synapse/_scripts/generate_signing_key.py +55 -0
  14. synapse/_scripts/generate_workers_map.py +318 -0
  15. synapse/_scripts/hash_password.py +95 -0
  16. synapse/_scripts/move_remote_media_to_new_store.py +128 -0
  17. synapse/_scripts/register_new_matrix_user.py +402 -0
  18. synapse/_scripts/review_recent_signups.py +212 -0
  19. synapse/_scripts/synapse_port_db.py +1604 -0
  20. synapse/_scripts/synctl.py +365 -0
  21. synapse/_scripts/update_synapse_database.py +130 -0
  22. synapse/api/__init__.py +20 -0
  23. synapse/api/auth/__init__.py +207 -0
  24. synapse/api/auth/base.py +406 -0
  25. synapse/api/auth/internal.py +299 -0
  26. synapse/api/auth/mas.py +436 -0
  27. synapse/api/auth/msc3861_delegated.py +617 -0
  28. synapse/api/auth_blocking.py +144 -0
  29. synapse/api/constants.py +362 -0
  30. synapse/api/errors.py +907 -0
  31. synapse/api/filtering.py +537 -0
  32. synapse/api/presence.py +102 -0
  33. synapse/api/ratelimiting.py +480 -0
  34. synapse/api/room_versions.py +535 -0
  35. synapse/api/urls.py +118 -0
  36. synapse/app/__init__.py +60 -0
  37. synapse/app/_base.py +862 -0
  38. synapse/app/admin_cmd.py +388 -0
  39. synapse/app/appservice.py +30 -0
  40. synapse/app/client_reader.py +30 -0
  41. synapse/app/complement_fork_starter.py +206 -0
  42. synapse/app/event_creator.py +29 -0
  43. synapse/app/federation_reader.py +30 -0
  44. synapse/app/federation_sender.py +30 -0
  45. synapse/app/frontend_proxy.py +30 -0
  46. synapse/app/generic_worker.py +474 -0
  47. synapse/app/homeserver.py +505 -0
  48. synapse/app/media_repository.py +30 -0
  49. synapse/app/phone_stats_home.py +296 -0
  50. synapse/app/pusher.py +30 -0
  51. synapse/app/synchrotron.py +30 -0
  52. synapse/app/user_dir.py +31 -0
  53. synapse/appservice/__init__.py +458 -0
  54. synapse/appservice/api.py +567 -0
  55. synapse/appservice/scheduler.py +564 -0
  56. synapse/config/__init__.py +27 -0
  57. synapse/config/__main__.py +62 -0
  58. synapse/config/_base.py +1106 -0
  59. synapse/config/_base.pyi +215 -0
  60. synapse/config/_util.py +99 -0
  61. synapse/config/account_validity.py +116 -0
  62. synapse/config/api.py +141 -0
  63. synapse/config/appservice.py +210 -0
  64. synapse/config/auth.py +80 -0
  65. synapse/config/auto_accept_invites.py +43 -0
  66. synapse/config/background_updates.py +44 -0
  67. synapse/config/cache.py +231 -0
  68. synapse/config/captcha.py +90 -0
  69. synapse/config/cas.py +116 -0
  70. synapse/config/consent.py +73 -0
  71. synapse/config/database.py +184 -0
  72. synapse/config/emailconfig.py +367 -0
  73. synapse/config/experimental.py +595 -0
  74. synapse/config/federation.py +114 -0
  75. synapse/config/homeserver.py +141 -0
  76. synapse/config/jwt.py +55 -0
  77. synapse/config/key.py +447 -0
  78. synapse/config/logger.py +390 -0
  79. synapse/config/mas.py +192 -0
  80. synapse/config/matrixrtc.py +66 -0
  81. synapse/config/metrics.py +84 -0
  82. synapse/config/modules.py +40 -0
  83. synapse/config/oembed.py +185 -0
  84. synapse/config/oidc.py +509 -0
  85. synapse/config/password_auth_providers.py +82 -0
  86. synapse/config/push.py +64 -0
  87. synapse/config/ratelimiting.py +254 -0
  88. synapse/config/redis.py +74 -0
  89. synapse/config/registration.py +296 -0
  90. synapse/config/repository.py +311 -0
  91. synapse/config/retention.py +162 -0
  92. synapse/config/room.py +88 -0
  93. synapse/config/room_directory.py +165 -0
  94. synapse/config/saml2.py +251 -0
  95. synapse/config/server.py +1170 -0
  96. synapse/config/server_notices.py +84 -0
  97. synapse/config/spam_checker.py +66 -0
  98. synapse/config/sso.py +121 -0
  99. synapse/config/stats.py +54 -0
  100. synapse/config/third_party_event_rules.py +40 -0
  101. synapse/config/tls.py +192 -0
  102. synapse/config/tracer.py +71 -0
  103. synapse/config/user_directory.py +47 -0
  104. synapse/config/user_types.py +42 -0
  105. synapse/config/voip.py +59 -0
  106. synapse/config/workers.py +642 -0
  107. synapse/crypto/__init__.py +20 -0
  108. synapse/crypto/context_factory.py +278 -0
  109. synapse/crypto/event_signing.py +194 -0
  110. synapse/crypto/keyring.py +931 -0
  111. synapse/event_auth.py +1266 -0
  112. synapse/events/__init__.py +667 -0
  113. synapse/events/auto_accept_invites.py +216 -0
  114. synapse/events/builder.py +387 -0
  115. synapse/events/presence_router.py +243 -0
  116. synapse/events/snapshot.py +559 -0
  117. synapse/events/utils.py +924 -0
  118. synapse/events/validator.py +305 -0
  119. synapse/federation/__init__.py +22 -0
  120. synapse/federation/federation_base.py +382 -0
  121. synapse/federation/federation_client.py +2132 -0
  122. synapse/federation/federation_server.py +1540 -0
  123. synapse/federation/persistence.py +70 -0
  124. synapse/federation/send_queue.py +531 -0
  125. synapse/federation/sender/__init__.py +1164 -0
  126. synapse/federation/sender/per_destination_queue.py +886 -0
  127. synapse/federation/sender/transaction_manager.py +210 -0
  128. synapse/federation/transport/__init__.py +28 -0
  129. synapse/federation/transport/client.py +1199 -0
  130. synapse/federation/transport/server/__init__.py +334 -0
  131. synapse/federation/transport/server/_base.py +429 -0
  132. synapse/federation/transport/server/federation.py +910 -0
  133. synapse/federation/units.py +133 -0
  134. synapse/handlers/__init__.py +20 -0
  135. synapse/handlers/account.py +162 -0
  136. synapse/handlers/account_data.py +360 -0
  137. synapse/handlers/account_validity.py +361 -0
  138. synapse/handlers/admin.py +615 -0
  139. synapse/handlers/appservice.py +989 -0
  140. synapse/handlers/auth.py +2481 -0
  141. synapse/handlers/cas.py +413 -0
  142. synapse/handlers/deactivate_account.py +363 -0
  143. synapse/handlers/delayed_events.py +599 -0
  144. synapse/handlers/device.py +1870 -0
  145. synapse/handlers/devicemessage.py +399 -0
  146. synapse/handlers/directory.py +545 -0
  147. synapse/handlers/e2e_keys.py +1834 -0
  148. synapse/handlers/e2e_room_keys.py +455 -0
  149. synapse/handlers/event_auth.py +390 -0
  150. synapse/handlers/events.py +201 -0
  151. synapse/handlers/federation.py +2039 -0
  152. synapse/handlers/federation_event.py +2419 -0
  153. synapse/handlers/identity.py +812 -0
  154. synapse/handlers/initial_sync.py +528 -0
  155. synapse/handlers/jwt.py +120 -0
  156. synapse/handlers/message.py +2347 -0
  157. synapse/handlers/oidc.py +1801 -0
  158. synapse/handlers/pagination.py +768 -0
  159. synapse/handlers/password_policy.py +102 -0
  160. synapse/handlers/presence.py +2633 -0
  161. synapse/handlers/profile.py +655 -0
  162. synapse/handlers/push_rules.py +164 -0
  163. synapse/handlers/read_marker.py +79 -0
  164. synapse/handlers/receipts.py +351 -0
  165. synapse/handlers/register.py +1059 -0
  166. synapse/handlers/relations.py +623 -0
  167. synapse/handlers/reports.py +98 -0
  168. synapse/handlers/room.py +2448 -0
  169. synapse/handlers/room_list.py +632 -0
  170. synapse/handlers/room_member.py +2365 -0
  171. synapse/handlers/room_member_worker.py +146 -0
  172. synapse/handlers/room_policy.py +186 -0
  173. synapse/handlers/room_summary.py +1057 -0
  174. synapse/handlers/saml.py +524 -0
  175. synapse/handlers/search.py +723 -0
  176. synapse/handlers/send_email.py +209 -0
  177. synapse/handlers/set_password.py +71 -0
  178. synapse/handlers/sliding_sync/__init__.py +1701 -0
  179. synapse/handlers/sliding_sync/extensions.py +969 -0
  180. synapse/handlers/sliding_sync/room_lists.py +2262 -0
  181. synapse/handlers/sliding_sync/store.py +128 -0
  182. synapse/handlers/sso.py +1291 -0
  183. synapse/handlers/state_deltas.py +82 -0
  184. synapse/handlers/stats.py +321 -0
  185. synapse/handlers/sync.py +3106 -0
  186. synapse/handlers/thread_subscriptions.py +190 -0
  187. synapse/handlers/typing.py +606 -0
  188. synapse/handlers/ui_auth/__init__.py +48 -0
  189. synapse/handlers/ui_auth/checkers.py +332 -0
  190. synapse/handlers/user_directory.py +783 -0
  191. synapse/handlers/worker_lock.py +371 -0
  192. synapse/http/__init__.py +105 -0
  193. synapse/http/additional_resource.py +62 -0
  194. synapse/http/client.py +1373 -0
  195. synapse/http/connectproxyclient.py +316 -0
  196. synapse/http/federation/__init__.py +19 -0
  197. synapse/http/federation/matrix_federation_agent.py +490 -0
  198. synapse/http/federation/srv_resolver.py +196 -0
  199. synapse/http/federation/well_known_resolver.py +367 -0
  200. synapse/http/matrixfederationclient.py +1873 -0
  201. synapse/http/proxy.py +290 -0
  202. synapse/http/proxyagent.py +497 -0
  203. synapse/http/replicationagent.py +202 -0
  204. synapse/http/request_metrics.py +309 -0
  205. synapse/http/server.py +1110 -0
  206. synapse/http/servlet.py +1018 -0
  207. synapse/http/site.py +825 -0
  208. synapse/http/types.py +27 -0
  209. synapse/logging/__init__.py +31 -0
  210. synapse/logging/_remote.py +261 -0
  211. synapse/logging/_terse_json.py +95 -0
  212. synapse/logging/context.py +1209 -0
  213. synapse/logging/formatter.py +62 -0
  214. synapse/logging/handlers.py +99 -0
  215. synapse/logging/loggers.py +25 -0
  216. synapse/logging/opentracing.py +1132 -0
  217. synapse/logging/scopecontextmanager.py +160 -0
  218. synapse/media/_base.py +830 -0
  219. synapse/media/filepath.py +417 -0
  220. synapse/media/media_repository.py +1580 -0
  221. synapse/media/media_storage.py +702 -0
  222. synapse/media/oembed.py +277 -0
  223. synapse/media/preview_html.py +556 -0
  224. synapse/media/storage_provider.py +195 -0
  225. synapse/media/thumbnailer.py +833 -0
  226. synapse/media/url_previewer.py +875 -0
  227. synapse/metrics/__init__.py +748 -0
  228. synapse/metrics/_gc.py +219 -0
  229. synapse/metrics/_reactor_metrics.py +171 -0
  230. synapse/metrics/_types.py +38 -0
  231. synapse/metrics/background_process_metrics.py +555 -0
  232. synapse/metrics/common_usage_metrics.py +94 -0
  233. synapse/metrics/jemalloc.py +248 -0
  234. synapse/module_api/__init__.py +2131 -0
  235. synapse/module_api/callbacks/__init__.py +50 -0
  236. synapse/module_api/callbacks/account_validity_callbacks.py +106 -0
  237. synapse/module_api/callbacks/media_repository_callbacks.py +157 -0
  238. synapse/module_api/callbacks/ratelimit_callbacks.py +78 -0
  239. synapse/module_api/callbacks/spamchecker_callbacks.py +991 -0
  240. synapse/module_api/callbacks/third_party_event_rules_callbacks.py +592 -0
  241. synapse/module_api/errors.py +42 -0
  242. synapse/notifier.py +970 -0
  243. synapse/push/__init__.py +212 -0
  244. synapse/push/bulk_push_rule_evaluator.py +635 -0
  245. synapse/push/clientformat.py +126 -0
  246. synapse/push/emailpusher.py +333 -0
  247. synapse/push/httppusher.py +564 -0
  248. synapse/push/mailer.py +1010 -0
  249. synapse/push/presentable_names.py +216 -0
  250. synapse/push/push_tools.py +114 -0
  251. synapse/push/push_types.py +141 -0
  252. synapse/push/pusher.py +87 -0
  253. synapse/push/pusherpool.py +501 -0
  254. synapse/push/rulekinds.py +33 -0
  255. synapse/py.typed +0 -0
  256. synapse/replication/__init__.py +20 -0
  257. synapse/replication/http/__init__.py +68 -0
  258. synapse/replication/http/_base.py +468 -0
  259. synapse/replication/http/account_data.py +297 -0
  260. synapse/replication/http/deactivate_account.py +81 -0
  261. synapse/replication/http/delayed_events.py +62 -0
  262. synapse/replication/http/devices.py +254 -0
  263. synapse/replication/http/federation.py +334 -0
  264. synapse/replication/http/login.py +106 -0
  265. synapse/replication/http/membership.py +364 -0
  266. synapse/replication/http/presence.py +133 -0
  267. synapse/replication/http/push.py +156 -0
  268. synapse/replication/http/register.py +172 -0
  269. synapse/replication/http/send_events.py +182 -0
  270. synapse/replication/http/state.py +82 -0
  271. synapse/replication/http/streams.py +101 -0
  272. synapse/replication/tcp/__init__.py +56 -0
  273. synapse/replication/tcp/client.py +552 -0
  274. synapse/replication/tcp/commands.py +569 -0
  275. synapse/replication/tcp/context.py +41 -0
  276. synapse/replication/tcp/external_cache.py +156 -0
  277. synapse/replication/tcp/handler.py +922 -0
  278. synapse/replication/tcp/protocol.py +608 -0
  279. synapse/replication/tcp/redis.py +509 -0
  280. synapse/replication/tcp/resource.py +348 -0
  281. synapse/replication/tcp/streams/__init__.py +96 -0
  282. synapse/replication/tcp/streams/_base.py +765 -0
  283. synapse/replication/tcp/streams/events.py +287 -0
  284. synapse/replication/tcp/streams/federation.py +92 -0
  285. synapse/replication/tcp/streams/partial_state.py +80 -0
  286. synapse/res/providers.json +29 -0
  287. synapse/res/templates/_base.html +29 -0
  288. synapse/res/templates/account_previously_renewed.html +6 -0
  289. synapse/res/templates/account_renewed.html +6 -0
  290. synapse/res/templates/add_threepid.html +8 -0
  291. synapse/res/templates/add_threepid.txt +6 -0
  292. synapse/res/templates/add_threepid_failure.html +7 -0
  293. synapse/res/templates/add_threepid_success.html +6 -0
  294. synapse/res/templates/already_in_use.html +12 -0
  295. synapse/res/templates/already_in_use.txt +10 -0
  296. synapse/res/templates/auth_success.html +21 -0
  297. synapse/res/templates/invalid_token.html +6 -0
  298. synapse/res/templates/mail-Element.css +7 -0
  299. synapse/res/templates/mail-Vector.css +7 -0
  300. synapse/res/templates/mail-expiry.css +4 -0
  301. synapse/res/templates/mail.css +156 -0
  302. synapse/res/templates/notice_expiry.html +46 -0
  303. synapse/res/templates/notice_expiry.txt +7 -0
  304. synapse/res/templates/notif.html +51 -0
  305. synapse/res/templates/notif.txt +22 -0
  306. synapse/res/templates/notif_mail.html +59 -0
  307. synapse/res/templates/notif_mail.txt +10 -0
  308. synapse/res/templates/password_reset.html +10 -0
  309. synapse/res/templates/password_reset.txt +7 -0
  310. synapse/res/templates/password_reset_confirmation.html +15 -0
  311. synapse/res/templates/password_reset_failure.html +7 -0
  312. synapse/res/templates/password_reset_success.html +6 -0
  313. synapse/res/templates/recaptcha.html +42 -0
  314. synapse/res/templates/registration.html +12 -0
  315. synapse/res/templates/registration.txt +10 -0
  316. synapse/res/templates/registration_failure.html +6 -0
  317. synapse/res/templates/registration_success.html +6 -0
  318. synapse/res/templates/registration_token.html +18 -0
  319. synapse/res/templates/room.html +33 -0
  320. synapse/res/templates/room.txt +9 -0
  321. synapse/res/templates/sso.css +129 -0
  322. synapse/res/templates/sso_account_deactivated.html +25 -0
  323. synapse/res/templates/sso_auth_account_details.html +186 -0
  324. synapse/res/templates/sso_auth_account_details.js +116 -0
  325. synapse/res/templates/sso_auth_bad_user.html +26 -0
  326. synapse/res/templates/sso_auth_confirm.html +27 -0
  327. synapse/res/templates/sso_auth_success.html +26 -0
  328. synapse/res/templates/sso_error.html +71 -0
  329. synapse/res/templates/sso_footer.html +19 -0
  330. synapse/res/templates/sso_login_idp_picker.html +60 -0
  331. synapse/res/templates/sso_new_user_consent.html +30 -0
  332. synapse/res/templates/sso_partial_profile.html +19 -0
  333. synapse/res/templates/sso_redirect_confirm.html +39 -0
  334. synapse/res/templates/style.css +33 -0
  335. synapse/res/templates/terms.html +27 -0
  336. synapse/rest/__init__.py +197 -0
  337. synapse/rest/admin/__init__.py +390 -0
  338. synapse/rest/admin/_base.py +72 -0
  339. synapse/rest/admin/background_updates.py +171 -0
  340. synapse/rest/admin/devices.py +221 -0
  341. synapse/rest/admin/event_reports.py +173 -0
  342. synapse/rest/admin/events.py +69 -0
  343. synapse/rest/admin/experimental_features.py +137 -0
  344. synapse/rest/admin/federation.py +243 -0
  345. synapse/rest/admin/media.py +540 -0
  346. synapse/rest/admin/registration_tokens.py +358 -0
  347. synapse/rest/admin/rooms.py +1061 -0
  348. synapse/rest/admin/scheduled_tasks.py +70 -0
  349. synapse/rest/admin/server_notice_servlet.py +132 -0
  350. synapse/rest/admin/statistics.py +132 -0
  351. synapse/rest/admin/username_available.py +58 -0
  352. synapse/rest/admin/users.py +1606 -0
  353. synapse/rest/client/__init__.py +20 -0
  354. synapse/rest/client/_base.py +113 -0
  355. synapse/rest/client/account.py +930 -0
  356. synapse/rest/client/account_data.py +319 -0
  357. synapse/rest/client/account_validity.py +103 -0
  358. synapse/rest/client/appservice_ping.py +125 -0
  359. synapse/rest/client/auth.py +218 -0
  360. synapse/rest/client/auth_metadata.py +122 -0
  361. synapse/rest/client/capabilities.py +121 -0
  362. synapse/rest/client/delayed_events.py +165 -0
  363. synapse/rest/client/devices.py +587 -0
  364. synapse/rest/client/directory.py +211 -0
  365. synapse/rest/client/events.py +116 -0
  366. synapse/rest/client/filter.py +112 -0
  367. synapse/rest/client/initial_sync.py +65 -0
  368. synapse/rest/client/keys.py +678 -0
  369. synapse/rest/client/knock.py +104 -0
  370. synapse/rest/client/login.py +750 -0
  371. synapse/rest/client/login_token_request.py +127 -0
  372. synapse/rest/client/logout.py +93 -0
  373. synapse/rest/client/matrixrtc.py +52 -0
  374. synapse/rest/client/media.py +285 -0
  375. synapse/rest/client/mutual_rooms.py +93 -0
  376. synapse/rest/client/notifications.py +137 -0
  377. synapse/rest/client/openid.py +109 -0
  378. synapse/rest/client/password_policy.py +69 -0
  379. synapse/rest/client/presence.py +131 -0
  380. synapse/rest/client/profile.py +291 -0
  381. synapse/rest/client/push_rule.py +331 -0
  382. synapse/rest/client/pusher.py +181 -0
  383. synapse/rest/client/read_marker.py +104 -0
  384. synapse/rest/client/receipts.py +165 -0
  385. synapse/rest/client/register.py +1067 -0
  386. synapse/rest/client/relations.py +138 -0
  387. synapse/rest/client/rendezvous.py +76 -0
  388. synapse/rest/client/reporting.py +207 -0
  389. synapse/rest/client/room.py +1669 -0
  390. synapse/rest/client/room_keys.py +426 -0
  391. synapse/rest/client/room_upgrade_rest_servlet.py +112 -0
  392. synapse/rest/client/sendtodevice.py +85 -0
  393. synapse/rest/client/sync.py +1131 -0
  394. synapse/rest/client/tags.py +129 -0
  395. synapse/rest/client/thirdparty.py +130 -0
  396. synapse/rest/client/thread_subscriptions.py +247 -0
  397. synapse/rest/client/tokenrefresh.py +52 -0
  398. synapse/rest/client/transactions.py +149 -0
  399. synapse/rest/client/user_directory.py +90 -0
  400. synapse/rest/client/versions.py +191 -0
  401. synapse/rest/client/voip.py +88 -0
  402. synapse/rest/consent/__init__.py +0 -0
  403. synapse/rest/consent/consent_resource.py +210 -0
  404. synapse/rest/health.py +38 -0
  405. synapse/rest/key/__init__.py +20 -0
  406. synapse/rest/key/v2/__init__.py +40 -0
  407. synapse/rest/key/v2/local_key_resource.py +125 -0
  408. synapse/rest/key/v2/remote_key_resource.py +302 -0
  409. synapse/rest/media/__init__.py +0 -0
  410. synapse/rest/media/config_resource.py +53 -0
  411. synapse/rest/media/create_resource.py +90 -0
  412. synapse/rest/media/download_resource.py +110 -0
  413. synapse/rest/media/media_repository_resource.py +113 -0
  414. synapse/rest/media/preview_url_resource.py +77 -0
  415. synapse/rest/media/thumbnail_resource.py +142 -0
  416. synapse/rest/media/upload_resource.py +187 -0
  417. synapse/rest/media/v1/__init__.py +39 -0
  418. synapse/rest/media/v1/_base.py +23 -0
  419. synapse/rest/media/v1/media_storage.py +23 -0
  420. synapse/rest/media/v1/storage_provider.py +23 -0
  421. synapse/rest/synapse/__init__.py +20 -0
  422. synapse/rest/synapse/client/__init__.py +93 -0
  423. synapse/rest/synapse/client/federation_whitelist.py +66 -0
  424. synapse/rest/synapse/client/jwks.py +77 -0
  425. synapse/rest/synapse/client/new_user_consent.py +115 -0
  426. synapse/rest/synapse/client/oidc/__init__.py +45 -0
  427. synapse/rest/synapse/client/oidc/backchannel_logout_resource.py +42 -0
  428. synapse/rest/synapse/client/oidc/callback_resource.py +48 -0
  429. synapse/rest/synapse/client/password_reset.py +129 -0
  430. synapse/rest/synapse/client/pick_idp.py +107 -0
  431. synapse/rest/synapse/client/pick_username.py +153 -0
  432. synapse/rest/synapse/client/rendezvous.py +58 -0
  433. synapse/rest/synapse/client/saml2/__init__.py +42 -0
  434. synapse/rest/synapse/client/saml2/metadata_resource.py +46 -0
  435. synapse/rest/synapse/client/saml2/response_resource.py +52 -0
  436. synapse/rest/synapse/client/sso_register.py +56 -0
  437. synapse/rest/synapse/client/unsubscribe.py +88 -0
  438. synapse/rest/synapse/mas/__init__.py +71 -0
  439. synapse/rest/synapse/mas/_base.py +55 -0
  440. synapse/rest/synapse/mas/devices.py +239 -0
  441. synapse/rest/synapse/mas/users.py +469 -0
  442. synapse/rest/well_known.py +148 -0
  443. synapse/server.py +1257 -0
  444. synapse/server_notices/__init__.py +0 -0
  445. synapse/server_notices/consent_server_notices.py +136 -0
  446. synapse/server_notices/resource_limits_server_notices.py +215 -0
  447. synapse/server_notices/server_notices_manager.py +388 -0
  448. synapse/server_notices/server_notices_sender.py +67 -0
  449. synapse/server_notices/worker_server_notices_sender.py +46 -0
  450. synapse/spam_checker_api/__init__.py +31 -0
  451. synapse/state/__init__.py +1022 -0
  452. synapse/state/v1.py +369 -0
  453. synapse/state/v2.py +984 -0
  454. synapse/static/client/login/index.html +47 -0
  455. synapse/static/client/login/js/jquery-3.4.1.min.js +2 -0
  456. synapse/static/client/login/js/login.js +291 -0
  457. synapse/static/client/login/spinner.gif +0 -0
  458. synapse/static/client/login/style.css +79 -0
  459. synapse/static/index.html +63 -0
  460. synapse/storage/__init__.py +43 -0
  461. synapse/storage/_base.py +245 -0
  462. synapse/storage/admin_client_config.py +25 -0
  463. synapse/storage/background_updates.py +1188 -0
  464. synapse/storage/controllers/__init__.py +57 -0
  465. synapse/storage/controllers/persist_events.py +1237 -0
  466. synapse/storage/controllers/purge_events.py +455 -0
  467. synapse/storage/controllers/state.py +950 -0
  468. synapse/storage/controllers/stats.py +119 -0
  469. synapse/storage/database.py +2719 -0
  470. synapse/storage/databases/__init__.py +175 -0
  471. synapse/storage/databases/main/__init__.py +420 -0
  472. synapse/storage/databases/main/account_data.py +1059 -0
  473. synapse/storage/databases/main/appservice.py +473 -0
  474. synapse/storage/databases/main/cache.py +911 -0
  475. synapse/storage/databases/main/censor_events.py +225 -0
  476. synapse/storage/databases/main/client_ips.py +815 -0
  477. synapse/storage/databases/main/delayed_events.py +562 -0
  478. synapse/storage/databases/main/deviceinbox.py +1271 -0
  479. synapse/storage/databases/main/devices.py +2578 -0
  480. synapse/storage/databases/main/directory.py +212 -0
  481. synapse/storage/databases/main/e2e_room_keys.py +689 -0
  482. synapse/storage/databases/main/end_to_end_keys.py +1894 -0
  483. synapse/storage/databases/main/event_federation.py +2508 -0
  484. synapse/storage/databases/main/event_push_actions.py +1933 -0
  485. synapse/storage/databases/main/events.py +3765 -0
  486. synapse/storage/databases/main/events_bg_updates.py +2910 -0
  487. synapse/storage/databases/main/events_forward_extremities.py +126 -0
  488. synapse/storage/databases/main/events_worker.py +2786 -0
  489. synapse/storage/databases/main/experimental_features.py +130 -0
  490. synapse/storage/databases/main/filtering.py +231 -0
  491. synapse/storage/databases/main/keys.py +291 -0
  492. synapse/storage/databases/main/lock.py +553 -0
  493. synapse/storage/databases/main/media_repository.py +1068 -0
  494. synapse/storage/databases/main/metrics.py +460 -0
  495. synapse/storage/databases/main/monthly_active_users.py +443 -0
  496. synapse/storage/databases/main/openid.py +60 -0
  497. synapse/storage/databases/main/presence.py +509 -0
  498. synapse/storage/databases/main/profile.py +539 -0
  499. synapse/storage/databases/main/purge_events.py +521 -0
  500. synapse/storage/databases/main/push_rule.py +970 -0
  501. synapse/storage/databases/main/pusher.py +793 -0
  502. synapse/storage/databases/main/receipts.py +1341 -0
  503. synapse/storage/databases/main/registration.py +3072 -0
  504. synapse/storage/databases/main/rejections.py +37 -0
  505. synapse/storage/databases/main/relations.py +1116 -0
  506. synapse/storage/databases/main/room.py +2779 -0
  507. synapse/storage/databases/main/roommember.py +2110 -0
  508. synapse/storage/databases/main/search.py +939 -0
  509. synapse/storage/databases/main/session.py +151 -0
  510. synapse/storage/databases/main/signatures.py +94 -0
  511. synapse/storage/databases/main/sliding_sync.py +603 -0
  512. synapse/storage/databases/main/state.py +1002 -0
  513. synapse/storage/databases/main/state_deltas.py +329 -0
  514. synapse/storage/databases/main/stats.py +789 -0
  515. synapse/storage/databases/main/stream.py +2577 -0
  516. synapse/storage/databases/main/tags.py +360 -0
  517. synapse/storage/databases/main/task_scheduler.py +225 -0
  518. synapse/storage/databases/main/thread_subscriptions.py +589 -0
  519. synapse/storage/databases/main/transactions.py +675 -0
  520. synapse/storage/databases/main/ui_auth.py +420 -0
  521. synapse/storage/databases/main/user_directory.py +1330 -0
  522. synapse/storage/databases/main/user_erasure_store.py +117 -0
  523. synapse/storage/databases/state/__init__.py +22 -0
  524. synapse/storage/databases/state/bg_updates.py +497 -0
  525. synapse/storage/databases/state/deletion.py +557 -0
  526. synapse/storage/databases/state/store.py +948 -0
  527. synapse/storage/engines/__init__.py +70 -0
  528. synapse/storage/engines/_base.py +154 -0
  529. synapse/storage/engines/postgres.py +261 -0
  530. synapse/storage/engines/sqlite.py +199 -0
  531. synapse/storage/invite_rule.py +112 -0
  532. synapse/storage/keys.py +40 -0
  533. synapse/storage/prepare_database.py +730 -0
  534. synapse/storage/push_rule.py +28 -0
  535. synapse/storage/roommember.py +88 -0
  536. synapse/storage/schema/README.md +4 -0
  537. synapse/storage/schema/__init__.py +186 -0
  538. synapse/storage/schema/common/delta/25/00background_updates.sql +40 -0
  539. synapse/storage/schema/common/delta/35/00background_updates_add_col.sql +36 -0
  540. synapse/storage/schema/common/delta/58/00background_update_ordering.sql +38 -0
  541. synapse/storage/schema/common/full_schemas/72/full.sql.postgres +8 -0
  542. synapse/storage/schema/common/full_schemas/72/full.sql.sqlite +6 -0
  543. synapse/storage/schema/common/schema_version.sql +60 -0
  544. synapse/storage/schema/main/delta/12/v12.sql +82 -0
  545. synapse/storage/schema/main/delta/13/v13.sql +38 -0
  546. synapse/storage/schema/main/delta/14/v14.sql +42 -0
  547. synapse/storage/schema/main/delta/15/appservice_txns.sql +50 -0
  548. synapse/storage/schema/main/delta/15/presence_indices.sql +2 -0
  549. synapse/storage/schema/main/delta/15/v15.sql +24 -0
  550. synapse/storage/schema/main/delta/16/events_order_index.sql +4 -0
  551. synapse/storage/schema/main/delta/16/remote_media_cache_index.sql +2 -0
  552. synapse/storage/schema/main/delta/16/remove_duplicates.sql +9 -0
  553. synapse/storage/schema/main/delta/16/room_alias_index.sql +3 -0
  554. synapse/storage/schema/main/delta/16/unique_constraints.sql +72 -0
  555. synapse/storage/schema/main/delta/16/users.sql +56 -0
  556. synapse/storage/schema/main/delta/17/drop_indexes.sql +37 -0
  557. synapse/storage/schema/main/delta/17/server_keys.sql +43 -0
  558. synapse/storage/schema/main/delta/17/user_threepids.sql +9 -0
  559. synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql +51 -0
  560. synapse/storage/schema/main/delta/19/event_index.sql +38 -0
  561. synapse/storage/schema/main/delta/20/dummy.sql +1 -0
  562. synapse/storage/schema/main/delta/20/pushers.py +93 -0
  563. synapse/storage/schema/main/delta/21/end_to_end_keys.sql +53 -0
  564. synapse/storage/schema/main/delta/21/receipts.sql +57 -0
  565. synapse/storage/schema/main/delta/22/receipts_index.sql +41 -0
  566. synapse/storage/schema/main/delta/22/user_threepids_unique.sql +19 -0
  567. synapse/storage/schema/main/delta/24/stats_reporting.sql +37 -0
  568. synapse/storage/schema/main/delta/25/fts.py +81 -0
  569. synapse/storage/schema/main/delta/25/guest_access.sql +44 -0
  570. synapse/storage/schema/main/delta/25/history_visibility.sql +44 -0
  571. synapse/storage/schema/main/delta/25/tags.sql +57 -0
  572. synapse/storage/schema/main/delta/26/account_data.sql +36 -0
  573. synapse/storage/schema/main/delta/27/account_data.sql +55 -0
  574. synapse/storage/schema/main/delta/27/forgotten_memberships.sql +45 -0
  575. synapse/storage/schema/main/delta/27/ts.py +61 -0
  576. synapse/storage/schema/main/delta/28/event_push_actions.sql +46 -0
  577. synapse/storage/schema/main/delta/28/events_room_stream.sql +39 -0
  578. synapse/storage/schema/main/delta/28/public_roms_index.sql +39 -0
  579. synapse/storage/schema/main/delta/28/receipts_user_id_index.sql +41 -0
  580. synapse/storage/schema/main/delta/28/upgrade_times.sql +40 -0
  581. synapse/storage/schema/main/delta/28/users_is_guest.sql +41 -0
  582. synapse/storage/schema/main/delta/29/push_actions.sql +54 -0
  583. synapse/storage/schema/main/delta/30/alias_creator.sql +35 -0
  584. synapse/storage/schema/main/delta/30/as_users.py +82 -0
  585. synapse/storage/schema/main/delta/30/deleted_pushers.sql +44 -0
  586. synapse/storage/schema/main/delta/30/presence_stream.sql +49 -0
  587. synapse/storage/schema/main/delta/30/public_rooms.sql +42 -0
  588. synapse/storage/schema/main/delta/30/push_rule_stream.sql +57 -0
  589. synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql +43 -0
  590. synapse/storage/schema/main/delta/31/invites.sql +61 -0
  591. synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql +46 -0
  592. synapse/storage/schema/main/delta/31/pushers_0.py +92 -0
  593. synapse/storage/schema/main/delta/31/pushers_index.sql +41 -0
  594. synapse/storage/schema/main/delta/31/search_update.py +65 -0
  595. synapse/storage/schema/main/delta/32/events.sql +35 -0
  596. synapse/storage/schema/main/delta/32/openid.sql +9 -0
  597. synapse/storage/schema/main/delta/32/pusher_throttle.sql +42 -0
  598. synapse/storage/schema/main/delta/32/remove_indices.sql +52 -0
  599. synapse/storage/schema/main/delta/32/reports.sql +44 -0
  600. synapse/storage/schema/main/delta/33/access_tokens_device_index.sql +36 -0
  601. synapse/storage/schema/main/delta/33/devices.sql +40 -0
  602. synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql +38 -0
  603. synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql +39 -0
  604. synapse/storage/schema/main/delta/33/event_fields.py +61 -0
  605. synapse/storage/schema/main/delta/33/remote_media_ts.py +43 -0
  606. synapse/storage/schema/main/delta/33/user_ips_index.sql +36 -0
  607. synapse/storage/schema/main/delta/34/appservice_stream.sql +42 -0
  608. synapse/storage/schema/main/delta/34/cache_stream.py +50 -0
  609. synapse/storage/schema/main/delta/34/device_inbox.sql +43 -0
  610. synapse/storage/schema/main/delta/34/push_display_name_rename.sql +39 -0
  611. synapse/storage/schema/main/delta/34/received_txn_purge.py +36 -0
  612. synapse/storage/schema/main/delta/35/contains_url.sql +36 -0
  613. synapse/storage/schema/main/delta/35/device_outbox.sql +58 -0
  614. synapse/storage/schema/main/delta/35/device_stream_id.sql +40 -0
  615. synapse/storage/schema/main/delta/35/event_push_actions_index.sql +36 -0
  616. synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql +52 -0
  617. synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql +56 -0
  618. synapse/storage/schema/main/delta/36/readd_public_rooms.sql +45 -0
  619. synapse/storage/schema/main/delta/37/remove_auth_idx.py +89 -0
  620. synapse/storage/schema/main/delta/37/user_threepids.sql +71 -0
  621. synapse/storage/schema/main/delta/38/postgres_fts_gist.sql +38 -0
  622. synapse/storage/schema/main/delta/39/appservice_room_list.sql +48 -0
  623. synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql +35 -0
  624. synapse/storage/schema/main/delta/39/event_push_index.sql +36 -0
  625. synapse/storage/schema/main/delta/39/federation_out_position.sql +41 -0
  626. synapse/storage/schema/main/delta/39/membership_profile.sql +39 -0
  627. synapse/storage/schema/main/delta/40/current_state_idx.sql +36 -0
  628. synapse/storage/schema/main/delta/40/device_inbox.sql +40 -0
  629. synapse/storage/schema/main/delta/40/device_list_streams.sql +79 -0
  630. synapse/storage/schema/main/delta/40/event_push_summary.sql +57 -0
  631. synapse/storage/schema/main/delta/40/pushers.sql +58 -0
  632. synapse/storage/schema/main/delta/41/device_list_stream_idx.sql +36 -0
  633. synapse/storage/schema/main/delta/41/device_outbound_index.sql +35 -0
  634. synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql +36 -0
  635. synapse/storage/schema/main/delta/41/ratelimit.sql +41 -0
  636. synapse/storage/schema/main/delta/42/current_state_delta.sql +48 -0
  637. synapse/storage/schema/main/delta/42/device_list_last_id.sql +52 -0
  638. synapse/storage/schema/main/delta/42/event_auth_state_only.sql +36 -0
  639. synapse/storage/schema/main/delta/42/user_dir.py +88 -0
  640. synapse/storage/schema/main/delta/43/blocked_rooms.sql +40 -0
  641. synapse/storage/schema/main/delta/43/quarantine_media.sql +36 -0
  642. synapse/storage/schema/main/delta/43/url_cache.sql +35 -0
  643. synapse/storage/schema/main/delta/43/user_share.sql +52 -0
  644. synapse/storage/schema/main/delta/44/expire_url_cache.sql +60 -0
  645. synapse/storage/schema/main/delta/45/group_server.sql +186 -0
  646. synapse/storage/schema/main/delta/45/profile_cache.sql +47 -0
  647. synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql +36 -0
  648. synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql +54 -0
  649. synapse/storage/schema/main/delta/46/group_server.sql +51 -0
  650. synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql +43 -0
  651. synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql +54 -0
  652. synapse/storage/schema/main/delta/46/user_dir_typos.sql +43 -0
  653. synapse/storage/schema/main/delta/47/last_access_media.sql +35 -0
  654. synapse/storage/schema/main/delta/47/postgres_fts_gin.sql +36 -0
  655. synapse/storage/schema/main/delta/47/push_actions_staging.sql +47 -0
  656. synapse/storage/schema/main/delta/48/add_user_consent.sql +37 -0
  657. synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql +36 -0
  658. synapse/storage/schema/main/delta/48/deactivated_users.sql +44 -0
  659. synapse/storage/schema/main/delta/48/group_unique_indexes.py +67 -0
  660. synapse/storage/schema/main/delta/48/groups_joinable.sql +41 -0
  661. synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql +39 -0
  662. synapse/storage/schema/main/delta/49/add_user_daily_visits.sql +40 -0
  663. synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql +36 -0
  664. synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql +38 -0
  665. synapse/storage/schema/main/delta/50/erasure_store.sql +40 -0
  666. synapse/storage/schema/main/delta/50/make_event_content_nullable.py +102 -0
  667. synapse/storage/schema/main/delta/51/e2e_room_keys.sql +58 -0
  668. synapse/storage/schema/main/delta/51/monthly_active_users.sql +46 -0
  669. synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql +38 -0
  670. synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql +55 -0
  671. synapse/storage/schema/main/delta/52/e2e_room_keys.sql +72 -0
  672. synapse/storage/schema/main/delta/53/add_user_type_to_users.sql +38 -0
  673. synapse/storage/schema/main/delta/53/drop_sent_transactions.sql +35 -0
  674. synapse/storage/schema/main/delta/53/event_format_version.sql +35 -0
  675. synapse/storage/schema/main/delta/53/user_dir_populate.sql +49 -0
  676. synapse/storage/schema/main/delta/53/user_ips_index.sql +49 -0
  677. synapse/storage/schema/main/delta/53/user_share.sql +63 -0
  678. synapse/storage/schema/main/delta/53/user_threepid_id.sql +48 -0
  679. synapse/storage/schema/main/delta/53/users_in_public_rooms.sql +47 -0
  680. synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql +49 -0
  681. synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql +42 -0
  682. synapse/storage/schema/main/delta/54/delete_forward_extremities.sql +42 -0
  683. synapse/storage/schema/main/delta/54/drop_legacy_tables.sql +49 -0
  684. synapse/storage/schema/main/delta/54/drop_presence_list.sql +35 -0
  685. synapse/storage/schema/main/delta/54/relations.sql +46 -0
  686. synapse/storage/schema/main/delta/54/stats.sql +99 -0
  687. synapse/storage/schema/main/delta/54/stats2.sql +47 -0
  688. synapse/storage/schema/main/delta/55/access_token_expiry.sql +37 -0
  689. synapse/storage/schema/main/delta/55/track_threepid_validations.sql +50 -0
  690. synapse/storage/schema/main/delta/55/users_alter_deactivated.sql +38 -0
  691. synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql +39 -0
  692. synapse/storage/schema/main/delta/56/current_state_events_membership.sql +41 -0
  693. synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql +43 -0
  694. synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql +44 -0
  695. synapse/storage/schema/main/delta/56/destinations_failure_ts.sql +44 -0
  696. synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres +18 -0
  697. synapse/storage/schema/main/delta/56/device_stream_id_insert.sql +39 -0
  698. synapse/storage/schema/main/delta/56/devices_last_seen.sql +43 -0
  699. synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql +39 -0
  700. synapse/storage/schema/main/delta/56/event_expiry.sql +40 -0
  701. synapse/storage/schema/main/delta/56/event_labels.sql +49 -0
  702. synapse/storage/schema/main/delta/56/event_labels_background_update.sql +36 -0
  703. synapse/storage/schema/main/delta/56/fix_room_keys_index.sql +37 -0
  704. synapse/storage/schema/main/delta/56/hidden_devices.sql +37 -0
  705. synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite +42 -0
  706. synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql +48 -0
  707. synapse/storage/schema/main/delta/56/public_room_list_idx.sql +35 -0
  708. synapse/storage/schema/main/delta/56/redaction_censor.sql +35 -0
  709. synapse/storage/schema/main/delta/56/redaction_censor2.sql +41 -0
  710. synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres +25 -0
  711. synapse/storage/schema/main/delta/56/redaction_censor4.sql +35 -0
  712. synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql +38 -0
  713. synapse/storage/schema/main/delta/56/room_key_etag.sql +36 -0
  714. synapse/storage/schema/main/delta/56/room_membership_idx.sql +37 -0
  715. synapse/storage/schema/main/delta/56/room_retention.sql +52 -0
  716. synapse/storage/schema/main/delta/56/signing_keys.sql +75 -0
  717. synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql +41 -0
  718. synapse/storage/schema/main/delta/56/stats_separated.sql +175 -0
  719. synapse/storage/schema/main/delta/56/unique_user_filter_index.py +46 -0
  720. synapse/storage/schema/main/delta/56/user_external_ids.sql +43 -0
  721. synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql +36 -0
  722. synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql +41 -0
  723. synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql +44 -0
  724. synapse/storage/schema/main/delta/57/local_current_membership.py +111 -0
  725. synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql +40 -0
  726. synapse/storage/schema/main/delta/57/rooms_version_column.sql +43 -0
  727. synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres +35 -0
  728. synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite +22 -0
  729. synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres +39 -0
  730. synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite +23 -0
  731. synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql +41 -0
  732. synapse/storage/schema/main/delta/58/03persist_ui_auth.sql +55 -0
  733. synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres +30 -0
  734. synapse/storage/schema/main/delta/58/06dlols_unique_idx.py +83 -0
  735. synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres +33 -0
  736. synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite +44 -0
  737. synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql +44 -0
  738. synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres +18 -0
  739. synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite +18 -0
  740. synapse/storage/schema/main/delta/58/09shadow_ban.sql +37 -0
  741. synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql +47 -0
  742. synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql +41 -0
  743. synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql +41 -0
  744. synapse/storage/schema/main/delta/58/11dehydration.sql +39 -0
  745. synapse/storage/schema/main/delta/58/11fallback.sql +43 -0
  746. synapse/storage/schema/main/delta/58/11user_id_seq.py +38 -0
  747. synapse/storage/schema/main/delta/58/12room_stats.sql +51 -0
  748. synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql +36 -0
  749. synapse/storage/schema/main/delta/58/14events_instance_name.sql +35 -0
  750. synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres +28 -0
  751. synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql +61 -0
  752. synapse/storage/schema/main/delta/58/15unread_count.sql +45 -0
  753. synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql +41 -0
  754. synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql +40 -0
  755. synapse/storage/schema/main/delta/58/18stream_positions.sql +41 -0
  756. synapse/storage/schema/main/delta/58/19instance_map.sql.postgres +25 -0
  757. synapse/storage/schema/main/delta/58/19txn_id.sql +59 -0
  758. synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql +36 -0
  759. synapse/storage/schema/main/delta/58/20user_daily_visits.sql +37 -0
  760. synapse/storage/schema/main/delta/58/21as_device_stream.sql +36 -0
  761. synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql +1 -0
  762. synapse/storage/schema/main/delta/58/22puppet_token.sql +36 -0
  763. synapse/storage/schema/main/delta/58/22users_have_local_media.sql +2 -0
  764. synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql +36 -0
  765. synapse/storage/schema/main/delta/58/24drop_event_json_index.sql +38 -0
  766. synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql +36 -0
  767. synapse/storage/schema/main/delta/58/26access_token_last_validated.sql +37 -0
  768. synapse/storage/schema/main/delta/58/27local_invites.sql +37 -0
  769. synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres +16 -0
  770. synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite +62 -0
  771. synapse/storage/schema/main/delta/59/01ignored_user.py +85 -0
  772. synapse/storage/schema/main/delta/59/02shard_send_to_device.sql +37 -0
  773. synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres +25 -0
  774. synapse/storage/schema/main/delta/59/04_event_auth_chains.sql +71 -0
  775. synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres +16 -0
  776. synapse/storage/schema/main/delta/59/04drop_account_data.sql +36 -0
  777. synapse/storage/schema/main/delta/59/05cache_invalidation.sql +36 -0
  778. synapse/storage/schema/main/delta/59/06chain_cover_index.sql +36 -0
  779. synapse/storage/schema/main/delta/59/06shard_account_data.sql +39 -0
  780. synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres +32 -0
  781. synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql +37 -0
  782. synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql +39 -0
  783. synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql +39 -0
  784. synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql +45 -0
  785. synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql +36 -0
  786. synapse/storage/schema/main/delta/59/11add_knock_members_to_stats.sql +39 -0
  787. synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres +22 -0
  788. synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql +37 -0
  789. synapse/storage/schema/main/delta/59/12presence_stream_instance.sql +37 -0
  790. synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres +20 -0
  791. synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql +53 -0
  792. synapse/storage/schema/main/delta/59/14refresh_tokens.sql +53 -0
  793. synapse/storage/schema/main/delta/59/15locks.sql +56 -0
  794. synapse/storage/schema/main/delta/59/16federation_inbound_staging.sql +51 -0
  795. synapse/storage/schema/main/delta/60/01recreate_stream_ordering.sql.postgres +45 -0
  796. synapse/storage/schema/main/delta/60/02change_stream_ordering_columns.sql.postgres +30 -0
  797. synapse/storage/schema/main/delta/61/01change_appservices_txns.sql.postgres +23 -0
  798. synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql +68 -0
  799. synapse/storage/schema/main/delta/61/02drop_redundant_room_depth_index.sql +37 -0
  800. synapse/storage/schema/main/delta/61/03recreate_min_depth.py +74 -0
  801. synapse/storage/schema/main/delta/62/01insertion_event_extremities.sql +43 -0
  802. synapse/storage/schema/main/delta/63/01create_registration_tokens.sql +42 -0
  803. synapse/storage/schema/main/delta/63/02delete_unlinked_email_pushers.sql +39 -0
  804. synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql +36 -0
  805. synapse/storage/schema/main/delta/63/03session_store.sql +42 -0
  806. synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql +37 -0
  807. synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.postgres +23 -0
  808. synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.sqlite +37 -0
  809. synapse/storage/schema/main/delta/65/01msc2716_insertion_event_edges.sql +38 -0
  810. synapse/storage/schema/main/delta/65/03remove_hidden_devices_from_device_inbox.sql +41 -0
  811. synapse/storage/schema/main/delta/65/04_local_group_updates.sql +37 -0
  812. synapse/storage/schema/main/delta/65/05_remove_room_stats_historical_and_user_stats_historical.sql +38 -0
  813. synapse/storage/schema/main/delta/65/06remove_deleted_devices_from_device_inbox.sql +53 -0
  814. synapse/storage/schema/main/delta/65/07_arbitrary_relations.sql +37 -0
  815. synapse/storage/schema/main/delta/65/08_device_inbox_background_updates.sql +37 -0
  816. synapse/storage/schema/main/delta/65/10_expirable_refresh_tokens.sql +47 -0
  817. synapse/storage/schema/main/delta/65/11_devices_auth_provider_session.sql +46 -0
  818. synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql +37 -0
  819. synapse/storage/schema/main/delta/68/01event_columns.sql +45 -0
  820. synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql +40 -0
  821. synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql +39 -0
  822. synapse/storage/schema/main/delta/68/04_refresh_tokens_index_next_token_id.sql +47 -0
  823. synapse/storage/schema/main/delta/68/04partial_state_rooms.sql +60 -0
  824. synapse/storage/schema/main/delta/68/05_delete_non_strings_from_event_search.sql.sqlite +22 -0
  825. synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py +80 -0
  826. synapse/storage/schema/main/delta/68/06_msc3202_add_device_list_appservice_stream_type.sql +42 -0
  827. synapse/storage/schema/main/delta/69/01as_txn_seq.py +54 -0
  828. synapse/storage/schema/main/delta/69/01device_list_oubound_by_room.sql +57 -0
  829. synapse/storage/schema/main/delta/69/02cache_invalidation_index.sql +37 -0
  830. synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql +39 -0
  831. synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.postgres +43 -0
  832. synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.sqlite +47 -0
  833. synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql +80 -0
  834. synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql +37 -0
  835. synapse/storage/schema/main/delta/72/01add_room_type_to_state_stats.sql +38 -0
  836. synapse/storage/schema/main/delta/72/01event_push_summary_receipt.sql +54 -0
  837. synapse/storage/schema/main/delta/72/02event_push_actions_index.sql +38 -0
  838. synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py +57 -0
  839. synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql +36 -0
  840. synapse/storage/schema/main/delta/72/03remove_groups.sql +50 -0
  841. synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres +17 -0
  842. synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite +40 -0
  843. synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql +38 -0
  844. synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql +38 -0
  845. synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql +35 -0
  846. synapse/storage/schema/main/delta/72/06thread_notifications.sql +49 -0
  847. synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py +67 -0
  848. synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres +30 -0
  849. synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite +70 -0
  850. synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres +23 -0
  851. synapse/storage/schema/main/delta/72/08thread_receipts.sql +39 -0
  852. synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite +56 -0
  853. synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql +48 -0
  854. synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql +35 -0
  855. synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql +41 -0
  856. synapse/storage/schema/main/delta/73/03pusher_device_id.sql +39 -0
  857. synapse/storage/schema/main/delta/73/03users_approved_column.sql +39 -0
  858. synapse/storage/schema/main/delta/73/04partial_join_details.sql +42 -0
  859. synapse/storage/schema/main/delta/73/04pending_device_list_updates.sql +47 -0
  860. synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres +22 -0
  861. synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite +24 -0
  862. synapse/storage/schema/main/delta/73/06thread_notifications_thread_id_idx.sql +42 -0
  863. synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.postgres +23 -0
  864. synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.sqlite +76 -0
  865. synapse/storage/schema/main/delta/73/09partial_joined_via_destination.sql +37 -0
  866. synapse/storage/schema/main/delta/73/09threads_table.sql +49 -0
  867. synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py +71 -0
  868. synapse/storage/schema/main/delta/73/10login_tokens.sql +54 -0
  869. synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres +33 -0
  870. synapse/storage/schema/main/delta/73/12refactor_device_list_outbound_pokes.sql +72 -0
  871. synapse/storage/schema/main/delta/73/13add_device_lists_index.sql +39 -0
  872. synapse/storage/schema/main/delta/73/20_un_partial_stated_room_stream.sql +51 -0
  873. synapse/storage/schema/main/delta/73/21_un_partial_stated_room_stream_seq.sql.postgres +20 -0
  874. synapse/storage/schema/main/delta/73/22_rebuild_user_dir_stats.sql +48 -0
  875. synapse/storage/schema/main/delta/73/22_un_partial_stated_event_stream.sql +53 -0
  876. synapse/storage/schema/main/delta/73/23_fix_thread_index.sql +52 -0
  877. synapse/storage/schema/main/delta/73/23_un_partial_stated_room_stream_seq.sql.postgres +20 -0
  878. synapse/storage/schema/main/delta/73/24_events_jump_to_date_index.sql +36 -0
  879. synapse/storage/schema/main/delta/73/25drop_presence.sql +36 -0
  880. synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql +58 -0
  881. synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql +38 -0
  882. synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres +29 -0
  883. synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite +23 -0
  884. synapse/storage/schema/main/delta/74/03_room_membership_index.sql +38 -0
  885. synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql +36 -0
  886. synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py +87 -0
  887. synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql +72 -0
  888. synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres +52 -0
  889. synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql +39 -0
  890. synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql +39 -0
  891. synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql +46 -0
  892. synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql +43 -0
  893. synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres +16 -0
  894. synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres +16 -0
  895. synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql +35 -0
  896. synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql +35 -0
  897. synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql +67 -0
  898. synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite +102 -0
  899. synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres +27 -0
  900. synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres +27 -0
  901. synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres +29 -0
  902. synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql +39 -0
  903. synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py +99 -0
  904. synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py +100 -0
  905. synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py +72 -0
  906. synapse/storage/schema/main/delta/78/03event_extremities_constraints.py +65 -0
  907. synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py +32 -0
  908. synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres +102 -0
  909. synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite +72 -0
  910. synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py +70 -0
  911. synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres +69 -0
  912. synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite +65 -0
  913. synapse/storage/schema/main/delta/80/01_users_alter_locked.sql +35 -0
  914. synapse/storage/schema/main/delta/80/02_read_write_locks_unlogged.sql.postgres +30 -0
  915. synapse/storage/schema/main/delta/80/02_scheduled_tasks.sql +47 -0
  916. synapse/storage/schema/main/delta/80/03_read_write_locks_triggers.sql.postgres +37 -0
  917. synapse/storage/schema/main/delta/80/04_read_write_locks_deadlock.sql.postgres +71 -0
  918. synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql +35 -0
  919. synapse/storage/schema/main/delta/82/04_add_indices_for_purging_rooms.sql +39 -0
  920. synapse/storage/schema/main/delta/82/05gaps.sql +44 -0
  921. synapse/storage/schema/main/delta/83/01_drop_old_tables.sql +43 -0
  922. synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite +17 -0
  923. synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql +34 -0
  924. synapse/storage/schema/main/delta/83/06_event_push_summary_room.sql +36 -0
  925. synapse/storage/schema/main/delta/84/01_auth_links_stats.sql.postgres +20 -0
  926. synapse/storage/schema/main/delta/84/02_auth_links_index.sql +16 -0
  927. synapse/storage/schema/main/delta/84/03_auth_links_analyze.sql.postgres +16 -0
  928. synapse/storage/schema/main/delta/84/04_access_token_index.sql +15 -0
  929. synapse/storage/schema/main/delta/85/01_add_suspended.sql +14 -0
  930. synapse/storage/schema/main/delta/85/02_add_instance_names.sql +27 -0
  931. synapse/storage/schema/main/delta/85/03_new_sequences.sql.postgres +54 -0
  932. synapse/storage/schema/main/delta/85/04_cleanup_device_federation_outbox.sql +15 -0
  933. synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql +16 -0
  934. synapse/storage/schema/main/delta/85/06_add_room_reports.sql +20 -0
  935. synapse/storage/schema/main/delta/86/01_authenticate_media.sql +15 -0
  936. synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql +15 -0
  937. synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql +169 -0
  938. synapse/storage/schema/main/delta/87/02_per_connection_state.sql +81 -0
  939. synapse/storage/schema/main/delta/87/03_current_state_index.sql +19 -0
  940. synapse/storage/schema/main/delta/88/01_add_delayed_events.sql +43 -0
  941. synapse/storage/schema/main/delta/88/01_custom_profile_fields.sql +15 -0
  942. synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql +21 -0
  943. synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql +18 -0
  944. synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql +18 -0
  945. synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres +19 -0
  946. synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite +19 -0
  947. synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql +20 -0
  948. synapse/storage/schema/main/delta/88/06_events_received_ts_index.sql +17 -0
  949. synapse/storage/schema/main/delta/89/01_sliding_sync_membership_snapshot_index.sql +15 -0
  950. synapse/storage/schema/main/delta/90/01_add_column_participant_room_memberships_table.sql +16 -0
  951. synapse/storage/schema/main/delta/91/01_media_hash.sql +28 -0
  952. synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres +16 -0
  953. synapse/storage/schema/main/delta/92/01_remove_trigger.sql.sqlite +16 -0
  954. synapse/storage/schema/main/delta/92/02_remove_populate_participant_bg_update.sql +17 -0
  955. synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql +16 -0
  956. synapse/storage/schema/main/delta/92/04_thread_subscriptions.sql +59 -0
  957. synapse/storage/schema/main/delta/92/04_thread_subscriptions_seq.sql.postgres +19 -0
  958. synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql +17 -0
  959. synapse/storage/schema/main/delta/92/05_thread_subscriptions_comments.sql.postgres +18 -0
  960. synapse/storage/schema/main/delta/92/06_device_federation_inbox_index.sql +16 -0
  961. synapse/storage/schema/main/delta/92/06_threads_last_sent_stream_ordering_comments.sql.postgres +24 -0
  962. synapse/storage/schema/main/delta/92/07_add_user_reports.sql +22 -0
  963. synapse/storage/schema/main/delta/92/07_event_txn_id_device_id_txn_id2.sql +15 -0
  964. synapse/storage/schema/main/delta/92/08_room_ban_redactions.sql +21 -0
  965. synapse/storage/schema/main/delta/92/08_thread_subscriptions_seq_fixup.sql.postgres +19 -0
  966. synapse/storage/schema/main/delta/92/09_thread_subscriptions_update.sql +20 -0
  967. synapse/storage/schema/main/delta/92/09_thread_subscriptions_update.sql.postgres +18 -0
  968. synapse/storage/schema/main/delta/93/01_add_delayed_events.sql +15 -0
  969. synapse/storage/schema/main/full_schemas/72/full.sql.postgres +1344 -0
  970. synapse/storage/schema/main/full_schemas/72/full.sql.sqlite +646 -0
  971. synapse/storage/schema/state/delta/23/drop_state_index.sql +35 -0
  972. synapse/storage/schema/state/delta/32/remove_state_indices.sql +38 -0
  973. synapse/storage/schema/state/delta/35/add_state_index.sql +36 -0
  974. synapse/storage/schema/state/delta/35/state.sql +41 -0
  975. synapse/storage/schema/state/delta/35/state_dedupe.sql +36 -0
  976. synapse/storage/schema/state/delta/47/state_group_seq.py +38 -0
  977. synapse/storage/schema/state/delta/56/state_group_room_idx.sql +36 -0
  978. synapse/storage/schema/state/delta/61/02state_groups_state_n_distinct.sql.postgres +34 -0
  979. synapse/storage/schema/state/delta/70/08_state_group_edges_unique.sql +36 -0
  980. synapse/storage/schema/state/delta/89/01_state_groups_deletion.sql +39 -0
  981. synapse/storage/schema/state/delta/90/02_delete_unreferenced_state_groups.sql +16 -0
  982. synapse/storage/schema/state/delta/90/03_remove_old_deletion_bg_update.sql +15 -0
  983. synapse/storage/schema/state/full_schemas/72/full.sql.postgres +30 -0
  984. synapse/storage/schema/state/full_schemas/72/full.sql.sqlite +20 -0
  985. synapse/storage/types.py +183 -0
  986. synapse/storage/util/__init__.py +20 -0
  987. synapse/storage/util/id_generators.py +928 -0
  988. synapse/storage/util/partial_state_events_tracker.py +194 -0
  989. synapse/storage/util/sequence.py +315 -0
  990. synapse/streams/__init__.py +43 -0
  991. synapse/streams/config.py +91 -0
  992. synapse/streams/events.py +203 -0
  993. synapse/synapse_rust/__init__.pyi +3 -0
  994. synapse/synapse_rust/acl.pyi +20 -0
  995. synapse/synapse_rust/events.pyi +136 -0
  996. synapse/synapse_rust/http_client.pyi +32 -0
  997. synapse/synapse_rust/push.pyi +86 -0
  998. synapse/synapse_rust/rendezvous.pyi +30 -0
  999. synapse/synapse_rust/segmenter.pyi +1 -0
  1000. synapse/synapse_rust.abi3.so +0 -0
  1001. synapse/types/__init__.py +1600 -0
  1002. synapse/types/handlers/__init__.py +93 -0
  1003. synapse/types/handlers/policy_server.py +16 -0
  1004. synapse/types/handlers/sliding_sync.py +908 -0
  1005. synapse/types/rest/__init__.py +25 -0
  1006. synapse/types/rest/client/__init__.py +413 -0
  1007. synapse/types/state.py +634 -0
  1008. synapse/types/storage/__init__.py +66 -0
  1009. synapse/util/__init__.py +169 -0
  1010. synapse/util/async_helpers.py +1045 -0
  1011. synapse/util/background_queue.py +142 -0
  1012. synapse/util/batching_queue.py +202 -0
  1013. synapse/util/caches/__init__.py +300 -0
  1014. synapse/util/caches/cached_call.py +143 -0
  1015. synapse/util/caches/deferred_cache.py +530 -0
  1016. synapse/util/caches/descriptors.py +692 -0
  1017. synapse/util/caches/dictionary_cache.py +346 -0
  1018. synapse/util/caches/expiringcache.py +249 -0
  1019. synapse/util/caches/lrucache.py +975 -0
  1020. synapse/util/caches/response_cache.py +322 -0
  1021. synapse/util/caches/stream_change_cache.py +370 -0
  1022. synapse/util/caches/treecache.py +189 -0
  1023. synapse/util/caches/ttlcache.py +197 -0
  1024. synapse/util/cancellation.py +63 -0
  1025. synapse/util/check_dependencies.py +335 -0
  1026. synapse/util/clock.py +567 -0
  1027. synapse/util/constants.py +22 -0
  1028. synapse/util/daemonize.py +165 -0
  1029. synapse/util/distributor.py +157 -0
  1030. synapse/util/events.py +134 -0
  1031. synapse/util/file_consumer.py +164 -0
  1032. synapse/util/frozenutils.py +57 -0
  1033. synapse/util/gai_resolver.py +178 -0
  1034. synapse/util/hash.py +38 -0
  1035. synapse/util/httpresourcetree.py +108 -0
  1036. synapse/util/iterutils.py +189 -0
  1037. synapse/util/json.py +56 -0
  1038. synapse/util/linked_list.py +156 -0
  1039. synapse/util/logcontext.py +46 -0
  1040. synapse/util/logformatter.py +28 -0
  1041. synapse/util/macaroons.py +325 -0
  1042. synapse/util/manhole.py +191 -0
  1043. synapse/util/metrics.py +339 -0
  1044. synapse/util/module_loader.py +116 -0
  1045. synapse/util/msisdn.py +51 -0
  1046. synapse/util/patch_inline_callbacks.py +250 -0
  1047. synapse/util/pydantic_models.py +63 -0
  1048. synapse/util/ratelimitutils.py +419 -0
  1049. synapse/util/retryutils.py +339 -0
  1050. synapse/util/rlimit.py +42 -0
  1051. synapse/util/rust.py +133 -0
  1052. synapse/util/sentinel.py +21 -0
  1053. synapse/util/stringutils.py +293 -0
  1054. synapse/util/task_scheduler.py +493 -0
  1055. synapse/util/templates.py +126 -0
  1056. synapse/util/threepids.py +123 -0
  1057. synapse/util/wheel_timer.py +112 -0
  1058. synapse/visibility.py +835 -0
@@ -0,0 +1,2419 @@
+ #
+ # This file is licensed under the Affero General Public License (AGPL) version 3.
+ #
+ # Copyright 2021 The Matrix.org Foundation C.I.C.
+ # Copyright (C) 2023 New Vector, Ltd
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as
+ # published by the Free Software Foundation, either version 3 of the
+ # License, or (at your option) any later version.
+ #
+ # See the GNU Affero General Public License for more details:
+ # <https://www.gnu.org/licenses/agpl-3.0.html>.
+ #
+ # Originally licensed under the Apache License, Version 2.0:
+ # <http://www.apache.org/licenses/LICENSE-2.0>.
+ #
+ # [This file includes modifications made by New Vector Limited]
+ #
+ #
+
+ import collections
+ import itertools
+ import logging
+ from http import HTTPStatus
+ from typing import (
+     TYPE_CHECKING,
+     Collection,
+     Container,
+     Iterable,
+     Sequence,
+ )
+
+ from prometheus_client import Counter, Histogram
+
+ from synapse import event_auth
+ from synapse.api.constants import (
+     EventContentFields,
+     EventTypes,
+     GuestAccess,
+     Membership,
+     RejectedReason,
+     RoomEncryptionAlgorithms,
+ )
+ from synapse.api.errors import (
+     AuthError,
+     Codes,
+     EventSizeError,
+     FederationError,
+     FederationPullAttemptBackoffError,
+     HttpResponseException,
+     PartialStateConflictError,
+     RequestSendFailed,
+     SynapseError,
+ )
+ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
+ from synapse.event_auth import (
+     auth_types_for_event,
+     check_state_dependent_auth_rules,
+     check_state_independent_auth_rules,
+     validate_event_for_room_version,
+ )
+ from synapse.events import EventBase
+ from synapse.events.snapshot import (
+     EventContext,
+     EventPersistencePair,
+     UnpersistedEventContextBase,
+ )
+ from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
+ from synapse.logging.context import nested_logging_context
+ from synapse.logging.opentracing import (
+     SynapseTags,
+     set_tag,
+     start_active_span,
+     tag_args,
+     trace,
+ )
+ from synapse.metrics import SERVER_NAME_LABEL
+ from synapse.replication.http.federation import (
+     ReplicationFederationSendEventsRestServlet,
+ )
+ from synapse.state import StateResolutionStore
+ from synapse.storage.databases.main.events_worker import EventRedactBehaviour
+ from synapse.types import (
+     PersistedEventPosition,
+     RoomStreamToken,
+     StateMap,
+     StrCollection,
+     UserID,
+     get_domain_from_id,
+ )
+ from synapse.types.state import StateFilter
+ from synapse.util.async_helpers import Linearizer, concurrently_execute
+ from synapse.util.iterutils import batch_iter, partition, sorted_topologically
+ from synapse.util.retryutils import NotRetryingDestination
+ from synapse.util.stringutils import shortstr
+
+ if TYPE_CHECKING:
+     from synapse.server import HomeServer
+
+
+ logger = logging.getLogger(__name__)
+
+ soft_failed_event_counter = Counter(
+     "synapse_federation_soft_failed_events_total",
+     "Events received over federation that we marked as soft_failed",
+     labelnames=[SERVER_NAME_LABEL],
+ )
+
+ # Added to debug performance and track progress on optimizations
+ backfill_processing_after_timer = Histogram(
+     "synapse_federation_backfill_processing_after_time_seconds",
+     "sec",
+     labelnames=[SERVER_NAME_LABEL],
+     buckets=(
+         0.1,
+         0.25,
+         0.5,
+         1.0,
+         2.5,
+         5.0,
+         7.5,
+         10.0,
+         15.0,
+         20.0,
+         25.0,
+         30.0,
+         40.0,
+         50.0,
+         60.0,
+         80.0,
+         100.0,
+         120.0,
+         150.0,
+         180.0,
+         "+Inf",
+     ),
+ )
+
+
+ class FederationEventHandler:
+     """Handles events that originated from federation.
+
+     Responsible for handling incoming events and passing them on to the rest
+     of the homeserver (including auth and state conflict resolution)
+     """
+
+     def __init__(self, hs: "HomeServer"):
+         self.server_name = hs.hostname
+         self.hs = hs
+         self._clock = hs.get_clock()
+         self._store = hs.get_datastores().main
+         self._state_store = hs.get_datastores().state
+         self._state_deletion_store = hs.get_datastores().state_deletion
+         self._storage_controllers = hs.get_storage_controllers()
+         self._state_storage_controller = self._storage_controllers.state
+
+         self._state_handler = hs.get_state_handler()
+         self._event_creation_handler = hs.get_event_creation_handler()
+         self._event_auth_handler = hs.get_event_auth_handler()
+         self._message_handler = hs.get_message_handler()
+         self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
+         self._state_resolution_handler = hs.get_state_resolution_handler()
+         # avoid a circular dependency by deferring execution here
+         self._get_room_member_handler = hs.get_room_member_handler
+
+         self._federation_client = hs.get_federation_client()
+         self._third_party_event_rules = (
+             hs.get_module_api_callbacks().third_party_event_rules
+         )
+         self._notifier = hs.get_notifier()
+
+         self._server_name = hs.hostname
+         self._is_mine_id = hs.is_mine_id
+         self._is_mine_server_name = hs.is_mine_server_name
+         self._instance_name = hs.get_instance_name()
+
+         self._config = hs.config
+         self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages
+
+         self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
+         self._device_list_updater = hs.get_device_handler().device_list_updater
+
+         # When joining a room we need to queue any events for that room up.
+         # For each room, a list of (pdu, origin) tuples.
+         # TODO: replace this with something more elegant, probably based around the
+         # federation event staging area.
+         self.room_queues: dict[str, list[tuple[EventBase, str]]] = {}
+
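+         # Linearizer which serialises attempts to fetch missing prev_events, so
+         # that we only fetch them for one PDU at a time in each room.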
+         self._room_pdu_linearizer = Linearizer(name="fed_room_pdu", clock=self._clock)
+
+     async def on_receive_pdu(self, origin: str, pdu: EventBase) -> None:
+         """Process a PDU received via a federation /send/ transaction
+
+         Args:
+             origin: server which initiated the /send/ transaction. Will
+                 be used to fetch missing events or state.
+             pdu: received PDU
+         """
+
+         # We should never see any outliers here.
+         assert not pdu.internal_metadata.outlier
+
+         room_id = pdu.room_id
+         event_id = pdu.event_id
+
+         # We reprocess pdus when we have seen them only as outliers
+         existing = await self._store.get_event(
+             event_id, allow_none=True, allow_rejected=True
+         )
+
+         # FIXME: Currently we fetch an event again when we already have it
+         # if it has been marked as an outlier.
+         if existing:
+             if not existing.internal_metadata.is_outlier():
+                 logger.info(
+                     "Ignoring received event %s which we have already seen", event_id
+                 )
+                 return
+             if pdu.internal_metadata.is_outlier():
+                 logger.info(
+                     "Ignoring received outlier %s which we already have as an outlier",
+                     event_id,
+                 )
+                 return
+             logger.info("De-outliering event %s", event_id)
+
+         # do some initial sanity-checking of the event. In particular, make
+         # sure it doesn't have hundreds of prev_events or auth_events, which
+         # could cause a huge state resolution or cascade of event fetches.
+         try:
+             self._sanity_check_event(pdu)
+         except SynapseError as err:
+             logger.warning("Received event failed sanity checks")
+             raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
+
+         # If we are currently in the process of joining this room, then we
+         # queue up events for later processing.
+         if room_id in self.room_queues:
+             logger.info(
+                 "Queuing PDU from %s for now: join in progress",
+                 origin,
+             )
+             self.room_queues[room_id].append((pdu, origin))
+             return
+
+         # If we're not in the room just ditch the event entirely (and not
+         # invited). This is probably an old server that has come back and thinks
+         # we're still in the room (or we've been rejoined to the room by a state
+         # reset).
+         #
+         # Note that if we were never in the room then we would have already
+         # dropped the event, since we wouldn't know the room version.
+         is_in_room = await self._event_auth_handler.is_host_in_room(
+             room_id, self.server_name
+         )
+         if not is_in_room:
+             # Check if this is a leave event rescinding an invite
+             if (
+                 pdu.type == EventTypes.Member
+                 and pdu.membership == Membership.LEAVE
+                 and pdu.state_key != pdu.sender
+                 and self._is_mine_id(pdu.state_key)
+             ):
+                 (
+                     membership,
+                     membership_event_id,
+                 ) = await self._store.get_local_current_membership_for_user_in_room(
+                     pdu.state_key, pdu.room_id
+                 )
+                 if (
+                     membership == Membership.INVITE
+                     and membership_event_id
+                     and membership_event_id
+                     in pdu.auth_event_ids()  # The invite should be in the auth events of the rescission.
+                 ):
+                     invite_event = await self._store.get_event(
+                         membership_event_id, allow_none=True
+                     )
+
+                     # We cannot fully auth the rescission event, but we can
+                     # check if the sender of the leave event is the same as the
+                     # invite.
+                     #
+                     # Technically, a room admin could rescind the invite, but we
+                     # have no way of knowing who is and isn't a room admin.
+                     if invite_event and pdu.sender == invite_event.sender:
+                         # Handle the rescission event
+                         pdu.internal_metadata.outlier = True
+                         pdu.internal_metadata.out_of_band_membership = True
+                         context = EventContext.for_outlier(self._storage_controllers)
+                         await self.persist_events_and_notify(room_id, [(pdu, context)])
+                         return
+
+             logger.info(
+                 "Ignoring PDU from %s as we're not in the room",
+                 origin,
+             )
+             return None
+
+         # Try to fetch any missing prev events to fill in gaps in the graph
+         prevs = set(pdu.prev_event_ids())
+         seen = await self._store.have_events_in_timeline(prevs)
+         missing_prevs = prevs - seen
+
+         if missing_prevs:
+             # We only backfill backwards to the min depth.
+             min_depth = await self._store.get_min_depth(pdu.room_id)
+             logger.debug("min_depth: %d", min_depth)
+
+             if min_depth is not None and pdu.depth > min_depth:
+                 # If we're missing stuff, ensure we only fetch stuff one
+                 # at a time.
+                 logger.info(
+                     "Acquiring room lock to fetch %d missing prev_events: %s",
+                     len(missing_prevs),
+                     shortstr(missing_prevs),
+                 )
+                 async with self._room_pdu_linearizer.queue(pdu.room_id):
+                     logger.info(
+                         "Acquired room lock to fetch %d missing prev_events",
+                         len(missing_prevs),
+                     )
+
+                     try:
+                         await self._get_missing_events_for_pdu(
+                             origin, pdu, prevs, min_depth
+                         )
+                     except Exception as e:
+                         raise Exception(
+                             "Error fetching missing prev_events for %s: %s"
+                             % (event_id, e)
+                         ) from e
+
+                     # Update the set of things we've seen after trying to
+                     # fetch the missing stuff
+                     seen = await self._store.have_events_in_timeline(prevs)
+                     missing_prevs = prevs - seen
+
+                     if not missing_prevs:
+                         logger.info("Found all missing prev_events")
+
+             if missing_prevs:
+                 # since this event was pushed to us, it is possible for it to
+                 # become the only forward-extremity in the room, and we would then
+                 # trust its state to be the state for the whole room. This is very
+                 # bad. Further, if the event was pushed to us, there is no excuse
+                 # for us not to have all the prev_events. (XXX: apart from
+                 # min_depth?)
+                 #
+                 # We therefore reject any such events.
+                 logger.warning(
+                     "Rejecting: failed to fetch %d prev events: %s",
+                     len(missing_prevs),
+                     shortstr(missing_prevs),
+                 )
+                 raise FederationError(
+                     "ERROR",
+                     403,
+                     (
+                         "Your server isn't divulging details about prev_events "
+                         "referenced in this event."
+                     ),
+                     affected=pdu.event_id,
+                 )
+
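+         # At this point we either have all the prev_events, or the event is below
+         # the room's min_depth. Compute the state at the event and process it,
+         # retrying once if the room is un-partial stated in the middle.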
+         try:
+             context = await self._state_handler.compute_event_context(pdu)
+             await self._process_received_pdu(origin, pdu, context)
+         except PartialStateConflictError:
+             # The room was un-partial stated while we were processing the PDU.
+             # Try once more, with full state this time.
+             logger.info(
+                 "Room %s was un-partial stated while processing the PDU, trying again.",
+                 room_id,
+             )
+             context = await self._state_handler.compute_event_context(pdu)
+             await self._process_received_pdu(origin, pdu, context)
+
+     async def on_send_membership_event(
+         self, origin: str, event: EventBase
+     ) -> EventPersistencePair:
+         """
+         We have received a join/leave/knock event for a room via send_join/leave/knock.
+
+         Verify that event and send it into the room on the remote homeserver's behalf.
+
+         This is quite similar to on_receive_pdu, with the following principal
+         differences:
+           * only membership events are permitted (and only events with
+             sender==state_key -- ie, no kicks or bans)
+           * *We* send out the event on behalf of the remote server.
+           * We enforce the membership restrictions of restricted rooms.
+           * Rejected events result in an exception rather than being stored.
+
+         There are also other differences, however it is not clear if these are by
+         design or omission. In particular, we do not attempt to backfill any missing
+         prev_events.
+
+         Args:
+             origin: The homeserver of the remote (joining/invited/knocking) user.
+             event: The member event that has been signed by the remote homeserver.
+
+         Returns:
+             The event and context of the event after inserting it into the room graph.
+
+         Raises:
+             RuntimeError if any prev_events are missing
+             SynapseError if the event is not accepted into the room
+             PartialStateConflictError if the room was un-partial stated in between
+                 computing the state at the event and persisting it. The caller should
+                 retry exactly once in this case.
+         """
+         logger.debug(
+             "on_send_membership_event: Got event: %s, signatures: %s",
+             event.event_id,
+             event.signatures,
+         )
+
+         if get_domain_from_id(event.sender) != origin:
+             logger.info(
+                 "Got send_membership request for user %r from different origin %s",
+                 event.sender,
+                 origin,
+             )
+             raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
+         if event.sender != event.state_key:
+             raise SynapseError(400, "state_key and sender must match", Codes.BAD_JSON)
+
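+         # Events received via send_join/leave/knock should never be outliers.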
+         assert not event.internal_metadata.outlier
+
+         # Send this event on behalf of the other server.
+         #
+         # The remote server isn't a full participant in the room at this point, so
+         # may not have an up-to-date list of the other homeservers participating in
+         # the room, so we send it on their behalf.
+         event.internal_metadata.send_on_behalf_of = origin
+
+         context = await self._state_handler.compute_event_context(event)
+         await self._check_event_auth(origin, event, context)
+         if context.rejected:
+             raise SynapseError(
+                 403, f"{event.membership} event was rejected", Codes.FORBIDDEN
+             )
+
+         # for joins, we need to check the restrictions of restricted rooms
+         if event.membership == Membership.JOIN:
+             await self.check_join_restrictions(context, event)
+
+         # for knock events, we run the third-party event rules. It's not entirely clear
+         # why we don't do this for other sorts of membership events.
+         if event.membership == Membership.KNOCK:
+             event_allowed, _ = await self._third_party_event_rules.check_event_allowed(
+                 event, context
+             )
+             if not event_allowed:
+                 logger.info("Sending of knock %s forbidden by third-party rules", event)
+                 raise SynapseError(
+                     403, "This event is not allowed in this context", Codes.FORBIDDEN
+                 )
+
+         # all looks good, we can persist the event.
+
+         # First, precalculate the joined hosts so that the federation sender doesn't
+         # need to.
+         await self._event_creation_handler.cache_joined_hosts_for_events(
+             [(event, context)]
+         )
+
+         await self._check_for_soft_fail(event, context=context, origin=origin)
+         await self._run_push_actions_and_persist_event(event, context)
+         return event, context
+
+     async def check_join_restrictions(
+         self,
+         context: UnpersistedEventContextBase,
+         event: EventBase,
+     ) -> None:
+         """Check that restrictions in restricted join rules are matched
+
+         Called when we receive a join event via send_join.
+
+         Raises an auth error if the restrictions are not matched.
+         """
+         prev_state_ids = await context.get_prev_state_ids()
+
+         # Check if the user is already in the room or invited to the room.
+         user_id = event.state_key
+         prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
+         prev_membership = None
+         if prev_member_event_id:
+             prev_member_event = await self._store.get_event(prev_member_event_id)
+             prev_membership = prev_member_event.membership
+
+         # Check if the member should be allowed access via membership in a space.
+         await self._event_auth_handler.check_restricted_join_rules(
+             prev_state_ids,
+             event.room_version,
+             user_id,
+             prev_membership,
+         )
+
+     @trace
+     async def process_remote_join(
+         self,
+         origin: str,
+         room_id: str,
+         auth_events: list[EventBase],
+         state: list[EventBase],
+         event: EventBase,
+         room_version: RoomVersion,
+         partial_state: bool,
+     ) -> int:
+         """Persists the events returned by a send_join
+
+         Checks the auth chain is valid (and passes auth checks) for the
+         state and event. Then persists all of the events.
+         Notifies about the persisted events where appropriate.
+
+         Args:
+             origin: Where the events came from
+             room_id: The room ID of the join.
+             auth_events: The auth chain events returned by send_join.
+             state: The room state at the join event.
+             event: The join event itself.
+             room_version: The room version we expect this room to have; we raise
+                 if it doesn't match the version in the create event.
+             partial_state: True if the state omits non-critical membership events
+
+         Returns:
+             The stream ID after which all events have been persisted.
+
+         Raises:
+             SynapseError if the response is in some way invalid.
+             PartialStateConflictError if the homeserver is already in the room and it
+                 has been un-partial stated.
+         """
+         create_event = None
+         for e in state:
+             if (e.type, e.state_key) == (EventTypes.Create, ""):
+                 create_event = e
+                 break
+
+         if create_event is None:
+             # If the state doesn't have a create event then the room is
+             # invalid, and it would fail auth checks anyway.
+             raise SynapseError(400, "No create event in state")
+
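+         # Sanity-check that the create event agrees with the room version we were
+         # told to expect.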
+         room_version_id = create_event.content.get(
+             "room_version", RoomVersions.V1.identifier
+         )
+
+         if room_version.identifier != room_version_id:
+             raise SynapseError(400, "Room version mismatch")
+
+         # persist the auth chain and state events.
+         #
+         # any invalid events here will be marked as rejected, and we'll carry on.
+         #
+         # any events whose auth events are missing (ie, not in the send_join response,
+         # and not already in our db) will just be ignored. This is correct behaviour,
+         # because the reason that auth_events are missing might be due to us being
+         # unable to validate their signatures. The fact that we can't validate their
+         # signatures right now doesn't mean that we will *never* be able to, so it
+         # is premature to reject them.
+         #
+         await self._auth_and_persist_outliers(
+             room_id, itertools.chain(auth_events, state)
+         )
+
+         # and now persist the join event itself.
+         logger.info(
+             "Persisting join-via-remote %s (partial_state: %s)", event, partial_state
+         )
+         with nested_logging_context(suffix=event.event_id):
+             if partial_state:
+                 # When handling a second partial state join into a partial state room,
+                 # the returned state will exclude the membership from the first join. To
+                 # preserve prior memberships, we try to compute the partial state before
+                 # the event ourselves if we know about any of the prev events.
+                 #
+                 # When we don't know about any of the prev events, it's fine to just use
+                 # the returned state, since the new join will create a new forward
+                 # extremity, and leave the forward extremity containing our prior
+                 # memberships alone.
+                 prev_event_ids = set(event.prev_event_ids())
+                 seen_event_ids = await self._store.have_events_in_timeline(
+                     prev_event_ids
+                 )
+                 missing_event_ids = prev_event_ids - seen_event_ids
+
+                 state_maps_to_resolve: list[StateMap[str]] = []
+
+                 # Fetch the state after the prev events that we know about.
+                 state_maps_to_resolve.extend(
+                     (
+                         await self._state_storage_controller.get_state_groups_ids(
+                             room_id, seen_event_ids, await_full_state=False
+                         )
+                     ).values()
+                 )
+
+                 # When there are prev events we do not have the state for, we state
+                 # resolve with the state returned by the remote homeserver.
+                 if missing_event_ids or len(state_maps_to_resolve) == 0:
+                     state_maps_to_resolve.append(
+                         {(e.type, e.state_key): e.event_id for e in state}
+                     )
+
+                 state_ids_before_event = (
+                     await self._state_resolution_handler.resolve_events_with_store(
+                         event.room_id,
+                         room_version.identifier,
+                         state_maps_to_resolve,
+                         event_map=None,
+                         state_res_store=StateResolutionStore(
+                             self._store, self._state_deletion_store
+                         ),
+                     )
+                 )
+             else:
+                 state_ids_before_event = {
+                     (e.type, e.state_key): e.event_id for e in state
+                 }
+
+             context = await self._state_handler.compute_event_context(
+                 event,
+                 state_ids_before_event=state_ids_before_event,
+                 partial_state=partial_state,
+             )
+
+             await self._check_event_auth(origin, event, context)
+             if context.rejected:
+                 raise SynapseError(403, "Join event was rejected")
+
+             # the remote server is responsible for sending our join event to the rest
+             # of the federation. Indeed, attempting to do so will result in problems
+             # when we try to look up the state before the join (to get the server list)
+             # and discover that we do not have it.
+             event.internal_metadata.proactively_send = False
+
+             stream_id_after_persist = await self.persist_events_and_notify(
+                 room_id, [(event, context)]
+             )
+
+             return stream_id_after_persist
+
+     async def update_state_for_partial_state_event(
+         self, destination: str, event: EventBase
+     ) -> None:
+         """Recalculate the state at an event as part of a de-partial-stating process
+
+         Args:
+             destination: server to request full state from
+             event: partial-state event to be de-partial-stated
+
+         Raises:
+             FederationPullAttemptBackoffError if we are deliberately not attempting
+                 to pull the given event over federation because we've already done so
+                 recently and are backing off.
+             FederationError if we fail to request state from the remote server.
+         """
+         logger.info("Updating state for %s", event.event_id)
+         with nested_logging_context(suffix=event.event_id):
+             # if we have all the event's prev_events, then we can work out the
+             # state based on their states. Otherwise, we request it from the destination
+             # server.
+             #
+             # This is the same operation as we do when we receive a regular event
+             # over federation.
+             context = await self._compute_event_context_with_maybe_missing_prevs(
+                 destination, event
+             )
+             if context.partial_state:
+                 # this can happen if some or all of the event's prev_events still have
+                 # partial state. We were careful to only pick events from the db without
+                 # partial-state prev events, so that implies that a prev event has
+                 # been persisted (with partial state) since we did the query.
+                 #
+                 # So, let's just ignore `event` for now; when we re-run the db query
+                 # we should instead get its partial-state prev event, which we will
+                 # de-partial-state, and then come back to event.
+                 logger.warning(
+                     "%s still has prev_events with partial state: can't de-partial-state it yet",
+                     event.event_id,
+                 )
+                 return
+
+             # since the state at this event has changed, we should now re-evaluate
+             # whether it should have been rejected. We must already have all of the
+             # auth events (from last time we went round this path), so there is no
+             # need to pass the origin.
+             await self._check_event_auth(None, event, context)
+
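+             # Persist the recalculated state, and flag the event as no longer
+             # partial-stated.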
+             await self._store.update_state_for_partial_state_event(event, context)
+             self._state_storage_controller.notify_event_un_partial_stated(
+                 event.event_id
+             )
+             # Notify that there's a new row in the un_partial_stated_events stream.
+             self._notifier.notify_replication()
+
+     @trace
+     async def backfill(
+         self, dest: str, room_id: str, limit: int, extremities: StrCollection
+     ) -> None:
+         """Trigger a backfill request to `dest` for the given `room_id`
+
+         This will attempt to get more events from the remote. If the other side
+         has no new events to offer, this will return without doing anything.
+
+         As the events are received, we check their signatures, and also do some
+         sanity-checking on them. If any of the backfilled events are invalid,
+         this method throws a SynapseError.
+
+         We might also raise an InvalidResponseError if the response from the remote
+         server is just bogus.
+
+         TODO: make this more useful to distinguish failures of the remote
+         server from invalid events (there is probably no point in trying to
+         re-fetch invalid events from every other HS in the room.)
+         """
+         if self._is_mine_server_name(dest):
+             raise SynapseError(400, "Can't backfill from self.")
+
+         events = await self._federation_client.backfill(
+             dest, room_id, limit=limit, extremities=extremities
+         )
+
+         if not events:
+             return
+
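+         # Time how long it takes to process the backfilled events, feeding the
+         # histogram defined above.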
+         with backfill_processing_after_timer.labels(
+             **{SERVER_NAME_LABEL: self.server_name}
+         ).time():
+             # if there are any events in the wrong room, the remote server is buggy and
+             # should not be trusted.
+             for ev in events:
+                 if ev.room_id != room_id:
+                     raise InvalidResponseError(
+                         f"Remote server {dest} returned event {ev.event_id} which is in "
+                         f"room {ev.room_id}, when we were backfilling in {room_id}"
+                     )
+
+             await self._process_pulled_events(
+                 dest,
+                 events,
+                 backfilled=True,
+             )
+
+     @trace
+     async def _get_missing_events_for_pdu(
+         self, origin: str, pdu: EventBase, prevs: set[str], min_depth: int
+     ) -> None:
+         """
+         Args:
+             origin: Origin of the pdu. We will ask this server for the missing events.
+             pdu: received pdu
+             prevs: Set of event ids which we are missing
+             min_depth: Minimum depth of events to return.
+         """
+
+         room_id = pdu.room_id
+         event_id = pdu.event_id
+
+         seen = await self._store.have_events_in_timeline(prevs)
+
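+         # If we already have all the prev_events, there is nothing to fetch.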
+         if not prevs - seen:
+             return
+
+         latest_frozen = await self._store.get_latest_event_ids_in_room(room_id)
+
+         # We add the prev events that we have seen to the latest
+         # list to ensure the remote server doesn't give them to us
+         latest = seen | latest_frozen
+
+         logger.info(
+             "Requesting missing events between %s and %s",
+             shortstr(latest),
+             event_id,
+         )
+
+         # XXX: we set timeout to 10s to help workaround
+         # https://github.com/matrix-org/synapse/issues/1733.
+         # The reason is to avoid holding the linearizer lock
+         # whilst processing inbound /send transactions, causing
+         # FDs to stack up and block other inbound transactions
+         # which empirically can currently take up to 30 minutes.
+         #
+         # N.B. this explicitly disables retry attempts.
+         #
+         # N.B. this also increases our chances of falling back to
+         # fetching fresh state for the room if the missing event
+         # can't be found, which slightly reduces our security.
+         # it may also increase our DAG extremity count for the room,
+         # causing additional state resolution? See https://github.com/matrix-org/synapse/issues/1760.
+         # However, fetching state doesn't hold the linearizer lock
+         # apparently.
+         #
+         # see https://github.com/matrix-org/synapse/pull/1744
+         #
+         # ----
+         #
+         # Update richvdh 2018/09/18: There are a number of problems with timing this
+         # request out aggressively on the client side:
+         #
+         # - it plays badly with the server-side rate-limiter, which starts tarpitting you
+         #   if you send too many requests at once, so you end up with the server carefully
+         #   working through the backlog of your requests, which you have already timed
+         #   out.
+         #
+         # - for this request in particular, we now (as of
+         #   https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
+         #   server can't produce a plausible-looking set of prev_events - so we become
+         #   much more likely to reject the event.
+         #
+         # - contrary to what it says above, we do *not* fall back to fetching fresh state
+         #   for the room if get_missing_events times out. Rather, we give up processing
+         #   the PDU whose prevs we are missing, which then makes it much more likely that
+         #   we'll end up back here for the *next* PDU in the list, which exacerbates the
+         #   problem.
+         #
+         # - the aggressive 10s timeout was introduced to deal with incoming federation
+         #   requests taking 8 hours to process. It's not entirely clear why that was going
+         #   on; certainly there were other issues causing traffic storms which are now
+         #   resolved, and I think in any case we may be more sensible about our locking
+         #   now. We're *certainly* more sensible about our logging.
+         #
+         # All that said: Let's try increasing the timeout to 60s and see what happens.
+
+         try:
+             missing_events = await self._federation_client.get_missing_events(
+                 origin,
+                 room_id,
+                 earliest_events_ids=list(latest),
+                 latest_events=[pdu],
+                 limit=10,
+                 min_depth=min_depth,
+                 timeout=60000,
+             )
+         except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e:
+             # We failed to get the missing events, but since we need to handle
+             # the case of `get_missing_events` not returning the necessary
+             # events anyway, it is safe to simply log the error and continue.
+             logger.warning("Failed to get prev_events: %s", e)
+             return
+
+         logger.info("Got %d prev_events", len(missing_events))
+         await self._process_pulled_events(origin, missing_events, backfilled=False)
+
+     @trace
+     async def _process_pulled_events(
+         self, origin: str, events: Collection[EventBase], backfilled: bool
+     ) -> None:
+         """Process a batch of events we have pulled from a remote server
+
+         Pulls in any events required to auth the events, persists the received events,
+         and notifies clients, if appropriate.
+
+         Assumes the events have already had their signatures and hashes checked.
+
+         Params:
+             origin: The server we received these events from
+             events: The received events.
+             backfilled: True if this is part of a historical batch of events (inhibits
+                 notification to clients, and validation of device keys.)
+         """
+         set_tag(
+             SynapseTags.FUNC_ARG_PREFIX + "event_ids",
+             str([event.event_id for event in events]),
+         )
+         set_tag(
+             SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+             str(len(events)),
+         )
+         set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))
+         logger.debug(
+             "processing pulled backfilled=%s events=%s",
+             backfilled,
+             [
+                 "event_id=%s,depth=%d,body=%s,prevs=%s\n"
+                 % (
+                     event.event_id,
+                     event.depth,
+                     event.content.get("body", event.type),
+                     event.prev_event_ids(),
+                 )
+                 for event in events
+             ],
+         )
+
+         # Check if we already have any of these events.
+         # Note: we currently make a lookup in the database directly here rather than
+         # checking the event cache, due to:
+         # https://github.com/matrix-org/synapse/issues/13476
+         existing_events_map = await self._store._get_events_from_db(
+             [event.event_id for event in events]
+         )
+
+         new_events: list[EventBase] = []
+         for event in events:
+             event_id = event.event_id
+
+             # If we've already seen this event ID...
+             if event_id in existing_events_map:
+                 existing_event = existing_events_map[event_id]
+
+                 # ...and the event itself was not previously stored as an outlier...
+                 if not existing_event.event.internal_metadata.is_outlier():
+                     # ...then there's no need to persist it. We have it already.
+                     logger.info(
+                         "_process_pulled_event: Ignoring received event %s which we "
+                         "have already seen",
+                         event.event_id,
+                     )
+                     continue
+
+                 # While we have seen this event before, it was stored as an outlier.
+                 # We'll now persist it as a non-outlier.
+                 logger.info("De-outliering event %s", event_id)
+
+             # Continue on with the events that are new to us.
+             new_events.append(event)
+
+         set_tag(
+             SynapseTags.RESULT_PREFIX + "new_events.length",
+             str(len(new_events)),
+         )
+
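+         # Nested helper so that the same logic can be awaited for fresh events and
+         # run in the background for events which have previously failed to be pulled.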
+         @trace
+         async def _process_new_pulled_events(new_events: Collection[EventBase]) -> None:
+             # We want to sort these by depth so we process them and tell clients about
+             # them in order. It's also more efficient to backfill this way (`depth`
+             # ascending) because one backfill event is likely to be the `prev_event` of
+             # the next event we're going to process.
+             sorted_events = sorted(new_events, key=lambda x: x.depth)
+             for ev in sorted_events:
+                 with nested_logging_context(ev.event_id):
+                     await self._process_pulled_event(origin, ev, backfilled=backfilled)
+
+         # Check if we've already tried to process these events at some point in the
+         # past. We aren't concerned with the exponential backoff here, just whether it
+         # has failed to be processed before.
+         event_ids_with_failed_pull_attempts = (
+             await self._store.get_event_ids_with_failed_pull_attempts(
+                 [event.event_id for event in new_events]
+             )
+         )
+
+         events_with_failed_pull_attempts, fresh_events = partition(
+             new_events, lambda e: e.event_id in event_ids_with_failed_pull_attempts
+         )
+         set_tag(
+             SynapseTags.FUNC_ARG_PREFIX + "events_with_failed_pull_attempts",
+             str(event_ids_with_failed_pull_attempts),
+         )
+         set_tag(
+             SynapseTags.RESULT_PREFIX + "events_with_failed_pull_attempts.length",
+             str(len(events_with_failed_pull_attempts)),
+         )
+         set_tag(
+             SynapseTags.FUNC_ARG_PREFIX + "fresh_events",
+             str([event.event_id for event in fresh_events]),
+         )
+         set_tag(
+             SynapseTags.RESULT_PREFIX + "fresh_events.length",
+             str(len(fresh_events)),
+         )
+
+         # Process previously failed backfill events in the background to not waste
+         # time on something that is likely to fail again.
+         if len(events_with_failed_pull_attempts) > 0:
+             self.hs.run_as_background_process(
+                 "_process_new_pulled_events_with_failed_pull_attempts",
+                 _process_new_pulled_events,
+                 events_with_failed_pull_attempts,
+             )
+
+         # We can optimistically try to process and wait for the event to be fully
+         # persisted if we've never tried before.
+         if len(fresh_events) > 0:
+             await _process_new_pulled_events(fresh_events)
+
+     @trace
+     @tag_args
+     async def _process_pulled_event(
+         self, origin: str, event: EventBase, backfilled: bool
+     ) -> None:
+         """Process a single event that we have pulled from a remote server
+
+         Pulls in any events required to auth the event, persists the received event,
+         and notifies clients, if appropriate.
+
+         Assumes the event has already had its signatures and hashes checked.
+
+         This is somewhat equivalent to on_receive_pdu, but applies somewhat different
+         logic in the case that we are missing prev_events (in particular, it just
+         requests the state at that point, rather than triggering a get_missing_events) -
+         so is appropriate when we have pulled the event from a remote server, rather
+         than having it pushed to us.
+
+         Params:
+             origin: The server we received this event from
+             event: The received event
+             backfilled: True if this is part of a historical batch of events (inhibits
+                 notification to clients, and validation of device keys.)
+         """
+         logger.info("Processing pulled event %s", event)
+
+         # This function should not be used to persist outliers (use something
+         # else) because this does a bunch of operations that aren't necessary
+         # (extra work; in particular, it makes sure we have all the prev_events
+         # and resolves the state across those prev events). If you happen to run
+         # into a situation where the event you're trying to process/backfill is
+         # marked as an `outlier`, then you should update that spot to return an
+         # `EventBase` copy that doesn't have `outlier` flag set.
+         #
+         # `EventBase` is used to represent both an event we have not yet
+         # persisted, and one that we have persisted and now keep in the cache.
+         # In an ideal world this method would only be called with the first type
+         # of event, but it turns out that's not actually the case and for
+         # example, you could get an event from cache that is marked as an
+         # `outlier` (fix up that spot though).
+         assert not event.internal_metadata.is_outlier(), (
+             "Outlier event passed to _process_pulled_event. "
+             "To persist an event as a non-outlier, make sure to pass in a copy without `event.internal_metadata.outlier = true`."
+         )
+
+         event_id = event.event_id
+
+         try:
+             self._sanity_check_event(event)
+         except SynapseError as err:
+             logger.warning("Event %s failed sanity check: %s", event_id, err)
+             await self._store.record_event_failed_pull_attempt(
+                 event.room_id, event_id, str(err)
+             )
+             return
+
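+         # Compute the state at the event and process it. If the room is un-partial
+         # stated while we are doing so, retry once with full state.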
+         try:
+             try:
+                 context = await self._compute_event_context_with_maybe_missing_prevs(
+                     origin, event
+                 )
+                 await self._process_received_pdu(
+                     origin,
+                     event,
+                     context,
+                     backfilled=backfilled,
+                 )
+             except PartialStateConflictError:
+                 # The room was un-partial stated while we were processing the event.
+                 # Try once more, with full state this time.
+                 context = await self._compute_event_context_with_maybe_missing_prevs(
+                     origin, event
+                 )
+
+                 # We ought to have full state now, barring some unlikely race where we left and
+                 # rejoined the room in the background.
+                 if context.partial_state:
+                     raise AssertionError(
+                         f"Event {event.event_id} still has a partial resolved state "
+                         f"after room {event.room_id} was un-partial stated"
+                     )
+
+                 await self._process_received_pdu(
+                     origin,
+                     event,
+                     context,
+                     backfilled=backfilled,
+                 )
+         except FederationPullAttemptBackoffError as exc:
+             # Log a warning about why we failed to process the event (the error message
+             # for `FederationPullAttemptBackoffError` is pretty good)
+             logger.warning("_process_pulled_event: %s", exc)
+             # We do not record a failed pull attempt when we backoff fetching a missing
+             # `prev_event` because not being able to fetch the `prev_events` just means
+             # we won't be able to de-outlier the pulled event. But we can still use an
+             # `outlier` in the state/auth chain for another event. So we shouldn't stop
+             # a downstream event from trying to pull it.
+             #
+             # This avoids a cascade of backoff for all events in the DAG downstream from
+             # one event backoff upstream.
+         except FederationError as e:
+             await self._store.record_event_failed_pull_attempt(
+                 event.room_id, event_id, str(e)
+             )
+
+             if e.code == 403:
+                 logger.warning("Pulled event %s failed history check.", event_id)
+             else:
+                 raise
+
+     @trace
+     async def _compute_event_context_with_maybe_missing_prevs(
+         self, dest: str, event: EventBase
+     ) -> EventContext:
+         """Build an EventContext structure for a non-outlier event whose prev_events may
+         be missing.
+
+         This is used when we have pulled a batch of events from a remote server, and may
+         not have all the prev_events.
+
+         To build an EventContext, we need to calculate the state before the event. If we
+         already have all the prev_events for `event`, we can simply use the state after
+         the prev_events to calculate the state before `event`.
+
+         Otherwise, the missing prevs become new backwards extremities, and we fall back
+         to asking the remote server for the state after each missing `prev_event`,
+         and resolving across them.
+
+         That's ok provided we then resolve the state against other bits of the DAG
+         before using it - in other words, that the received event `event` is not going
+         to become the only forwards_extremity in the room (which will ensure that you
+         can't just take over a room by sending an event, withholding its prev_events,
+         and declaring yourself to be an admin in the subsequent state request).
+
+         In other words: we should only call this method if `event` has been *pulled*
+         as part of a batch of missing prev events, or similar.
+
+         Params:
+             dest: the remote server to ask for state at the missing prevs. Typically,
+                 this will be the server we got `event` from.
+             event: an event to check for missing prevs.
+
+         Returns:
+             The event context.
+
+         Raises:
+             FederationPullAttemptBackoffError if we are deliberately not attempting
+                 to pull one of the given event's `prev_event`s over federation because
+                 we've already done so recently and are backing off.
+             FederationError if we fail to get the state from the remote server after any
+                 missing `prev_event`s.
+         """
+         room_id = event.room_id
+         event_id = event.event_id
+
+         prevs = set(event.prev_event_ids())
+         seen = await self._store.have_events_in_timeline(prevs)
+         missing_prevs = prevs - seen
+
+         # If we've already recently attempted to pull this missing event, don't
+         # try it again so soon. Since we have to fetch all of the prev_events, we can
+         # bail early here if we find any to ignore.
+         prevs_with_pull_backoff = (
+             await self._store.get_event_ids_to_not_pull_from_backoff(
+                 room_id, missing_prevs
+             )
+         )
+         if len(prevs_with_pull_backoff) > 0:
+             raise FederationPullAttemptBackoffError(
+                 event_ids=prevs_with_pull_backoff.keys(),
+                 message=(
+                     f"While computing context for event={event_id}, not attempting to "
+                     f"pull missing prev_events={list(prevs_with_pull_backoff.keys())} "
+                     "because we already tried to pull recently (backing off)."
+                 ),
+                 retry_after_ms=(
+                     max(prevs_with_pull_backoff.values()) - self._clock.time_msec()
+                 ),
+             )
+
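+         # If we have all the prev_events, the state after them directly gives us
+         # the state before the event.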
1164
+ if not missing_prevs:
1165
+ return await self._state_handler.compute_event_context(event)
1166
+
1167
+ logger.info(
1168
+ "Event %s is missing prev_events %s: calculating state for a "
1169
+ "backwards extremity",
1170
+ event_id,
1171
+ shortstr(missing_prevs),
1172
+ )
1173
+ # Calculate the state after each of the previous events, and
1174
+ # resolve them to find the correct state at the current event.
1175
+
1176
+ try:
1177
+ # Determine whether we may be about to retrieve partial state
1178
+ # Events may be un-partial stated right after we compute the partial state
1179
+ # flag, but that's okay, as long as the flag errs on the conservative side.
1180
+ partial_state_flags = await self._store.get_partial_state_events(seen)
1181
+ partial_state = any(partial_state_flags.values())
1182
+
1183
+ # state_maps is a list of mappings from (type, state_key) to event_id
1184
+ state_maps: list[StateMap[str]] = []
1185
+
1186
+ # Ask the remote server for the states we don't
1187
+ # know about
1188
+ for p in missing_prevs:
1189
+ logger.info("Requesting state after missing prev_event %s", p)
1190
+
1191
+ with nested_logging_context(p):
1192
+ # note that if any of the missing prevs share missing state or
1193
+ # auth events, the requests to fetch those events are deduped
1194
+ # by the get_pdu_cache in federation_client.
1195
+ remote_state_map = (
1196
+ await self._get_state_ids_after_missing_prev_event(
1197
+ dest, room_id, p
1198
+ )
1199
+ )
1200
+
1201
+ state_maps.append(remote_state_map)
1202
+
1203
+ # Get the state of the events we know about. We do this *after*
1204
+ # trying to fetch missing state over federation as that might fail
1205
+ # and then we can skip loading the local state.
1206
+ ours = await self._state_storage_controller.get_state_groups_ids(
1207
+ room_id, seen, await_full_state=False
1208
+ )
1209
+ state_maps.extend(ours.values())
1210
+
1211
+ # we don't need this any more, let's delete it.
1212
+ del ours
1213
+
1214
+ room_version = await self._store.get_room_version_id(room_id)
1215
+ state_map = await self._state_resolution_handler.resolve_events_with_store(
1216
+ room_id,
1217
+ room_version,
1218
+ state_maps,
1219
+ event_map={event_id: event},
1220
+ state_res_store=StateResolutionStore(
1221
+ self._store, self._state_deletion_store
1222
+ ),
1223
+ )
1224
+
1225
+ except Exception as e:
1226
+ logger.warning(
1227
+ "Error attempting to resolve state at missing prev_events: %s", e
1228
+ )
1229
+ raise FederationError(
1230
+ "ERROR",
1231
+ 403,
1232
+ "We can't get valid state history.",
1233
+ affected=event_id,
1234
+ )
1235
+ return await self._state_handler.compute_event_context(
1236
+ event, state_ids_before_event=state_map, partial_state=partial_state
1237
+ )
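+
+    # [Editor's note] Illustrative sketch, not part of Synapse: each entry of the
+    # `state_maps` list passed to `resolve_events_with_store` above is a plain
+    # dict keyed by (event type, state key). With toy values:
+    #
+    #     state_maps = [
+    #         {("m.room.member", "@alice:a.example"): "$member_a"},
+    #         {("m.room.member", "@alice:a.example"): "$member_b"},
+    #     ]
+    #     # State resolution picks one winner per (type, state_key) pair, e.g.
+    #     # {("m.room.member", "@alice:a.example"): "$member_b"}.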
+
+    @trace
+    @tag_args
+    async def _get_state_ids_after_missing_prev_event(
+        self,
+        destination: str,
+        room_id: str,
+        event_id: str,
+    ) -> StateMap[str]:
+        """Requests all of the room state at a given event from a remote homeserver.
+
+        Args:
+            destination: The remote homeserver to query for the state.
+            room_id: The id of the room we're interested in.
+            event_id: The id of the event we want the state at.
+
+        Returns:
+            The event ids of the state *after* the given event.
+
+        Raises:
+            InvalidResponseError: if the remote homeserver's response contains fields
+                of the wrong type.
+        """
+
+        # It would be better if we could query the difference from our known
+        # state to the given `event_id` so the sending server doesn't have to
+        # send as much and we don't have to process as many events. For example
+        # in a room like #matrix:matrix.org, we get 200k events (77k state_events, 122k
+        # auth_events) from this call.
+        #
+        # Tracked by https://github.com/matrix-org/synapse/issues/13618
+        (
+            state_event_ids,
+            auth_event_ids,
+        ) = await self._federation_client.get_room_state_ids(
+            destination, room_id, event_id=event_id
+        )
+
+        logger.debug(
+            "state_ids returned %i state events, %i auth events",
+            len(state_event_ids),
+            len(auth_event_ids),
+        )
+
+        # Start by checking events we already have in the DB
+        desired_events = set(state_event_ids)
+        desired_events.add(event_id)
+        logger.debug("Fetching %i events from cache/store", len(desired_events))
+        have_events = await self._store.have_seen_events(room_id, desired_events)
+
+        missing_desired_event_ids = desired_events - have_events
+        logger.debug(
+            "We are missing %i events (got %i)",
+            len(missing_desired_event_ids),
+            len(have_events),
+        )
+
+        # We probably won't need most of the auth events, so let's just check which
+        # we have for now, rather than thrashing the event cache with them all
+        # unnecessarily.
+
+        # TODO: we probably won't actually need all of the auth events, since we
+        # already have a bunch of the state events. It would be nice if the
+        # federation api gave us a way of finding out which we actually need.
+
+        missing_auth_event_ids = set(auth_event_ids) - have_events
+        missing_auth_event_ids.difference_update(
+            await self._store.have_seen_events(room_id, missing_auth_event_ids)
+        )
+        logger.debug("We are also missing %i auth events", len(missing_auth_event_ids))
+
+        missing_event_ids = missing_desired_event_ids | missing_auth_event_ids
+
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_auth_event_ids",
+            str(missing_auth_event_ids),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_auth_event_ids.length",
+            str(len(missing_auth_event_ids)),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_desired_event_ids",
+            str(missing_desired_event_ids),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "missing_desired_event_ids.length",
+            str(len(missing_desired_event_ids)),
+        )
+
+        # Making an individual request for each of 1000s of events has a lot of
+        # overhead. On the other hand, we don't really want to fetch all of the events
+        # if we already have most of them.
+        #
+        # As an arbitrary heuristic, if we are missing at least 10% of the events, then
+        # we fetch the whole state.
+        #
+        # TODO: might it be better to have an API which lets us do an aggregate event
+        # request
+        if (len(missing_event_ids) * 10) >= len(auth_event_ids) + len(state_event_ids):
+            logger.debug("Requesting complete state from remote")
+            await self._get_state_and_persist(destination, room_id, event_id)
+        else:
+            logger.debug("Fetching %i events from remote", len(missing_event_ids))
+            await self._get_events_and_persist(
+                destination=destination, room_id=room_id, event_ids=missing_event_ids
+            )
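+
+        # [Editor's note] Illustrative arithmetic, not part of Synapse: the
+        # heuristic above fetches the full state once 10% or more of the
+        # referenced events are missing. For example:
+        #
+        #     state_event_ids, auth_event_ids = range(150), range(50)
+        #     missing_event_ids = set(range(20))
+        #     # 20 * 10 = 200 >= 150 + 50 = 200, so the full state is fetched.
+        #     assert (len(missing_event_ids) * 10) >= len(auth_event_ids) + len(state_event_ids)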
+
+        # We now need to fill out the state map, which involves fetching the
+        # type and state key for each event ID in the state.
+        state_map = {}
+
+        event_metadata = await self._store.get_metadata_for_events(state_event_ids)
+        for state_event_id, metadata in event_metadata.items():
+            if metadata.room_id != room_id:
+                # This is a bogus situation, but since we may only discover it a long time
+                # after it happened, we try our best to carry on, by just omitting the
+                # bad events from the returned state set.
+                #
+                # This can happen if a remote server claims that the state or
+                # auth_events at an event in room A are actually events in room B
+                logger.warning(
+                    "Remote server %s claims event %s in room %s is an auth/state "
+                    "event in room %s",
+                    destination,
+                    state_event_id,
+                    metadata.room_id,
+                    room_id,
+                )
+                continue
+
+            if metadata.state_key is None:
+                logger.warning(
+                    "Remote server gave us non-state event in state: %s", state_event_id
+                )
+                continue
+
+            state_map[(metadata.event_type, metadata.state_key)] = state_event_id
+
+        # if we couldn't get the prev event in question, that's a problem.
+        remote_event = await self._store.get_event(
+            event_id,
+            allow_none=True,
+            allow_rejected=True,
+            redact_behaviour=EventRedactBehaviour.as_is,
+        )
+        if not remote_event:
+            raise Exception("Unable to get missing prev_event %s" % (event_id,))
+
+        # missing state at that event is a warning, not a blocker
+        # XXX: this doesn't sound right? it means that we'll end up with incomplete
+        # state.
+        failed_to_fetch = desired_events - event_metadata.keys()
+        # `event_id` could be missing from `event_metadata` because it's not necessarily
+        # a state event. We've already checked that we've fetched it above.
+        failed_to_fetch.discard(event_id)
+        if failed_to_fetch:
+            logger.warning(
+                "Failed to fetch missing state events for %s %s",
+                event_id,
+                failed_to_fetch,
+            )
+            set_tag(
+                SynapseTags.RESULT_PREFIX + "failed_to_fetch",
+                str(failed_to_fetch),
+            )
+            set_tag(
+                SynapseTags.RESULT_PREFIX + "failed_to_fetch.length",
+                str(len(failed_to_fetch)),
+            )
+
+        if remote_event.is_state() and remote_event.rejected_reason is None:
+            state_map[(remote_event.type, remote_event.state_key)] = (
+                remote_event.event_id
+            )
+
+        return state_map
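+
+    # [Editor's note] Illustrative sketch, not part of Synapse: the returned
+    # `state_map` follows the shape built above, mapping each (event type,
+    # state key) pair to the id of the event that holds that piece of state:
+    #
+    #     state_map = {
+    #         ("m.room.create", ""): "$create_event",
+    #         ("m.room.member", "@alice:a.example"): "$alice_join",
+    #     }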
+
+    @trace
+    @tag_args
+    async def _get_state_and_persist(
+        self, destination: str, room_id: str, event_id: str
+    ) -> None:
+        """Get the complete room state at a given event, and persist any new events
+        as outliers"""
+        room_version = await self._store.get_room_version(room_id)
+        auth_events, state_events = await self._federation_client.get_room_state(
+            destination, room_id, event_id=event_id, room_version=room_version
+        )
+        logger.info("/state returned %i events", len(auth_events) + len(state_events))
+
+        await self._auth_and_persist_outliers(
+            room_id, itertools.chain(auth_events, state_events)
+        )
+
+        # we also need the event itself.
+        if not await self._store.have_seen_event(room_id, event_id):
+            await self._get_events_and_persist(
+                destination=destination, room_id=room_id, event_ids=(event_id,)
+            )
+
+    @trace
+    async def _process_received_pdu(
+        self,
+        origin: str,
+        event: EventBase,
+        context: EventContext,
+        backfilled: bool = False,
+    ) -> None:
+        """Called when we have a new non-outlier event.
+
+        This is called when we have a new event to add to the room DAG. This can be
+        due to:
+            * events received directly via a /send request
+            * events retrieved via get_missing_events after a /send request
+            * events backfilled after a client request.
+
+        It's not currently used for events received from incoming send_{join,knock,leave}
+        requests (which go via on_send_membership_event), nor for joins created by a
+        remote join dance (which go via process_remote_join).
+
+        We need to do auth checks and put it through the StateHandler.
+
+        Args:
+            origin: server sending the event
+
+            event: event to be persisted
+
+            context: The `EventContext` to persist the event with.
+
+            backfilled: True if this is part of a historical batch of events (inhibits
+                notification to clients, and validation of device keys.)
+
+        Raises:
+            PartialStateConflictError: if the room was un-partial stated in between
+                computing the state at the event and persisting it. The caller should
+                recompute `context` and retry exactly once when this happens.
+        """
+        logger.debug("Processing event: %s", event)
+        assert not event.internal_metadata.outlier
+
+        try:
+            await self._check_event_auth(origin, event, context)
+        except AuthError as e:
+            # This happens only if we couldn't find the auth events. We'll already have
+            # logged a warning, so now we just convert to a FederationError.
+            raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
+
+        if not backfilled and not context.rejected:
+            # For new (non-backfilled and non-outlier) events we check if the event
+            # passes auth based on the current state. If it doesn't then we
+            # "soft-fail" the event.
+            await self._check_for_soft_fail(event, context=context, origin=origin)
+
+        await self._run_push_actions_and_persist_event(event, context, backfilled)
+
+        if backfilled or context.rejected:
+            return
+
+        await self._maybe_kick_guest_users(event)
+
+        # For encrypted messages we check that we know about the sending device,
+        # if we don't then we mark the device cache for that user as stale.
+        if event.type == EventTypes.Encrypted:
+            device_id = event.content.get("device_id")
+            sender_key = event.content.get("sender_key")
+
+            cached_devices = await self._store.get_cached_devices_for_user(event.sender)
+
+            resync = False  # Whether we should resync device lists.
+
+            device = None
+            if device_id is not None:
+                device = cached_devices.get(device_id)
+                if device is None:
+                    logger.info(
+                        "Received event from remote device not in our cache: %s %s",
+                        event.sender,
+                        device_id,
+                    )
+                    resync = True
+
+            # We also check if the `sender_key` matches what we expect.
+            if sender_key is not None:
+                # Figure out what sender key we're expecting. If we know the
+                # device and recognize the algorithm then we can work out the
+                # exact key to expect. Otherwise check it matches any key we
+                # have for that device.
+
+                current_keys: Container[str] = []
+
+                if device:
+                    keys = device.get("keys", {}).get("keys", {})
+
+                    if (
+                        event.content.get("algorithm")
+                        == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
+                    ):
+                        # For this algorithm we expect a curve25519 key.
+                        key_name = "curve25519:%s" % (device_id,)
+                        current_keys = [keys.get(key_name)]
+                    else:
+                        # We don't understand the algorithm, so we just
+                        # check it matches a key for the device.
+                        current_keys = keys.values()
+                elif device_id:
+                    # We don't have any keys for the device ID.
+                    pass
+                else:
+                    # The event didn't include a device ID, so we just look for
+                    # keys across all devices.
+                    current_keys = [
+                        key
+                        for device in cached_devices.values()
+                        for key in device.get("keys", {}).get("keys", {}).values()
+                    ]
+
+                # We now check that the sender key matches (one of) the expected
+                # keys.
+                if sender_key not in current_keys:
+                    logger.info(
+                        "Received event from remote device with unexpected sender key: %s %s: %s",
+                        event.sender,
+                        device_id or "<no device_id>",
+                        sender_key,
+                    )
+                    resync = True
+
+            if resync:
+                self.hs.run_as_background_process(
+                    "resync_device_due_to_pdu",
+                    self._resync_device,
+                    event.sender,
+                )
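+
+    # [Editor's note] Illustrative sketch, not part of Synapse: the expected
+    # sender key above is looked up under the algorithm-qualified key name
+    # "curve25519:<device_id>" in the cached device's key dict. With toy values:
+    #
+    #     device_id = "ABCDEFGH"
+    #     keys = {"curve25519:ABCDEFGH": "sender_key_base64"}
+    #     key_name = "curve25519:%s" % (device_id,)
+    #     assert keys.get(key_name) == "sender_key_base64"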
+
+    async def _resync_device(self, sender: str) -> None:
+        """We have detected that the device list for the given user may be out
+        of sync, so we try and resync them.
+        """
+
+        try:
+            await self._store.mark_remote_users_device_caches_as_stale((sender,))
+
+            # Immediately attempt a resync in the background
+            await self._device_list_updater.multi_user_device_resync(user_ids=[sender])
+        except Exception:
+            logger.exception("Failed to resync device for %s", sender)
+
+    async def backfill_event_id(
+        self, destinations: StrCollection, room_id: str, event_id: str
+    ) -> PulledPduInfo:
+        """Backfill a single event and persist it as a non-outlier, which means
+        we also pull in all of the state and auth events necessary for it.
+
+        Args:
+            destinations: The homeservers to pull the given event_id from.
+            room_id: The room where the event is from.
+            event_id: The event ID to backfill.
+
+        Raises:
+            FederationError if we are unable to find the event from the destinations
+        """
+        logger.info("backfill_event_id: event_id=%s", event_id)
+
+        room_version = await self._store.get_room_version(room_id)
+
+        pulled_pdu_info = await self._federation_client.get_pdu(
+            destinations,
+            event_id,
+            room_version,
+        )
+
+        if not pulled_pdu_info:
+            raise FederationError(
+                "ERROR",
+                404,
+                f"Unable to find event_id={event_id} from remote servers to backfill.",
+                affected=event_id,
+            )
+
+        # Persist the event we just fetched, including pulling all of the state
+        # and auth events to de-outlier it. This also sets up the necessary
+        # `state_groups` for the event.
+        await self._process_pulled_events(
+            pulled_pdu_info.pull_origin,
+            [pulled_pdu_info.pdu],
+            # Prevent notifications going to clients
+            backfilled=True,
+        )
+
+        return pulled_pdu_info
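+
+    # [Editor's note] Hypothetical usage sketch, not part of Synapse; the
+    # `handler` name and server/room/event ids are illustrative. A caller
+    # supplies candidate servers and learns which one the PDU was pulled from:
+    #
+    #     pulled = await handler.backfill_event_id(
+    #         destinations=["hs1.example", "hs2.example"],
+    #         room_id="!room:hs1.example",
+    #         event_id="$missing_event",
+    #     )
+    #     logger.info("Pulled from %s", pulled.pull_origin)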
+
+    @trace
+    @tag_args
+    async def _get_events_and_persist(
+        self, destination: str, room_id: str, event_ids: StrCollection
+    ) -> None:
+        """Fetch the given events from a server, and persist them as outliers.
+
+        This function *does not* recursively get missing auth events of the
+        newly fetched events. Callers must include in the `event_ids` argument
+        any missing events from the auth chain.
+
+        Logs a warning if we can't find the given event.
+        """
+
+        room_version = await self._store.get_room_version(room_id)
+
+        events: list[EventBase] = []
+
+        async def get_event(event_id: str) -> None:
+            with nested_logging_context(event_id):
+                try:
+                    pulled_pdu_info = await self._federation_client.get_pdu(
+                        [destination],
+                        event_id,
+                        room_version,
+                    )
+                    if pulled_pdu_info is None:
+                        logger.warning(
+                            "Server %s didn't return event %s",
+                            destination,
+                            event_id,
+                        )
+                        return
+                    events.append(pulled_pdu_info.pdu)
+
+                except Exception as e:
+                    logger.warning(
+                        "Error fetching missing state/auth event %s: %s %s",
+                        event_id,
+                        type(e),
+                        e,
+                    )
+
+        await concurrently_execute(get_event, event_ids, 5)
+        logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
+        await self._auth_and_persist_outliers(room_id, events)
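+
+    # [Editor's note] Illustrative sketch, not part of Synapse: the
+    # `concurrently_execute(get_event, event_ids, 5)` call above runs the
+    # fetcher over all ids with at most five in flight at once, roughly like:
+    #
+    #     import asyncio
+    #
+    #     async def run_all(func, items, limit):
+    #         sem = asyncio.Semaphore(limit)
+    #
+    #         async def guarded(item):
+    #             async with sem:
+    #                 await func(item)
+    #
+    #         await asyncio.gather(*(guarded(i) for i in items))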
+
+    @trace
+    async def _auth_and_persist_outliers(
+        self, room_id: str, events: Iterable[EventBase]
+    ) -> None:
+        """Persist a batch of outlier events fetched from remote servers.
+
+        We first sort the events to make sure that we process each event's auth_events
+        before the event itself.
+
+        We then mark the events as outliers, persist them to the database, and, where
+        appropriate (eg, an invite), awake the notifier.
+
+        Args:
+            room_id: the room that the events are meant to be in (though this has
+                not yet been checked)
+            events: the events that have been fetched
+        """
+        event_map = {event.event_id: event for event in events}
+
+        event_ids = event_map.keys()
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids",
+            str(event_ids),
+        )
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+            str(len(event_ids)),
+        )
+
+        # filter out any events we have already seen. This might happen because
+        # the events were eagerly pushed to us (eg, during a room join), or because
+        # another thread has raced against us since we decided to request the event.
+        #
+        # This is just an optimisation, so it doesn't need to be watertight - the event
+        # persister does another round of deduplication.
+        seen_remotes = await self._store.have_seen_events(room_id, event_map.keys())
+        for s in seen_remotes:
+            event_map.pop(s, None)
+
+        # XXX: it might be possible to kick this process off in parallel with fetching
+        # the events.
+
+        # We need to persist an event's auth events before the event.
+        auth_graph = {
+            ev.event_id: [e_id for e_id in ev.auth_event_ids() if e_id in event_map]
+            for ev in event_map.values()
+        }
+        sorted_auth_event_ids = sorted_topologically(event_map.keys(), auth_graph)
+        sorted_auth_events = [event_map[e_id] for e_id in sorted_auth_event_ids]
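+
+        # [Editor's note] Illustrative sketch, not part of Synapse:
+        # `sorted_topologically` orders each event after its auth events. A
+        # minimal equivalent over the same graph shape ({node: [dependencies]}),
+        # assuming the graph is acyclic:
+        #
+        #     def topo(nodes, graph):
+        #         out, done, pending = [], set(), list(nodes)
+        #         while pending:
+        #             node = pending.pop(0)
+        #             if node in done:
+        #                 continue
+        #             deps = [d for d in graph.get(node, []) if d not in done]
+        #             if deps:
+        #                 pending = deps + [node] + pending
+        #             else:
+        #                 done.add(node)
+        #                 out.append(node)
+        #         return out
+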
+        logger.info(
+            "Persisting %i remaining outliers: %s",
+            len(sorted_auth_events),
+            shortstr(e.event_id for e in sorted_auth_events),
+        )
+
+        # get all the auth events for all the events in this batch. By now, they should
+        # have been persisted.
+        auth_event_ids = {
+            aid for event in sorted_auth_events for aid in event.auth_event_ids()
+        }
+        auth_map = {
+            ev.event_id: ev
+            for ev in sorted_auth_events
+            if ev.event_id in auth_event_ids
+        }
+
+        missing_events = auth_event_ids.difference(auth_map)
+        if missing_events:
+            persisted_events = await self._store.get_events(
+                missing_events,
+                allow_rejected=True,
+                redact_behaviour=EventRedactBehaviour.as_is,
+            )
+            auth_map.update(persisted_events)
+
+        events_and_contexts_to_persist: list[EventPersistencePair] = []
+
+        async def prep(event: EventBase) -> None:
+            with nested_logging_context(suffix=event.event_id):
+                auth = []
+                for auth_event_id in event.auth_event_ids():
+                    ae = auth_map.get(auth_event_id)
+                    if not ae:
+                        # the fact we can't find the auth event doesn't mean it doesn't
+                        # exist, which means it is premature to reject `event`. Instead we
+                        # just ignore it for now.
+                        logger.warning(
+                            "Dropping event %s, which relies on auth_event %s, which could not be found",
+                            event,
+                            auth_event_id,
+                        )
+                        # Drop the event from the auth_map too, else we may incorrectly persist
+                        # events which depend on this dropped event.
+                        auth_map.pop(event.event_id, None)
+                        return
+                    auth.append(ae)
+
+                # we're not bothering about room state, so flag the event as an outlier.
+                event.internal_metadata.outlier = True
+
+                context = EventContext.for_outlier(self._storage_controllers)
+                try:
+                    validate_event_for_room_version(event)
+                    await check_state_independent_auth_rules(
+                        self._store, event, batched_auth_events=auth_map
+                    )
+                    check_state_dependent_auth_rules(event, auth)
+                except AuthError as e:
+                    logger.warning("Rejecting %r because %s", event, e)
+                    context.rejected = RejectedReason.AUTH_ERROR
+                except EventSizeError as e:
+                    if e.unpersistable:
+                        # This event is completely unpersistable.
+                        raise e
+                    # Otherwise, we are somewhat lenient and just persist the event
+                    # as rejected, for moderate compatibility with older Synapse
+                    # versions.
+                    logger.warning("While validating received event %r: %s", event, e)
+                    context.rejected = RejectedReason.OVERSIZED_EVENT
+
+                events_and_contexts_to_persist.append((event, context))
+
+        for i, event in enumerate(sorted_auth_events):
+            await prep(event)
+
+            # The above call typically completes without actually yielding to
+            # the reactor. For large rooms let's yield to the reactor
+            # occasionally to ensure we don't block other work.
+            if (i + 1) % 1000 == 0:
+                await self._clock.sleep(0)
+
+        # Also persist the new events in batches for similar reasons as above.
+        for batch in batch_iter(events_and_contexts_to_persist, 1000):
+            await self.persist_events_and_notify(
+                room_id,
+                batch,
+                # Mark these events as backfilled as they're historic events that will
+                # eventually be backfilled. For example, missing events we fetch
+                # during backfill should be marked as backfilled as well.
+                backfilled=True,
+            )
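+
+    # [Editor's note] Illustrative sketch, not part of Synapse: `batch_iter`
+    # above chunks a collection into fixed-size batches, roughly (for a
+    # sequence input):
+    #
+    #     def batches(seq, size):
+    #         for i in range(0, len(seq), size):
+    #             yield seq[i : i + size]
+    #
+    #     assert [len(b) for b in batches(list(range(2500)), 1000)] == [1000, 1000, 500]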
+
+    @trace
+    async def _check_event_auth(
+        self, origin: str | None, event: EventBase, context: EventContext
+    ) -> None:
+        """
+        Checks whether an event should be rejected (for failing auth checks).
+
+        Args:
+            origin: The host the event originates from. This is used to fetch
+                any missing auth events. It can be set to None, but only if we are
+                sure that we already have all the auth events.
+            event: The event itself.
+            context: The event context.
+
+        Raises:
+            AuthError if we were unable to find copies of the event's auth events.
+                (Most other failures just cause us to set `context.rejected`.)
+        """
+        # This method should only be used for non-outliers
+        assert not event.internal_metadata.outlier
+
+        # first of all, check that the event itself is valid.
+        try:
+            validate_event_for_room_version(event)
+        except AuthError as e:
+            logger.warning("While validating received event %r: %s", event, e)
+            # TODO: use a different rejected reason here?
+            context.rejected = RejectedReason.AUTH_ERROR
+            return
+        except EventSizeError as e:
+            if e.unpersistable:
+                # This event is completely unpersistable.
+                raise e
+            # Otherwise, we are somewhat lenient and just persist the event
+            # as rejected, for moderate compatibility with older Synapse
+            # versions.
+            logger.warning("While validating received event %r: %s", event, e)
+            context.rejected = RejectedReason.OVERSIZED_EVENT
+            return
+
+        # next, check that we have all of the event's auth events.
+        #
+        # Note that this can raise AuthError, which we want to propagate to the
+        # caller rather than swallow with `context.rejected` (since we cannot be
+        # certain that there is a permanent problem with the event).
+        claimed_auth_events = await self._load_or_fetch_auth_events_for_event(
+            origin, event
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "claimed_auth_events",
+            str([ev.event_id for ev in claimed_auth_events]),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "claimed_auth_events.length",
+            str(len(claimed_auth_events)),
+        )
+
+        # ... and check that the event passes auth at those auth events.
+        # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu:
+        # 4. Passes authorization rules based on the event’s auth events,
+        #    otherwise it is rejected.
+        try:
+            await check_state_independent_auth_rules(self._store, event)
+            check_state_dependent_auth_rules(event, claimed_auth_events)
+        except AuthError as e:
+            logger.warning(
+                "While checking auth of %r against auth_events: %s", event, e
+            )
+            context.rejected = RejectedReason.AUTH_ERROR
+            return
+
+        # now check the auth rules pass against the room state before the event
+        # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu:
+        # 5. Passes authorization rules based on the state before the event,
+        #    otherwise it is rejected.
+        #
+        # ... however, if we only have partial state for the room, then there is a good
+        # chance that we'll be missing some of the state needed to auth the new event.
+        # So, we state-resolve the auth events that we are given against the state that
+        # we know about, which ensures things like bans are applied. (Note that we'll
+        # already have checked we have all the auth events, in
+        # _load_or_fetch_auth_events_for_event above)
+        if context.partial_state:
+            room_version = await self._store.get_room_version_id(event.room_id)
+
+            local_state_id_map = await context.get_prev_state_ids()
+            claimed_auth_events_id_map = {
+                (ev.type, ev.state_key): ev.event_id for ev in claimed_auth_events
+            }
+
+            state_for_auth_id_map = (
+                await self._state_resolution_handler.resolve_events_with_store(
+                    event.room_id,
+                    room_version,
+                    [local_state_id_map, claimed_auth_events_id_map],
+                    event_map=None,
+                    state_res_store=StateResolutionStore(
+                        self._store, self._state_deletion_store
+                    ),
+                )
+            )
+        else:
+            event_types = event_auth.auth_types_for_event(event.room_version, event)
+            state_for_auth_id_map = await context.get_prev_state_ids(
+                StateFilter.from_types(event_types)
+            )
+
+        calculated_auth_event_ids = self._event_auth_handler.compute_auth_events(
+            event, state_for_auth_id_map, for_verification=True
+        )
+
+        # if those are the same, we're done here.
+        if collections.Counter(event.auth_event_ids()) == collections.Counter(
+            calculated_auth_event_ids
+        ):
+            return
+
+        # otherwise, re-run the auth checks based on what we calculated.
+        calculated_auth_events = await self._store.get_events_as_list(
+            calculated_auth_event_ids
+        )
+
+        # log the differences
+
+        claimed_auth_event_map = {(e.type, e.state_key): e for e in claimed_auth_events}
+        calculated_auth_event_map = {
+            (e.type, e.state_key): e for e in calculated_auth_events
+        }
+        logger.info(
+            "event's auth_events are different to our calculated auth_events. "
+            "Claimed but not calculated: %s. Calculated but not claimed: %s",
+            [
+                ev
+                for k, ev in claimed_auth_event_map.items()
+                if k not in calculated_auth_event_map
+                or calculated_auth_event_map[k].event_id != ev.event_id
+            ],
+            [
+                ev
+                for k, ev in calculated_auth_event_map.items()
+                if k not in claimed_auth_event_map
+                or claimed_auth_event_map[k].event_id != ev.event_id
+            ],
+        )
+
+        try:
+            check_state_dependent_auth_rules(event, calculated_auth_events)
+        except AuthError as e:
+            logger.warning(
+                "While checking auth of %r against room state before the event: %s",
+                event,
+                e,
+            )
+            context.rejected = RejectedReason.AUTH_ERROR
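+
+    # [Editor's note] Illustrative sketch, not part of Synapse: the
+    # `collections.Counter` comparison above is a multiset equality check, so
+    # ordering of the auth event ids is irrelevant but multiplicity is not:
+    #
+    #     from collections import Counter
+    #
+    #     assert Counter(["$a", "$b"]) == Counter(["$b", "$a"])
+    #     assert Counter(["$a", "$a"]) != Counter(["$a"])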
+
+    @trace
+    async def _maybe_kick_guest_users(self, event: EventBase) -> None:
+        if event.type != EventTypes.GuestAccess:
+            return
+
+        guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
+        if guest_access == GuestAccess.CAN_JOIN:
+            return
+
+        current_state = await self._storage_controllers.state.get_current_state(
+            event.room_id
+        )
+        current_state_list = list(current_state.values())
+        await self._get_room_member_handler().kick_guest_users(current_state_list)
+
+    async def _check_for_soft_fail(
+        self,
+        event: EventBase,
+        context: EventContext,
+        origin: str,
+    ) -> None:
+        """Checks if we should soft fail the event; if so, marks the event as
+        such.
+
+        Does nothing for events in rooms with partial state, since we may not have an
+        accurate membership event for the sender in the current state.
+
+        Args:
+            event: The event to check for soft failure.
+            context: The `EventContext` which we are about to persist the event with.
+            origin: The host the event originates from.
+        """
+        if await self._store.is_partial_state_room(event.room_id):
+            # We might not know the sender's membership in the current state, so don't
+            # soft fail anything. Even if we do have a membership for the sender in the
+            # current state, it may have been derived from state resolution between
+            # partial and full state and may not be accurate.
+            return
+
+        extrem_ids = await self._store.get_latest_event_ids_in_room(event.room_id)
+        prev_event_ids = set(event.prev_event_ids())
+
+        if extrem_ids == prev_event_ids:
+            # If they're the same then the current state is the same as the
+            # state at the event, so no point rechecking auth for soft fail.
+            return
+
+        room_version = await self._store.get_room_version_id(event.room_id)
+        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+        # The event types we want to pull from the "current" state.
+        auth_types = auth_types_for_event(room_version_obj, event)
+
+        # Calculate the "current state".
+        seen_event_ids = await self._store.have_events_in_timeline(prev_event_ids)
+        has_missing_prevs = bool(prev_event_ids - seen_event_ids)
+        if has_missing_prevs:
+            # We don't have all the prev_events of this event, which means we have a
+            # gap in the graph, and the new event is going to become a new backwards
+            # extremity.
+            #
+            # In this case we want to be a little careful as we might have been
+            # down for a while and have an incorrect view of the current state,
+            # however we still want to do checks as gaps are easy to
+            # maliciously manufacture.
+            #
+            # So we use a "current state" that is actually a state
+            # resolution across the current forward extremities and the
+            # given state at the event. This should correctly handle cases
+            # like bans, especially with state res v2.
+
+            state_sets_d = await self._state_storage_controller.get_state_groups_ids(
+                event.room_id, extrem_ids
+            )
+            state_sets: list[StateMap[str]] = list(state_sets_d.values())
+            state_ids = await context.get_prev_state_ids()
+            state_sets.append(state_ids)
+            current_state_ids = (
+                await self._state_resolution_handler.resolve_events_with_store(
+                    event.room_id,
+                    room_version,
+                    state_sets,
+                    event_map=None,
+                    state_res_store=StateResolutionStore(
+                        self._store, self._state_deletion_store
+                    ),
+                )
+            )
+        else:
+            current_state_ids = (
+                await self._state_storage_controller.get_current_state_ids(
+                    event.room_id, StateFilter.from_types(auth_types)
+                )
+            )
+
+        logger.debug(
+            "Doing soft-fail check for %s: state %s",
+            event.event_id,
+            current_state_ids,
+        )
+
+        # Now check whether the event passes auth against said current state
+        current_state_ids_list = [
+            e for k, e in current_state_ids.items() if k in auth_types
+        ]
+        current_auth_events = await self._store.get_events_as_list(
+            current_state_ids_list
+        )
+
+        try:
+            check_state_dependent_auth_rules(event, current_auth_events)
+        except AuthError as e:
+            logger.warning(
+                "Soft-failing %r (from %s) because %s",
+                event,
+                origin,
+                e,
+                extra={
+                    "room_id": event.room_id,
+                    "mxid": event.sender,
+                    "hs": origin,
+                },
+            )
+            soft_failed_event_counter.labels(
+                **{SERVER_NAME_LABEL: self.server_name}
+            ).inc()
+            event.internal_metadata.soft_failed = True
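+
+    # [Editor's note] Illustrative note, not part of Synapse: a soft-failed
+    # event is still persisted and remains part of the DAG, but it is not sent
+    # to clients and is not used as a forward extremity. Downstream code can
+    # test the flag that was just set:
+    #
+    #     if event.internal_metadata.soft_failed:
+    #         ...  # e.g. skip notifying clients about this event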
+
+    async def _load_or_fetch_auth_events_for_event(
+        self, destination: str | None, event: EventBase
+    ) -> Collection[EventBase]:
+        """Fetch this event's auth_events, from database or remote
+
+        Loads any of the auth_events that we already have from the database/cache. If
+        there are any that are missing, calls /event_auth to get the complete auth
+        chain for the event (and then attempts to load the auth_events again).
+
+        If any of the auth_events cannot be found, raises an AuthError. This can happen
+        for a number of reasons; eg: the events don't exist, or we were unable to talk
+        to `destination`, or we couldn't validate the signature on the event (which
+        in turn has multiple potential causes).
+
+        Args:
+            destination: where to send the /event_auth request. Typically the server
+                that sent us `event` in the first place.
+
+                If this is None, no attempt is made to load any missing auth events:
+                rather, an AssertionError is raised if there are any missing events.
+
+            event: the event whose auth_events we want
+
+        Returns:
+            all of the events listed in `event.auth_events_ids`, after deduplication
+
+        Raises:
+            AssertionError if some auth events were missing and no `destination` was
+                supplied.
+
+            AuthError if we were unable to fetch the auth_events for any reason.
+        """
+        event_auth_event_ids = set(event.auth_event_ids())
+        event_auth_events = await self._store.get_events(
+            event_auth_event_ids, allow_rejected=True
+        )
+        missing_auth_event_ids = event_auth_event_ids.difference(
+            event_auth_events.keys()
+        )
+        if not missing_auth_event_ids:
+            return event_auth_events.values()
+        if destination is None:
+            # this shouldn't happen: destination must be set unless we know we have already
+            # persisted the auth events.
+            raise AssertionError(
+                "_load_or_fetch_auth_events_for_event() called with no destination for "
+                "an event with missing auth_events"
+            )
+
+        logger.info(
+            "Event %s refers to unknown auth events %s: fetching auth chain",
+            event,
+            missing_auth_event_ids,
+        )
+        try:
+            await self._get_remote_auth_chain_for_event(
+                destination, event.room_id, event.event_id
+            )
+        except Exception as e:
+            logger.warning("Failed to get auth chain for %s: %s", event, e)
+            # in this case, it's very likely we still won't have all the auth
+            # events - but we pick that up below.
+
+        # try to fetch the auth events we missed last time.
+        extra_auth_events = await self._store.get_events(
+            missing_auth_event_ids, allow_rejected=True
+        )
+        missing_auth_event_ids.difference_update(extra_auth_events.keys())
+        event_auth_events.update(extra_auth_events)
+        if not missing_auth_event_ids:
+            return event_auth_events.values()
+
+        # we still don't have all the auth events.
+        logger.warning(
+            "Missing auth events for %s: %s",
+            event,
+            shortstr(missing_auth_event_ids),
+        )
+        # the fact we can't find the auth event doesn't mean it doesn't
+        # exist, which means it is premature to store `event` as rejected.
+        # instead we raise an AuthError, which will make the caller ignore it.
+        raise AuthError(code=HTTPStatus.FORBIDDEN, msg="Auth events could not be found")
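+
+    # [Editor's note] Illustrative sketch, not part of Synapse: the
+    # load-then-fetch-then-reload flow above reduces to set arithmetic:
+    #
+    #     wanted = {"$a", "$b", "$c"}
+    #     loaded = {"$a"}
+    #     missing = wanted - loaded   # {"$b", "$c"}
+    #     loaded |= {"$b"}            # after the /event_auth round-trip
+    #     missing -= loaded           # {"$c"}: still missing, so AuthError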
+
+    @trace
+    @tag_args
+    async def _get_remote_auth_chain_for_event(
+        self, destination: str, room_id: str, event_id: str
+    ) -> None:
+        """If we are missing some of an event's auth events, attempt to request them
+
+        Args:
+            destination: where to fetch the auth tree from
+            room_id: the room in which we are lacking auth events
+            event_id: the event for which we are lacking auth events
+        """
+        try:
+            remote_events = await self._federation_client.get_event_auth(
+                destination, room_id, event_id
+            )
+
+        except RequestSendFailed as e1:
+            # The other side isn't around or doesn't implement the
+            # endpoint, so let's just bail out.
+            logger.info("Failed to get event auth from remote: %s", e1)
+            return
+
+        logger.info("/event_auth returned %i events", len(remote_events))
+
+        # `event` may be returned, but we should not yet process it.
+        remote_auth_events = (e for e in remote_events if e.event_id != event_id)
+
+        await self._auth_and_persist_outliers(room_id, remote_auth_events)
+
+    @trace
+    async def _run_push_actions_and_persist_event(
+        self, event: EventBase, context: EventContext, backfilled: bool = False
+    ) -> None:
+        """Run the push actions for a received event, and persist it.
+
+        Args:
+            event: The event itself.
+            context: The event context.
+            backfilled: True if the event was backfilled.
+
+        Raises:
+            PartialStateConflictError: if attempting to persist a partial state event in
+                a room that has been un-partial stated.
+        """
+        # this method should not be called on outliers (those code paths call
+        # persist_events_and_notify directly.)
+        assert not event.internal_metadata.outlier
+
+        if not backfilled and not context.rejected:
+            min_depth = await self._store.get_min_depth(event.room_id)
+            if min_depth is None or min_depth > event.depth:
+                # XXX richvdh 2021/10/07: I don't really understand what this
+                # condition is doing. I think it's trying not to send pushes
+                # for events that predate our join - but that's not really what
+                # min_depth means, and anyway ancient events are a more general
+                # problem.
+                #
+                # for now I'm just going to log about it.
+                logger.info(
+                    "Skipping push actions for old event with depth %s < %s",
+                    event.depth,
+                    min_depth,
+                )
+            else:
+                await self._bulk_push_rule_evaluator.action_for_events_by_user(
+                    [(event, context)]
+                )
+
+        try:
+            await self.persist_events_and_notify(
+                event.room_id, [(event, context)], backfilled=backfilled
+            )
+        except Exception:
+            await self._store.remove_push_actions_from_staging(event.event_id)
+            raise
+
+    async def persist_events_and_notify(
+        self,
+        room_id: str,
+        event_and_contexts: Sequence[EventPersistencePair],
+        backfilled: bool = False,
+    ) -> int:
+        """Persists events and tells the notifier/pushers about them, if
+        necessary.
+
+        Args:
+            room_id: The room ID of events being persisted.
+            event_and_contexts: Sequence of events with their associated
+                context that should be persisted. All events must belong to
+                the same room.
+            backfilled: Whether these events are a result of
+                backfilling or not
+
+        Returns:
+            The stream ID after which all events have been persisted.
+
+        Raises:
+            PartialStateConflictError: if attempting to persist a partial state event in
+                a room that has been un-partial stated.
+        """
+        if not event_and_contexts:
+            return self._store.get_room_max_stream_ordering()
+
+        instance = self._config.worker.events_shard_config.get_instance(room_id)
+        if instance != self._instance_name:
+            # Limit the number of events sent over replication. We choose 200
+            # here as that is what we default to in `max_request_body_size(..)`
+            result = {}
+            try:
+                for batch in batch_iter(event_and_contexts, 200):
+                    result = await self._send_events(
+                        instance_name=instance,
+                        store=self._store,
+                        room_id=room_id,
+                        event_and_contexts=batch,
+                        backfilled=backfilled,
+                    )
+            except SynapseError as e:
+                if e.code == HTTPStatus.CONFLICT:
+                    raise PartialStateConflictError()
+                raise
+            return result["max_stream_id"]
+        else:
+            assert self._storage_controllers.persistence
+
+            # Note that this returns the events that were persisted, which may not be
+            # the same as were passed in if some were deduplicated due to transaction IDs.
+            (
+                events,
+                max_stream_token,
+            ) = await self._storage_controllers.persistence.persist_events(
+                event_and_contexts, backfilled=backfilled
+            )
+
+            # After persistence, we never notify clients (wake up `/sync` streams) about
+            # backfilled events but it's important to let all the workers know about any
+            # new event (backfilled or not) because TODO
+            self._notifier.notify_replication()
+
+            if self._ephemeral_messages_enabled:
+                for event in events:
+                    # If there's an expiry timestamp on the event, schedule its expiry.
+                    self._message_handler.maybe_schedule_expiry(event)
+
+            if not backfilled:  # Never notify for backfilled events
+                with start_active_span("notify_persisted_events"):
+                    set_tag(
+                        SynapseTags.RESULT_PREFIX + "event_ids",
+                        str([ev.event_id for ev in events]),
+                    )
+                    set_tag(
+                        SynapseTags.RESULT_PREFIX + "event_ids.length",
+                        str(len(events)),
+                    )
+                    for event in events:
+                        await self._notify_persisted_event(event, max_stream_token)
+
+            return max_stream_token.stream
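+
+    # [Editor's note] Illustrative sketch, not part of Synapse; the names
+    # `shard_config` and `my_instance_name` are hypothetical. The shard check
+    # above routes persistence by room id, so every worker agrees on which
+    # instance owns a given room:
+    #
+    #     owner = shard_config.get_instance("!room:example.org")
+    #     if owner != my_instance_name:
+    #         ...  # forward the events over replication in batches of 200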
+
+    async def _notify_persisted_event(
+        self, event: EventBase, max_stream_token: RoomStreamToken
+    ) -> None:
+        """Checks to see if notifier/pushers should be notified about the
+        event or not.
+
+        Args:
+            event: The event that was persisted.
+            max_stream_token: The max_stream_id returned by persist_events
+        """
+
+        extra_users = []
+        if event.type == EventTypes.Member:
+            target_user_id = event.state_key
+
+            # We notify for memberships if it's an invite for one of our
+            # users
+            if event.internal_metadata.is_outlier():
+                if event.membership != Membership.INVITE:
+                    if not self._is_mine_id(target_user_id):
+                        return
+
+            target_user = UserID.from_string(target_user_id)
+            extra_users.append(target_user)
+        elif event.internal_metadata.is_outlier():
+            return
+
+        # the event has been persisted so it should have a stream ordering.
+        assert event.internal_metadata.stream_ordering
+
+        event_pos = PersistedEventPosition(
+            self._instance_name, event.internal_metadata.stream_ordering
+        )
+        await self._notifier.on_new_room_events(
+            [(event, event_pos)], max_stream_token, extra_users=extra_users
+        )
+
+        if event.type == EventTypes.Member and event.membership == Membership.JOIN:
+            # TODO retrieve the previous state, and exclude join -> join transitions
+            self._notifier.notify_user_joined_room(event.event_id, event.room_id)
+
+        # If this is a server ACL event, clear the cache in the storage controller.
+        if event.type == EventTypes.ServerACL:
+            self._state_storage_controller.get_server_acl_for_room.invalidate(
+                (event.room_id,)
+            )
+
+    def _sanity_check_event(self, ev: EventBase) -> None:
+        """
+        Do some early sanity checks of a received event
+
+        In particular, checks it doesn't have an excessive number of
+        prev_events or auth_events, which could cause a huge state resolution
+        or cascade of event fetches.
+
+        Args:
+            ev: event to be checked
+
+        Raises:
+            SynapseError if the event does not pass muster
+        """
+        if len(ev.prev_event_ids()) > 20:
+            logger.warning(
+                "Rejecting event %s which has %i prev_events",
+                ev.event_id,
+                len(ev.prev_event_ids()),
+            )
+            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")
+
+        if len(ev.auth_event_ids()) > 10:
+            logger.warning(
+                "Rejecting event %s which has %i auth_events",
+                ev.event_id,
+                len(ev.auth_event_ids()),
+            )
+            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
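+
+    # [Editor's note] Illustrative note, not part of Synapse: the limits above
+    # (20 prev_events, 10 auth_events) are well clear of what valid events
+    # need; the Matrix auth rules only ever reference a handful of auth events
+    # (create, power levels, join rules, and the relevant memberships), e.g.:
+    #
+    #     MAX_PREV_EVENTS, MAX_AUTH_EVENTS = 20, 10
+    #     typical_auth = ["$create", "$power_levels", "$join_rules", "$sender_member"]
+    #     assert len(typical_auth) <= MAX_AUTH_EVENTS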