matrix-synapse 1.143.0 (cp310-abi3-manylinux_2_28_aarch64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
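As an illustration, since a .whl file is an ordinary zip archive, a file listing like the one below can be reproduced locally with Python's standard library. This is a minimal sketch, assuming the wheel has already been downloaded from PyPI; the local path is an assumption, not part of this page.

    # Minimal sketch: a .whl is a zip archive, so its contents can be listed
    # with the standard library. The path below is a hypothetical local copy.
    import zipfile

    wheel_path = "matrix_synapse-1.143.0-cp310-abi3-manylinux_2_28_aarch64.whl"

    with zipfile.ZipFile(wheel_path) as wheel:
        for info in wheel.infolist():
            # Each entry corresponds to one row in the listing below; a brand-new
            # release shows every file as added (+N -0).
            print(info.filename, info.file_size)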

Potentially problematic release.


This version of matrix-synapse might be problematic.

Files changed (1058)
  1. matrix_synapse-1.143.0.dist-info/AUTHORS.rst +51 -0
  2. matrix_synapse-1.143.0.dist-info/LICENSE-AGPL-3.0 +661 -0
  3. matrix_synapse-1.143.0.dist-info/LICENSE-COMMERCIAL +6 -0
  4. matrix_synapse-1.143.0.dist-info/METADATA +385 -0
  5. matrix_synapse-1.143.0.dist-info/RECORD +1058 -0
  6. matrix_synapse-1.143.0.dist-info/WHEEL +4 -0
  7. matrix_synapse-1.143.0.dist-info/entry_points.txt +14 -0
  8. synapse/__init__.py +97 -0
  9. synapse/_scripts/__init__.py +0 -0
  10. synapse/_scripts/export_signing_key.py +109 -0
  11. synapse/_scripts/generate_config.py +83 -0
  12. synapse/_scripts/generate_log_config.py +56 -0
  13. synapse/_scripts/generate_signing_key.py +55 -0
  14. synapse/_scripts/generate_workers_map.py +318 -0
  15. synapse/_scripts/hash_password.py +95 -0
  16. synapse/_scripts/move_remote_media_to_new_store.py +128 -0
  17. synapse/_scripts/register_new_matrix_user.py +402 -0
  18. synapse/_scripts/review_recent_signups.py +212 -0
  19. synapse/_scripts/synapse_port_db.py +1604 -0
  20. synapse/_scripts/synctl.py +365 -0
  21. synapse/_scripts/update_synapse_database.py +130 -0
  22. synapse/api/__init__.py +20 -0
  23. synapse/api/auth/__init__.py +207 -0
  24. synapse/api/auth/base.py +406 -0
  25. synapse/api/auth/internal.py +299 -0
  26. synapse/api/auth/mas.py +436 -0
  27. synapse/api/auth/msc3861_delegated.py +617 -0
  28. synapse/api/auth_blocking.py +144 -0
  29. synapse/api/constants.py +362 -0
  30. synapse/api/errors.py +907 -0
  31. synapse/api/filtering.py +537 -0
  32. synapse/api/presence.py +102 -0
  33. synapse/api/ratelimiting.py +480 -0
  34. synapse/api/room_versions.py +535 -0
  35. synapse/api/urls.py +118 -0
  36. synapse/app/__init__.py +60 -0
  37. synapse/app/_base.py +862 -0
  38. synapse/app/admin_cmd.py +388 -0
  39. synapse/app/appservice.py +30 -0
  40. synapse/app/client_reader.py +30 -0
  41. synapse/app/complement_fork_starter.py +206 -0
  42. synapse/app/event_creator.py +29 -0
  43. synapse/app/federation_reader.py +30 -0
  44. synapse/app/federation_sender.py +30 -0
  45. synapse/app/frontend_proxy.py +30 -0
  46. synapse/app/generic_worker.py +474 -0
  47. synapse/app/homeserver.py +505 -0
  48. synapse/app/media_repository.py +30 -0
  49. synapse/app/phone_stats_home.py +296 -0
  50. synapse/app/pusher.py +30 -0
  51. synapse/app/synchrotron.py +30 -0
  52. synapse/app/user_dir.py +31 -0
  53. synapse/appservice/__init__.py +458 -0
  54. synapse/appservice/api.py +567 -0
  55. synapse/appservice/scheduler.py +564 -0
  56. synapse/config/__init__.py +27 -0
  57. synapse/config/__main__.py +62 -0
  58. synapse/config/_base.py +1106 -0
  59. synapse/config/_base.pyi +215 -0
  60. synapse/config/_util.py +99 -0
  61. synapse/config/account_validity.py +116 -0
  62. synapse/config/api.py +141 -0
  63. synapse/config/appservice.py +210 -0
  64. synapse/config/auth.py +80 -0
  65. synapse/config/auto_accept_invites.py +43 -0
  66. synapse/config/background_updates.py +44 -0
  67. synapse/config/cache.py +231 -0
  68. synapse/config/captcha.py +90 -0
  69. synapse/config/cas.py +116 -0
  70. synapse/config/consent.py +73 -0
  71. synapse/config/database.py +184 -0
  72. synapse/config/emailconfig.py +367 -0
  73. synapse/config/experimental.py +595 -0
  74. synapse/config/federation.py +114 -0
  75. synapse/config/homeserver.py +141 -0
  76. synapse/config/jwt.py +55 -0
  77. synapse/config/key.py +447 -0
  78. synapse/config/logger.py +390 -0
  79. synapse/config/mas.py +192 -0
  80. synapse/config/matrixrtc.py +66 -0
  81. synapse/config/metrics.py +84 -0
  82. synapse/config/modules.py +40 -0
  83. synapse/config/oembed.py +185 -0
  84. synapse/config/oidc.py +509 -0
  85. synapse/config/password_auth_providers.py +82 -0
  86. synapse/config/push.py +64 -0
  87. synapse/config/ratelimiting.py +254 -0
  88. synapse/config/redis.py +74 -0
  89. synapse/config/registration.py +296 -0
  90. synapse/config/repository.py +311 -0
  91. synapse/config/retention.py +162 -0
  92. synapse/config/room.py +88 -0
  93. synapse/config/room_directory.py +165 -0
  94. synapse/config/saml2.py +251 -0
  95. synapse/config/server.py +1170 -0
  96. synapse/config/server_notices.py +84 -0
  97. synapse/config/spam_checker.py +66 -0
  98. synapse/config/sso.py +121 -0
  99. synapse/config/stats.py +54 -0
  100. synapse/config/third_party_event_rules.py +40 -0
  101. synapse/config/tls.py +192 -0
  102. synapse/config/tracer.py +71 -0
  103. synapse/config/user_directory.py +47 -0
  104. synapse/config/user_types.py +42 -0
  105. synapse/config/voip.py +59 -0
  106. synapse/config/workers.py +642 -0
  107. synapse/crypto/__init__.py +20 -0
  108. synapse/crypto/context_factory.py +278 -0
  109. synapse/crypto/event_signing.py +194 -0
  110. synapse/crypto/keyring.py +931 -0
  111. synapse/event_auth.py +1266 -0
  112. synapse/events/__init__.py +667 -0
  113. synapse/events/auto_accept_invites.py +216 -0
  114. synapse/events/builder.py +387 -0
  115. synapse/events/presence_router.py +243 -0
  116. synapse/events/snapshot.py +559 -0
  117. synapse/events/utils.py +924 -0
  118. synapse/events/validator.py +305 -0
  119. synapse/federation/__init__.py +22 -0
  120. synapse/federation/federation_base.py +382 -0
  121. synapse/federation/federation_client.py +2132 -0
  122. synapse/federation/federation_server.py +1540 -0
  123. synapse/federation/persistence.py +70 -0
  124. synapse/federation/send_queue.py +531 -0
  125. synapse/federation/sender/__init__.py +1164 -0
  126. synapse/federation/sender/per_destination_queue.py +886 -0
  127. synapse/federation/sender/transaction_manager.py +210 -0
  128. synapse/federation/transport/__init__.py +28 -0
  129. synapse/federation/transport/client.py +1199 -0
  130. synapse/federation/transport/server/__init__.py +334 -0
  131. synapse/federation/transport/server/_base.py +429 -0
  132. synapse/federation/transport/server/federation.py +910 -0
  133. synapse/federation/units.py +133 -0
  134. synapse/handlers/__init__.py +20 -0
  135. synapse/handlers/account.py +162 -0
  136. synapse/handlers/account_data.py +360 -0
  137. synapse/handlers/account_validity.py +361 -0
  138. synapse/handlers/admin.py +615 -0
  139. synapse/handlers/appservice.py +989 -0
  140. synapse/handlers/auth.py +2481 -0
  141. synapse/handlers/cas.py +413 -0
  142. synapse/handlers/deactivate_account.py +363 -0
  143. synapse/handlers/delayed_events.py +599 -0
  144. synapse/handlers/device.py +1870 -0
  145. synapse/handlers/devicemessage.py +399 -0
  146. synapse/handlers/directory.py +545 -0
  147. synapse/handlers/e2e_keys.py +1834 -0
  148. synapse/handlers/e2e_room_keys.py +455 -0
  149. synapse/handlers/event_auth.py +390 -0
  150. synapse/handlers/events.py +201 -0
  151. synapse/handlers/federation.py +2039 -0
  152. synapse/handlers/federation_event.py +2419 -0
  153. synapse/handlers/identity.py +812 -0
  154. synapse/handlers/initial_sync.py +528 -0
  155. synapse/handlers/jwt.py +120 -0
  156. synapse/handlers/message.py +2347 -0
  157. synapse/handlers/oidc.py +1801 -0
  158. synapse/handlers/pagination.py +768 -0
  159. synapse/handlers/password_policy.py +102 -0
  160. synapse/handlers/presence.py +2633 -0
  161. synapse/handlers/profile.py +655 -0
  162. synapse/handlers/push_rules.py +164 -0
  163. synapse/handlers/read_marker.py +79 -0
  164. synapse/handlers/receipts.py +351 -0
  165. synapse/handlers/register.py +1059 -0
  166. synapse/handlers/relations.py +623 -0
  167. synapse/handlers/reports.py +98 -0
  168. synapse/handlers/room.py +2448 -0
  169. synapse/handlers/room_list.py +632 -0
  170. synapse/handlers/room_member.py +2365 -0
  171. synapse/handlers/room_member_worker.py +146 -0
  172. synapse/handlers/room_policy.py +186 -0
  173. synapse/handlers/room_summary.py +1057 -0
  174. synapse/handlers/saml.py +524 -0
  175. synapse/handlers/search.py +723 -0
  176. synapse/handlers/send_email.py +209 -0
  177. synapse/handlers/set_password.py +71 -0
  178. synapse/handlers/sliding_sync/__init__.py +1701 -0
  179. synapse/handlers/sliding_sync/extensions.py +969 -0
  180. synapse/handlers/sliding_sync/room_lists.py +2262 -0
  181. synapse/handlers/sliding_sync/store.py +128 -0
  182. synapse/handlers/sso.py +1291 -0
  183. synapse/handlers/state_deltas.py +82 -0
  184. synapse/handlers/stats.py +321 -0
  185. synapse/handlers/sync.py +3106 -0
  186. synapse/handlers/thread_subscriptions.py +190 -0
  187. synapse/handlers/typing.py +606 -0
  188. synapse/handlers/ui_auth/__init__.py +48 -0
  189. synapse/handlers/ui_auth/checkers.py +332 -0
  190. synapse/handlers/user_directory.py +783 -0
  191. synapse/handlers/worker_lock.py +371 -0
  192. synapse/http/__init__.py +105 -0
  193. synapse/http/additional_resource.py +62 -0
  194. synapse/http/client.py +1373 -0
  195. synapse/http/connectproxyclient.py +316 -0
  196. synapse/http/federation/__init__.py +19 -0
  197. synapse/http/federation/matrix_federation_agent.py +490 -0
  198. synapse/http/federation/srv_resolver.py +196 -0
  199. synapse/http/federation/well_known_resolver.py +367 -0
  200. synapse/http/matrixfederationclient.py +1873 -0
  201. synapse/http/proxy.py +290 -0
  202. synapse/http/proxyagent.py +497 -0
  203. synapse/http/replicationagent.py +202 -0
  204. synapse/http/request_metrics.py +309 -0
  205. synapse/http/server.py +1110 -0
  206. synapse/http/servlet.py +1018 -0
  207. synapse/http/site.py +825 -0
  208. synapse/http/types.py +27 -0
  209. synapse/logging/__init__.py +31 -0
  210. synapse/logging/_remote.py +261 -0
  211. synapse/logging/_terse_json.py +95 -0
  212. synapse/logging/context.py +1209 -0
  213. synapse/logging/formatter.py +62 -0
  214. synapse/logging/handlers.py +99 -0
  215. synapse/logging/loggers.py +25 -0
  216. synapse/logging/opentracing.py +1132 -0
  217. synapse/logging/scopecontextmanager.py +160 -0
  218. synapse/media/_base.py +830 -0
  219. synapse/media/filepath.py +417 -0
  220. synapse/media/media_repository.py +1580 -0
  221. synapse/media/media_storage.py +702 -0
  222. synapse/media/oembed.py +277 -0
  223. synapse/media/preview_html.py +556 -0
  224. synapse/media/storage_provider.py +195 -0
  225. synapse/media/thumbnailer.py +833 -0
  226. synapse/media/url_previewer.py +875 -0
  227. synapse/metrics/__init__.py +748 -0
  228. synapse/metrics/_gc.py +219 -0
  229. synapse/metrics/_reactor_metrics.py +171 -0
  230. synapse/metrics/_types.py +38 -0
  231. synapse/metrics/background_process_metrics.py +555 -0
  232. synapse/metrics/common_usage_metrics.py +94 -0
  233. synapse/metrics/jemalloc.py +248 -0
  234. synapse/module_api/__init__.py +2131 -0
  235. synapse/module_api/callbacks/__init__.py +50 -0
  236. synapse/module_api/callbacks/account_validity_callbacks.py +106 -0
  237. synapse/module_api/callbacks/media_repository_callbacks.py +157 -0
  238. synapse/module_api/callbacks/ratelimit_callbacks.py +78 -0
  239. synapse/module_api/callbacks/spamchecker_callbacks.py +991 -0
  240. synapse/module_api/callbacks/third_party_event_rules_callbacks.py +592 -0
  241. synapse/module_api/errors.py +42 -0
  242. synapse/notifier.py +970 -0
  243. synapse/push/__init__.py +212 -0
  244. synapse/push/bulk_push_rule_evaluator.py +635 -0
  245. synapse/push/clientformat.py +126 -0
  246. synapse/push/emailpusher.py +333 -0
  247. synapse/push/httppusher.py +564 -0
  248. synapse/push/mailer.py +1010 -0
  249. synapse/push/presentable_names.py +216 -0
  250. synapse/push/push_tools.py +114 -0
  251. synapse/push/push_types.py +141 -0
  252. synapse/push/pusher.py +87 -0
  253. synapse/push/pusherpool.py +501 -0
  254. synapse/push/rulekinds.py +33 -0
  255. synapse/py.typed +0 -0
  256. synapse/replication/__init__.py +20 -0
  257. synapse/replication/http/__init__.py +68 -0
  258. synapse/replication/http/_base.py +468 -0
  259. synapse/replication/http/account_data.py +297 -0
  260. synapse/replication/http/deactivate_account.py +81 -0
  261. synapse/replication/http/delayed_events.py +62 -0
  262. synapse/replication/http/devices.py +254 -0
  263. synapse/replication/http/federation.py +334 -0
  264. synapse/replication/http/login.py +106 -0
  265. synapse/replication/http/membership.py +364 -0
  266. synapse/replication/http/presence.py +133 -0
  267. synapse/replication/http/push.py +156 -0
  268. synapse/replication/http/register.py +172 -0
  269. synapse/replication/http/send_events.py +182 -0
  270. synapse/replication/http/state.py +82 -0
  271. synapse/replication/http/streams.py +101 -0
  272. synapse/replication/tcp/__init__.py +56 -0
  273. synapse/replication/tcp/client.py +552 -0
  274. synapse/replication/tcp/commands.py +569 -0
  275. synapse/replication/tcp/context.py +41 -0
  276. synapse/replication/tcp/external_cache.py +156 -0
  277. synapse/replication/tcp/handler.py +922 -0
  278. synapse/replication/tcp/protocol.py +608 -0
  279. synapse/replication/tcp/redis.py +509 -0
  280. synapse/replication/tcp/resource.py +348 -0
  281. synapse/replication/tcp/streams/__init__.py +96 -0
  282. synapse/replication/tcp/streams/_base.py +765 -0
  283. synapse/replication/tcp/streams/events.py +287 -0
  284. synapse/replication/tcp/streams/federation.py +92 -0
  285. synapse/replication/tcp/streams/partial_state.py +80 -0
  286. synapse/res/providers.json +29 -0
  287. synapse/res/templates/_base.html +29 -0
  288. synapse/res/templates/account_previously_renewed.html +6 -0
  289. synapse/res/templates/account_renewed.html +6 -0
  290. synapse/res/templates/add_threepid.html +8 -0
  291. synapse/res/templates/add_threepid.txt +6 -0
  292. synapse/res/templates/add_threepid_failure.html +7 -0
  293. synapse/res/templates/add_threepid_success.html +6 -0
  294. synapse/res/templates/already_in_use.html +12 -0
  295. synapse/res/templates/already_in_use.txt +10 -0
  296. synapse/res/templates/auth_success.html +21 -0
  297. synapse/res/templates/invalid_token.html +6 -0
  298. synapse/res/templates/mail-Element.css +7 -0
  299. synapse/res/templates/mail-Vector.css +7 -0
  300. synapse/res/templates/mail-expiry.css +4 -0
  301. synapse/res/templates/mail.css +156 -0
  302. synapse/res/templates/notice_expiry.html +46 -0
  303. synapse/res/templates/notice_expiry.txt +7 -0
  304. synapse/res/templates/notif.html +51 -0
  305. synapse/res/templates/notif.txt +22 -0
  306. synapse/res/templates/notif_mail.html +59 -0
  307. synapse/res/templates/notif_mail.txt +10 -0
  308. synapse/res/templates/password_reset.html +10 -0
  309. synapse/res/templates/password_reset.txt +7 -0
  310. synapse/res/templates/password_reset_confirmation.html +15 -0
  311. synapse/res/templates/password_reset_failure.html +7 -0
  312. synapse/res/templates/password_reset_success.html +6 -0
  313. synapse/res/templates/recaptcha.html +42 -0
  314. synapse/res/templates/registration.html +12 -0
  315. synapse/res/templates/registration.txt +10 -0
  316. synapse/res/templates/registration_failure.html +6 -0
  317. synapse/res/templates/registration_success.html +6 -0
  318. synapse/res/templates/registration_token.html +18 -0
  319. synapse/res/templates/room.html +33 -0
  320. synapse/res/templates/room.txt +9 -0
  321. synapse/res/templates/sso.css +129 -0
  322. synapse/res/templates/sso_account_deactivated.html +25 -0
  323. synapse/res/templates/sso_auth_account_details.html +186 -0
  324. synapse/res/templates/sso_auth_account_details.js +116 -0
  325. synapse/res/templates/sso_auth_bad_user.html +26 -0
  326. synapse/res/templates/sso_auth_confirm.html +27 -0
  327. synapse/res/templates/sso_auth_success.html +26 -0
  328. synapse/res/templates/sso_error.html +71 -0
  329. synapse/res/templates/sso_footer.html +19 -0
  330. synapse/res/templates/sso_login_idp_picker.html +60 -0
  331. synapse/res/templates/sso_new_user_consent.html +30 -0
  332. synapse/res/templates/sso_partial_profile.html +19 -0
  333. synapse/res/templates/sso_redirect_confirm.html +39 -0
  334. synapse/res/templates/style.css +33 -0
  335. synapse/res/templates/terms.html +27 -0
  336. synapse/rest/__init__.py +197 -0
  337. synapse/rest/admin/__init__.py +390 -0
  338. synapse/rest/admin/_base.py +72 -0
  339. synapse/rest/admin/background_updates.py +171 -0
  340. synapse/rest/admin/devices.py +221 -0
  341. synapse/rest/admin/event_reports.py +173 -0
  342. synapse/rest/admin/events.py +69 -0
  343. synapse/rest/admin/experimental_features.py +137 -0
  344. synapse/rest/admin/federation.py +243 -0
  345. synapse/rest/admin/media.py +540 -0
  346. synapse/rest/admin/registration_tokens.py +358 -0
  347. synapse/rest/admin/rooms.py +1061 -0
  348. synapse/rest/admin/scheduled_tasks.py +70 -0
  349. synapse/rest/admin/server_notice_servlet.py +132 -0
  350. synapse/rest/admin/statistics.py +132 -0
  351. synapse/rest/admin/username_available.py +58 -0
  352. synapse/rest/admin/users.py +1606 -0
  353. synapse/rest/client/__init__.py +20 -0
  354. synapse/rest/client/_base.py +113 -0
  355. synapse/rest/client/account.py +930 -0
  356. synapse/rest/client/account_data.py +319 -0
  357. synapse/rest/client/account_validity.py +103 -0
  358. synapse/rest/client/appservice_ping.py +125 -0
  359. synapse/rest/client/auth.py +218 -0
  360. synapse/rest/client/auth_metadata.py +122 -0
  361. synapse/rest/client/capabilities.py +121 -0
  362. synapse/rest/client/delayed_events.py +165 -0
  363. synapse/rest/client/devices.py +587 -0
  364. synapse/rest/client/directory.py +211 -0
  365. synapse/rest/client/events.py +116 -0
  366. synapse/rest/client/filter.py +112 -0
  367. synapse/rest/client/initial_sync.py +65 -0
  368. synapse/rest/client/keys.py +678 -0
  369. synapse/rest/client/knock.py +104 -0
  370. synapse/rest/client/login.py +750 -0
  371. synapse/rest/client/login_token_request.py +127 -0
  372. synapse/rest/client/logout.py +93 -0
  373. synapse/rest/client/matrixrtc.py +52 -0
  374. synapse/rest/client/media.py +285 -0
  375. synapse/rest/client/mutual_rooms.py +93 -0
  376. synapse/rest/client/notifications.py +137 -0
  377. synapse/rest/client/openid.py +109 -0
  378. synapse/rest/client/password_policy.py +69 -0
  379. synapse/rest/client/presence.py +131 -0
  380. synapse/rest/client/profile.py +291 -0
  381. synapse/rest/client/push_rule.py +331 -0
  382. synapse/rest/client/pusher.py +181 -0
  383. synapse/rest/client/read_marker.py +104 -0
  384. synapse/rest/client/receipts.py +165 -0
  385. synapse/rest/client/register.py +1067 -0
  386. synapse/rest/client/relations.py +138 -0
  387. synapse/rest/client/rendezvous.py +76 -0
  388. synapse/rest/client/reporting.py +207 -0
  389. synapse/rest/client/room.py +1669 -0
  390. synapse/rest/client/room_keys.py +426 -0
  391. synapse/rest/client/room_upgrade_rest_servlet.py +112 -0
  392. synapse/rest/client/sendtodevice.py +85 -0
  393. synapse/rest/client/sync.py +1131 -0
  394. synapse/rest/client/tags.py +129 -0
  395. synapse/rest/client/thirdparty.py +130 -0
  396. synapse/rest/client/thread_subscriptions.py +247 -0
  397. synapse/rest/client/tokenrefresh.py +52 -0
  398. synapse/rest/client/transactions.py +149 -0
  399. synapse/rest/client/user_directory.py +90 -0
  400. synapse/rest/client/versions.py +191 -0
  401. synapse/rest/client/voip.py +88 -0
  402. synapse/rest/consent/__init__.py +0 -0
  403. synapse/rest/consent/consent_resource.py +210 -0
  404. synapse/rest/health.py +38 -0
  405. synapse/rest/key/__init__.py +20 -0
  406. synapse/rest/key/v2/__init__.py +40 -0
  407. synapse/rest/key/v2/local_key_resource.py +125 -0
  408. synapse/rest/key/v2/remote_key_resource.py +302 -0
  409. synapse/rest/media/__init__.py +0 -0
  410. synapse/rest/media/config_resource.py +53 -0
  411. synapse/rest/media/create_resource.py +90 -0
  412. synapse/rest/media/download_resource.py +110 -0
  413. synapse/rest/media/media_repository_resource.py +113 -0
  414. synapse/rest/media/preview_url_resource.py +77 -0
  415. synapse/rest/media/thumbnail_resource.py +142 -0
  416. synapse/rest/media/upload_resource.py +187 -0
  417. synapse/rest/media/v1/__init__.py +39 -0
  418. synapse/rest/media/v1/_base.py +23 -0
  419. synapse/rest/media/v1/media_storage.py +23 -0
  420. synapse/rest/media/v1/storage_provider.py +23 -0
  421. synapse/rest/synapse/__init__.py +20 -0
  422. synapse/rest/synapse/client/__init__.py +93 -0
  423. synapse/rest/synapse/client/federation_whitelist.py +66 -0
  424. synapse/rest/synapse/client/jwks.py +77 -0
  425. synapse/rest/synapse/client/new_user_consent.py +115 -0
  426. synapse/rest/synapse/client/oidc/__init__.py +45 -0
  427. synapse/rest/synapse/client/oidc/backchannel_logout_resource.py +42 -0
  428. synapse/rest/synapse/client/oidc/callback_resource.py +48 -0
  429. synapse/rest/synapse/client/password_reset.py +129 -0
  430. synapse/rest/synapse/client/pick_idp.py +107 -0
  431. synapse/rest/synapse/client/pick_username.py +153 -0
  432. synapse/rest/synapse/client/rendezvous.py +58 -0
  433. synapse/rest/synapse/client/saml2/__init__.py +42 -0
  434. synapse/rest/synapse/client/saml2/metadata_resource.py +46 -0
  435. synapse/rest/synapse/client/saml2/response_resource.py +52 -0
  436. synapse/rest/synapse/client/sso_register.py +56 -0
  437. synapse/rest/synapse/client/unsubscribe.py +88 -0
  438. synapse/rest/synapse/mas/__init__.py +71 -0
  439. synapse/rest/synapse/mas/_base.py +55 -0
  440. synapse/rest/synapse/mas/devices.py +239 -0
  441. synapse/rest/synapse/mas/users.py +469 -0
  442. synapse/rest/well_known.py +148 -0
  443. synapse/server.py +1257 -0
  444. synapse/server_notices/__init__.py +0 -0
  445. synapse/server_notices/consent_server_notices.py +136 -0
  446. synapse/server_notices/resource_limits_server_notices.py +215 -0
  447. synapse/server_notices/server_notices_manager.py +388 -0
  448. synapse/server_notices/server_notices_sender.py +67 -0
  449. synapse/server_notices/worker_server_notices_sender.py +46 -0
  450. synapse/spam_checker_api/__init__.py +31 -0
  451. synapse/state/__init__.py +1022 -0
  452. synapse/state/v1.py +369 -0
  453. synapse/state/v2.py +984 -0
  454. synapse/static/client/login/index.html +47 -0
  455. synapse/static/client/login/js/jquery-3.4.1.min.js +2 -0
  456. synapse/static/client/login/js/login.js +291 -0
  457. synapse/static/client/login/spinner.gif +0 -0
  458. synapse/static/client/login/style.css +79 -0
  459. synapse/static/index.html +63 -0
  460. synapse/storage/__init__.py +43 -0
  461. synapse/storage/_base.py +245 -0
  462. synapse/storage/admin_client_config.py +25 -0
  463. synapse/storage/background_updates.py +1188 -0
  464. synapse/storage/controllers/__init__.py +57 -0
  465. synapse/storage/controllers/persist_events.py +1237 -0
  466. synapse/storage/controllers/purge_events.py +455 -0
  467. synapse/storage/controllers/state.py +950 -0
  468. synapse/storage/controllers/stats.py +119 -0
  469. synapse/storage/database.py +2719 -0
  470. synapse/storage/databases/__init__.py +175 -0
  471. synapse/storage/databases/main/__init__.py +420 -0
  472. synapse/storage/databases/main/account_data.py +1059 -0
  473. synapse/storage/databases/main/appservice.py +473 -0
  474. synapse/storage/databases/main/cache.py +911 -0
  475. synapse/storage/databases/main/censor_events.py +225 -0
  476. synapse/storage/databases/main/client_ips.py +815 -0
  477. synapse/storage/databases/main/delayed_events.py +562 -0
  478. synapse/storage/databases/main/deviceinbox.py +1271 -0
  479. synapse/storage/databases/main/devices.py +2578 -0
  480. synapse/storage/databases/main/directory.py +212 -0
  481. synapse/storage/databases/main/e2e_room_keys.py +689 -0
  482. synapse/storage/databases/main/end_to_end_keys.py +1894 -0
  483. synapse/storage/databases/main/event_federation.py +2508 -0
  484. synapse/storage/databases/main/event_push_actions.py +1933 -0
  485. synapse/storage/databases/main/events.py +3765 -0
  486. synapse/storage/databases/main/events_bg_updates.py +2910 -0
  487. synapse/storage/databases/main/events_forward_extremities.py +126 -0
  488. synapse/storage/databases/main/events_worker.py +2786 -0
  489. synapse/storage/databases/main/experimental_features.py +130 -0
  490. synapse/storage/databases/main/filtering.py +231 -0
  491. synapse/storage/databases/main/keys.py +291 -0
  492. synapse/storage/databases/main/lock.py +553 -0
  493. synapse/storage/databases/main/media_repository.py +1068 -0
  494. synapse/storage/databases/main/metrics.py +460 -0
  495. synapse/storage/databases/main/monthly_active_users.py +443 -0
  496. synapse/storage/databases/main/openid.py +60 -0
  497. synapse/storage/databases/main/presence.py +509 -0
  498. synapse/storage/databases/main/profile.py +539 -0
  499. synapse/storage/databases/main/purge_events.py +521 -0
  500. synapse/storage/databases/main/push_rule.py +970 -0
  501. synapse/storage/databases/main/pusher.py +793 -0
  502. synapse/storage/databases/main/receipts.py +1341 -0
  503. synapse/storage/databases/main/registration.py +3072 -0
  504. synapse/storage/databases/main/rejections.py +37 -0
  505. synapse/storage/databases/main/relations.py +1116 -0
  506. synapse/storage/databases/main/room.py +2779 -0
  507. synapse/storage/databases/main/roommember.py +2110 -0
  508. synapse/storage/databases/main/search.py +939 -0
  509. synapse/storage/databases/main/session.py +151 -0
  510. synapse/storage/databases/main/signatures.py +94 -0
  511. synapse/storage/databases/main/sliding_sync.py +603 -0
  512. synapse/storage/databases/main/state.py +1002 -0
  513. synapse/storage/databases/main/state_deltas.py +329 -0
  514. synapse/storage/databases/main/stats.py +789 -0
  515. synapse/storage/databases/main/stream.py +2577 -0
  516. synapse/storage/databases/main/tags.py +360 -0
  517. synapse/storage/databases/main/task_scheduler.py +225 -0
  518. synapse/storage/databases/main/thread_subscriptions.py +589 -0
  519. synapse/storage/databases/main/transactions.py +675 -0
  520. synapse/storage/databases/main/ui_auth.py +420 -0
  521. synapse/storage/databases/main/user_directory.py +1330 -0
  522. synapse/storage/databases/main/user_erasure_store.py +117 -0
  523. synapse/storage/databases/state/__init__.py +22 -0
  524. synapse/storage/databases/state/bg_updates.py +497 -0
  525. synapse/storage/databases/state/deletion.py +557 -0
  526. synapse/storage/databases/state/store.py +948 -0
  527. synapse/storage/engines/__init__.py +70 -0
  528. synapse/storage/engines/_base.py +154 -0
  529. synapse/storage/engines/postgres.py +261 -0
  530. synapse/storage/engines/sqlite.py +199 -0
  531. synapse/storage/invite_rule.py +112 -0
  532. synapse/storage/keys.py +40 -0
  533. synapse/storage/prepare_database.py +730 -0
  534. synapse/storage/push_rule.py +28 -0
  535. synapse/storage/roommember.py +88 -0
  536. synapse/storage/schema/README.md +4 -0
  537. synapse/storage/schema/__init__.py +186 -0
  538. synapse/storage/schema/common/delta/25/00background_updates.sql +40 -0
  539. synapse/storage/schema/common/delta/35/00background_updates_add_col.sql +36 -0
  540. synapse/storage/schema/common/delta/58/00background_update_ordering.sql +38 -0
  541. synapse/storage/schema/common/full_schemas/72/full.sql.postgres +8 -0
  542. synapse/storage/schema/common/full_schemas/72/full.sql.sqlite +6 -0
  543. synapse/storage/schema/common/schema_version.sql +60 -0
  544. synapse/storage/schema/main/delta/12/v12.sql +82 -0
  545. synapse/storage/schema/main/delta/13/v13.sql +38 -0
  546. synapse/storage/schema/main/delta/14/v14.sql +42 -0
  547. synapse/storage/schema/main/delta/15/appservice_txns.sql +50 -0
  548. synapse/storage/schema/main/delta/15/presence_indices.sql +2 -0
  549. synapse/storage/schema/main/delta/15/v15.sql +24 -0
  550. synapse/storage/schema/main/delta/16/events_order_index.sql +4 -0
  551. synapse/storage/schema/main/delta/16/remote_media_cache_index.sql +2 -0
  552. synapse/storage/schema/main/delta/16/remove_duplicates.sql +9 -0
  553. synapse/storage/schema/main/delta/16/room_alias_index.sql +3 -0
  554. synapse/storage/schema/main/delta/16/unique_constraints.sql +72 -0
  555. synapse/storage/schema/main/delta/16/users.sql +56 -0
  556. synapse/storage/schema/main/delta/17/drop_indexes.sql +37 -0
  557. synapse/storage/schema/main/delta/17/server_keys.sql +43 -0
  558. synapse/storage/schema/main/delta/17/user_threepids.sql +9 -0
  559. synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql +51 -0
  560. synapse/storage/schema/main/delta/19/event_index.sql +38 -0
  561. synapse/storage/schema/main/delta/20/dummy.sql +1 -0
  562. synapse/storage/schema/main/delta/20/pushers.py +93 -0
  563. synapse/storage/schema/main/delta/21/end_to_end_keys.sql +53 -0
  564. synapse/storage/schema/main/delta/21/receipts.sql +57 -0
  565. synapse/storage/schema/main/delta/22/receipts_index.sql +41 -0
  566. synapse/storage/schema/main/delta/22/user_threepids_unique.sql +19 -0
  567. synapse/storage/schema/main/delta/24/stats_reporting.sql +37 -0
  568. synapse/storage/schema/main/delta/25/fts.py +81 -0
  569. synapse/storage/schema/main/delta/25/guest_access.sql +44 -0
  570. synapse/storage/schema/main/delta/25/history_visibility.sql +44 -0
  571. synapse/storage/schema/main/delta/25/tags.sql +57 -0
  572. synapse/storage/schema/main/delta/26/account_data.sql +36 -0
  573. synapse/storage/schema/main/delta/27/account_data.sql +55 -0
  574. synapse/storage/schema/main/delta/27/forgotten_memberships.sql +45 -0
  575. synapse/storage/schema/main/delta/27/ts.py +61 -0
  576. synapse/storage/schema/main/delta/28/event_push_actions.sql +46 -0
  577. synapse/storage/schema/main/delta/28/events_room_stream.sql +39 -0
  578. synapse/storage/schema/main/delta/28/public_roms_index.sql +39 -0
  579. synapse/storage/schema/main/delta/28/receipts_user_id_index.sql +41 -0
  580. synapse/storage/schema/main/delta/28/upgrade_times.sql +40 -0
  581. synapse/storage/schema/main/delta/28/users_is_guest.sql +41 -0
  582. synapse/storage/schema/main/delta/29/push_actions.sql +54 -0
  583. synapse/storage/schema/main/delta/30/alias_creator.sql +35 -0
  584. synapse/storage/schema/main/delta/30/as_users.py +82 -0
  585. synapse/storage/schema/main/delta/30/deleted_pushers.sql +44 -0
  586. synapse/storage/schema/main/delta/30/presence_stream.sql +49 -0
  587. synapse/storage/schema/main/delta/30/public_rooms.sql +42 -0
  588. synapse/storage/schema/main/delta/30/push_rule_stream.sql +57 -0
  589. synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql +43 -0
  590. synapse/storage/schema/main/delta/31/invites.sql +61 -0
  591. synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql +46 -0
  592. synapse/storage/schema/main/delta/31/pushers_0.py +92 -0
  593. synapse/storage/schema/main/delta/31/pushers_index.sql +41 -0
  594. synapse/storage/schema/main/delta/31/search_update.py +65 -0
  595. synapse/storage/schema/main/delta/32/events.sql +35 -0
  596. synapse/storage/schema/main/delta/32/openid.sql +9 -0
  597. synapse/storage/schema/main/delta/32/pusher_throttle.sql +42 -0
  598. synapse/storage/schema/main/delta/32/remove_indices.sql +52 -0
  599. synapse/storage/schema/main/delta/32/reports.sql +44 -0
  600. synapse/storage/schema/main/delta/33/access_tokens_device_index.sql +36 -0
  601. synapse/storage/schema/main/delta/33/devices.sql +40 -0
  602. synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql +38 -0
  603. synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql +39 -0
  604. synapse/storage/schema/main/delta/33/event_fields.py +61 -0
  605. synapse/storage/schema/main/delta/33/remote_media_ts.py +43 -0
  606. synapse/storage/schema/main/delta/33/user_ips_index.sql +36 -0
  607. synapse/storage/schema/main/delta/34/appservice_stream.sql +42 -0
  608. synapse/storage/schema/main/delta/34/cache_stream.py +50 -0
  609. synapse/storage/schema/main/delta/34/device_inbox.sql +43 -0
  610. synapse/storage/schema/main/delta/34/push_display_name_rename.sql +39 -0
  611. synapse/storage/schema/main/delta/34/received_txn_purge.py +36 -0
  612. synapse/storage/schema/main/delta/35/contains_url.sql +36 -0
  613. synapse/storage/schema/main/delta/35/device_outbox.sql +58 -0
  614. synapse/storage/schema/main/delta/35/device_stream_id.sql +40 -0
  615. synapse/storage/schema/main/delta/35/event_push_actions_index.sql +36 -0
  616. synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql +52 -0
  617. synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql +56 -0
  618. synapse/storage/schema/main/delta/36/readd_public_rooms.sql +45 -0
  619. synapse/storage/schema/main/delta/37/remove_auth_idx.py +89 -0
  620. synapse/storage/schema/main/delta/37/user_threepids.sql +71 -0
  621. synapse/storage/schema/main/delta/38/postgres_fts_gist.sql +38 -0
  622. synapse/storage/schema/main/delta/39/appservice_room_list.sql +48 -0
  623. synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql +35 -0
  624. synapse/storage/schema/main/delta/39/event_push_index.sql +36 -0
  625. synapse/storage/schema/main/delta/39/federation_out_position.sql +41 -0
  626. synapse/storage/schema/main/delta/39/membership_profile.sql +39 -0
  627. synapse/storage/schema/main/delta/40/current_state_idx.sql +36 -0
  628. synapse/storage/schema/main/delta/40/device_inbox.sql +40 -0
  629. synapse/storage/schema/main/delta/40/device_list_streams.sql +79 -0
  630. synapse/storage/schema/main/delta/40/event_push_summary.sql +57 -0
  631. synapse/storage/schema/main/delta/40/pushers.sql +58 -0
  632. synapse/storage/schema/main/delta/41/device_list_stream_idx.sql +36 -0
  633. synapse/storage/schema/main/delta/41/device_outbound_index.sql +35 -0
  634. synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql +36 -0
  635. synapse/storage/schema/main/delta/41/ratelimit.sql +41 -0
  636. synapse/storage/schema/main/delta/42/current_state_delta.sql +48 -0
  637. synapse/storage/schema/main/delta/42/device_list_last_id.sql +52 -0
  638. synapse/storage/schema/main/delta/42/event_auth_state_only.sql +36 -0
  639. synapse/storage/schema/main/delta/42/user_dir.py +88 -0
  640. synapse/storage/schema/main/delta/43/blocked_rooms.sql +40 -0
  641. synapse/storage/schema/main/delta/43/quarantine_media.sql +36 -0
  642. synapse/storage/schema/main/delta/43/url_cache.sql +35 -0
  643. synapse/storage/schema/main/delta/43/user_share.sql +52 -0
  644. synapse/storage/schema/main/delta/44/expire_url_cache.sql +60 -0
  645. synapse/storage/schema/main/delta/45/group_server.sql +186 -0
  646. synapse/storage/schema/main/delta/45/profile_cache.sql +47 -0
  647. synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql +36 -0
  648. synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql +54 -0
  649. synapse/storage/schema/main/delta/46/group_server.sql +51 -0
  650. synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql +43 -0
  651. synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql +54 -0
  652. synapse/storage/schema/main/delta/46/user_dir_typos.sql +43 -0
  653. synapse/storage/schema/main/delta/47/last_access_media.sql +35 -0
  654. synapse/storage/schema/main/delta/47/postgres_fts_gin.sql +36 -0
  655. synapse/storage/schema/main/delta/47/push_actions_staging.sql +47 -0
  656. synapse/storage/schema/main/delta/48/add_user_consent.sql +37 -0
  657. synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql +36 -0
  658. synapse/storage/schema/main/delta/48/deactivated_users.sql +44 -0
  659. synapse/storage/schema/main/delta/48/group_unique_indexes.py +67 -0
  660. synapse/storage/schema/main/delta/48/groups_joinable.sql +41 -0
  661. synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql +39 -0
  662. synapse/storage/schema/main/delta/49/add_user_daily_visits.sql +40 -0
  663. synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql +36 -0
  664. synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql +38 -0
  665. synapse/storage/schema/main/delta/50/erasure_store.sql +40 -0
  666. synapse/storage/schema/main/delta/50/make_event_content_nullable.py +102 -0
  667. synapse/storage/schema/main/delta/51/e2e_room_keys.sql +58 -0
  668. synapse/storage/schema/main/delta/51/monthly_active_users.sql +46 -0
  669. synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql +38 -0
  670. synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql +55 -0
  671. synapse/storage/schema/main/delta/52/e2e_room_keys.sql +72 -0
  672. synapse/storage/schema/main/delta/53/add_user_type_to_users.sql +38 -0
  673. synapse/storage/schema/main/delta/53/drop_sent_transactions.sql +35 -0
  674. synapse/storage/schema/main/delta/53/event_format_version.sql +35 -0
  675. synapse/storage/schema/main/delta/53/user_dir_populate.sql +49 -0
  676. synapse/storage/schema/main/delta/53/user_ips_index.sql +49 -0
  677. synapse/storage/schema/main/delta/53/user_share.sql +63 -0
  678. synapse/storage/schema/main/delta/53/user_threepid_id.sql +48 -0
  679. synapse/storage/schema/main/delta/53/users_in_public_rooms.sql +47 -0
  680. synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql +49 -0
  681. synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql +42 -0
  682. synapse/storage/schema/main/delta/54/delete_forward_extremities.sql +42 -0
  683. synapse/storage/schema/main/delta/54/drop_legacy_tables.sql +49 -0
  684. synapse/storage/schema/main/delta/54/drop_presence_list.sql +35 -0
  685. synapse/storage/schema/main/delta/54/relations.sql +46 -0
  686. synapse/storage/schema/main/delta/54/stats.sql +99 -0
  687. synapse/storage/schema/main/delta/54/stats2.sql +47 -0
  688. synapse/storage/schema/main/delta/55/access_token_expiry.sql +37 -0
  689. synapse/storage/schema/main/delta/55/track_threepid_validations.sql +50 -0
  690. synapse/storage/schema/main/delta/55/users_alter_deactivated.sql +38 -0
  691. synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql +39 -0
  692. synapse/storage/schema/main/delta/56/current_state_events_membership.sql +41 -0
  693. synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql +43 -0
  694. synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql +44 -0
  695. synapse/storage/schema/main/delta/56/destinations_failure_ts.sql +44 -0
  696. synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres +18 -0
  697. synapse/storage/schema/main/delta/56/device_stream_id_insert.sql +39 -0
  698. synapse/storage/schema/main/delta/56/devices_last_seen.sql +43 -0
  699. synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql +39 -0
  700. synapse/storage/schema/main/delta/56/event_expiry.sql +40 -0
  701. synapse/storage/schema/main/delta/56/event_labels.sql +49 -0
  702. synapse/storage/schema/main/delta/56/event_labels_background_update.sql +36 -0
  703. synapse/storage/schema/main/delta/56/fix_room_keys_index.sql +37 -0
  704. synapse/storage/schema/main/delta/56/hidden_devices.sql +37 -0
  705. synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite +42 -0
  706. synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql +48 -0
  707. synapse/storage/schema/main/delta/56/public_room_list_idx.sql +35 -0
  708. synapse/storage/schema/main/delta/56/redaction_censor.sql +35 -0
  709. synapse/storage/schema/main/delta/56/redaction_censor2.sql +41 -0
  710. synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres +25 -0
  711. synapse/storage/schema/main/delta/56/redaction_censor4.sql +35 -0
  712. synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql +38 -0
  713. synapse/storage/schema/main/delta/56/room_key_etag.sql +36 -0
  714. synapse/storage/schema/main/delta/56/room_membership_idx.sql +37 -0
  715. synapse/storage/schema/main/delta/56/room_retention.sql +52 -0
  716. synapse/storage/schema/main/delta/56/signing_keys.sql +75 -0
  717. synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql +41 -0
  718. synapse/storage/schema/main/delta/56/stats_separated.sql +175 -0
  719. synapse/storage/schema/main/delta/56/unique_user_filter_index.py +46 -0
  720. synapse/storage/schema/main/delta/56/user_external_ids.sql +43 -0
  721. synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql +36 -0
  722. synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql +41 -0
  723. synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql +44 -0
  724. synapse/storage/schema/main/delta/57/local_current_membership.py +111 -0
  725. synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql +40 -0
  726. synapse/storage/schema/main/delta/57/rooms_version_column.sql +43 -0
  727. synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres +35 -0
  728. synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite +22 -0
  729. synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres +39 -0
  730. synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite +23 -0
  731. synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql +41 -0
  732. synapse/storage/schema/main/delta/58/03persist_ui_auth.sql +55 -0
  733. synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres +30 -0
  734. synapse/storage/schema/main/delta/58/06dlols_unique_idx.py +83 -0
  735. synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres +33 -0
  736. synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite +44 -0
  737. synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql +44 -0
  738. synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres +18 -0
  739. synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite +18 -0
  740. synapse/storage/schema/main/delta/58/09shadow_ban.sql +37 -0
  741. synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql +47 -0
  742. synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql +41 -0
  743. synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql +41 -0
  744. synapse/storage/schema/main/delta/58/11dehydration.sql +39 -0
  745. synapse/storage/schema/main/delta/58/11fallback.sql +43 -0
  746. synapse/storage/schema/main/delta/58/11user_id_seq.py +38 -0
  747. synapse/storage/schema/main/delta/58/12room_stats.sql +51 -0
  748. synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql +36 -0
  749. synapse/storage/schema/main/delta/58/14events_instance_name.sql +35 -0
  750. synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres +28 -0
  751. synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql +61 -0
  752. synapse/storage/schema/main/delta/58/15unread_count.sql +45 -0
  753. synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql +41 -0
  754. synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql +40 -0
  755. synapse/storage/schema/main/delta/58/18stream_positions.sql +41 -0
  756. synapse/storage/schema/main/delta/58/19instance_map.sql.postgres +25 -0
  757. synapse/storage/schema/main/delta/58/19txn_id.sql +59 -0
  758. synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql +36 -0
  759. synapse/storage/schema/main/delta/58/20user_daily_visits.sql +37 -0
  760. synapse/storage/schema/main/delta/58/21as_device_stream.sql +36 -0
  761. synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql +1 -0
  762. synapse/storage/schema/main/delta/58/22puppet_token.sql +36 -0
  763. synapse/storage/schema/main/delta/58/22users_have_local_media.sql +2 -0
  764. synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql +36 -0
  765. synapse/storage/schema/main/delta/58/24drop_event_json_index.sql +38 -0
  766. synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql +36 -0
  767. synapse/storage/schema/main/delta/58/26access_token_last_validated.sql +37 -0
  768. synapse/storage/schema/main/delta/58/27local_invites.sql +37 -0
  769. synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres +16 -0
  770. synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite +62 -0
  771. synapse/storage/schema/main/delta/59/01ignored_user.py +85 -0
  772. synapse/storage/schema/main/delta/59/02shard_send_to_device.sql +37 -0
  773. synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres +25 -0
  774. synapse/storage/schema/main/delta/59/04_event_auth_chains.sql +71 -0
  775. synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres +16 -0
  776. synapse/storage/schema/main/delta/59/04drop_account_data.sql +36 -0
  777. synapse/storage/schema/main/delta/59/05cache_invalidation.sql +36 -0
  778. synapse/storage/schema/main/delta/59/06chain_cover_index.sql +36 -0
  779. synapse/storage/schema/main/delta/59/06shard_account_data.sql +39 -0
  780. synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres +32 -0
  781. synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql +37 -0
  782. synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql +39 -0
  783. synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql +39 -0
  784. synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql +45 -0
  785. synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql +36 -0
  786. synapse/storage/schema/main/delta/59/11add_knock_members_to_stats.sql +39 -0
  787. synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres +22 -0
  788. synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql +37 -0
  789. synapse/storage/schema/main/delta/59/12presence_stream_instance.sql +37 -0
  790. synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres +20 -0
  791. synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql +53 -0
  792. synapse/storage/schema/main/delta/59/14refresh_tokens.sql +53 -0
  793. synapse/storage/schema/main/delta/59/15locks.sql +56 -0
  794. synapse/storage/schema/main/delta/59/16federation_inbound_staging.sql +51 -0
  795. synapse/storage/schema/main/delta/60/01recreate_stream_ordering.sql.postgres +45 -0
  796. synapse/storage/schema/main/delta/60/02change_stream_ordering_columns.sql.postgres +30 -0
  797. synapse/storage/schema/main/delta/61/01change_appservices_txns.sql.postgres +23 -0
  798. synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql +68 -0
  799. synapse/storage/schema/main/delta/61/02drop_redundant_room_depth_index.sql +37 -0
  800. synapse/storage/schema/main/delta/61/03recreate_min_depth.py +74 -0
  801. synapse/storage/schema/main/delta/62/01insertion_event_extremities.sql +43 -0
  802. synapse/storage/schema/main/delta/63/01create_registration_tokens.sql +42 -0
  803. synapse/storage/schema/main/delta/63/02delete_unlinked_email_pushers.sql +39 -0
  804. synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql +36 -0
  805. synapse/storage/schema/main/delta/63/03session_store.sql +42 -0
  806. synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql +37 -0
  807. synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.postgres +23 -0
  808. synapse/storage/schema/main/delta/64/01msc2716_chunk_to_batch_rename.sql.sqlite +37 -0
  809. synapse/storage/schema/main/delta/65/01msc2716_insertion_event_edges.sql +38 -0
  810. synapse/storage/schema/main/delta/65/03remove_hidden_devices_from_device_inbox.sql +41 -0
  811. synapse/storage/schema/main/delta/65/04_local_group_updates.sql +37 -0
  812. synapse/storage/schema/main/delta/65/05_remove_room_stats_historical_and_user_stats_historical.sql +38 -0
  813. synapse/storage/schema/main/delta/65/06remove_deleted_devices_from_device_inbox.sql +53 -0
  814. synapse/storage/schema/main/delta/65/07_arbitrary_relations.sql +37 -0
  815. synapse/storage/schema/main/delta/65/08_device_inbox_background_updates.sql +37 -0
  816. synapse/storage/schema/main/delta/65/10_expirable_refresh_tokens.sql +47 -0
  817. synapse/storage/schema/main/delta/65/11_devices_auth_provider_session.sql +46 -0
  818. synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql +37 -0
  819. synapse/storage/schema/main/delta/68/01event_columns.sql +45 -0
  820. synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql +40 -0
  821. synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql +39 -0
  822. synapse/storage/schema/main/delta/68/04_refresh_tokens_index_next_token_id.sql +47 -0
  823. synapse/storage/schema/main/delta/68/04partial_state_rooms.sql +60 -0
  824. synapse/storage/schema/main/delta/68/05_delete_non_strings_from_event_search.sql.sqlite +22 -0
  825. synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py +80 -0
  826. synapse/storage/schema/main/delta/68/06_msc3202_add_device_list_appservice_stream_type.sql +42 -0
  827. synapse/storage/schema/main/delta/69/01as_txn_seq.py +54 -0
  828. synapse/storage/schema/main/delta/69/01device_list_oubound_by_room.sql +57 -0
  829. synapse/storage/schema/main/delta/69/02cache_invalidation_index.sql +37 -0
  830. synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql +39 -0
  831. synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.postgres +43 -0
  832. synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.sqlite +47 -0
  833. synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql +80 -0
  834. synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql +37 -0
  835. synapse/storage/schema/main/delta/72/01add_room_type_to_state_stats.sql +38 -0
  836. synapse/storage/schema/main/delta/72/01event_push_summary_receipt.sql +54 -0
  837. synapse/storage/schema/main/delta/72/02event_push_actions_index.sql +38 -0
  838. synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py +57 -0
  839. synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql +36 -0
  840. synapse/storage/schema/main/delta/72/03remove_groups.sql +50 -0
  841. synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres +17 -0
  842. synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite +40 -0
  843. synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql +38 -0
  844. synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql +38 -0
  845. synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql +35 -0
  846. synapse/storage/schema/main/delta/72/06thread_notifications.sql +49 -0
  847. synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py +67 -0
  848. synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres +30 -0
  849. synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite +70 -0
  850. synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres +23 -0
  851. synapse/storage/schema/main/delta/72/08thread_receipts.sql +39 -0
  852. synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite +56 -0
  853. synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql +48 -0
  854. synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql +35 -0
  855. synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql +41 -0
  856. synapse/storage/schema/main/delta/73/03pusher_device_id.sql +39 -0
  857. synapse/storage/schema/main/delta/73/03users_approved_column.sql +39 -0
  858. synapse/storage/schema/main/delta/73/04partial_join_details.sql +42 -0
  859. synapse/storage/schema/main/delta/73/04pending_device_list_updates.sql +47 -0
  860. synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres +22 -0
  861. synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite +24 -0
  862. synapse/storage/schema/main/delta/73/06thread_notifications_thread_id_idx.sql +42 -0
  863. synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.postgres +23 -0
  864. synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.sqlite +76 -0
  865. synapse/storage/schema/main/delta/73/09partial_joined_via_destination.sql +37 -0
  866. synapse/storage/schema/main/delta/73/09threads_table.sql +49 -0
  867. synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py +71 -0
  868. synapse/storage/schema/main/delta/73/10login_tokens.sql +54 -0
  869. synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres +33 -0
  870. synapse/storage/schema/main/delta/73/12refactor_device_list_outbound_pokes.sql +72 -0
  871. synapse/storage/schema/main/delta/73/13add_device_lists_index.sql +39 -0
  872. synapse/storage/schema/main/delta/73/20_un_partial_stated_room_stream.sql +51 -0
  873. synapse/storage/schema/main/delta/73/21_un_partial_stated_room_stream_seq.sql.postgres +20 -0
  874. synapse/storage/schema/main/delta/73/22_rebuild_user_dir_stats.sql +48 -0
  875. synapse/storage/schema/main/delta/73/22_un_partial_stated_event_stream.sql +53 -0
  876. synapse/storage/schema/main/delta/73/23_fix_thread_index.sql +52 -0
  877. synapse/storage/schema/main/delta/73/23_un_partial_stated_room_stream_seq.sql.postgres +20 -0
  878. synapse/storage/schema/main/delta/73/24_events_jump_to_date_index.sql +36 -0
  879. synapse/storage/schema/main/delta/73/25drop_presence.sql +36 -0
  880. synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql +58 -0
  881. synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql +38 -0
  882. synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres +29 -0
  883. synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite +23 -0
  884. synapse/storage/schema/main/delta/74/03_room_membership_index.sql +38 -0
  885. synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql +36 -0
  886. synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py +87 -0
  887. synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql +72 -0
  888. synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres +52 -0
  889. synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql +39 -0
  890. synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql +39 -0
  891. synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql +46 -0
  892. synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql +43 -0
  893. synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres +16 -0
  894. synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres +16 -0
  895. synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql +35 -0
  896. synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql +35 -0
  897. synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql +67 -0
  898. synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite +102 -0
  899. synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres +27 -0
  900. synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres +27 -0
  901. synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres +29 -0
  902. synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql +39 -0
  903. synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py +99 -0
  904. synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py +100 -0
  905. synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py +72 -0
  906. synapse/storage/schema/main/delta/78/03event_extremities_constraints.py +65 -0
  907. synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py +32 -0
  908. synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres +102 -0
  909. synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite +72 -0
  910. synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py +70 -0
  911. synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres +69 -0
  912. synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite +65 -0
  913. synapse/storage/schema/main/delta/80/01_users_alter_locked.sql +35 -0
  914. synapse/storage/schema/main/delta/80/02_read_write_locks_unlogged.sql.postgres +30 -0
  915. synapse/storage/schema/main/delta/80/02_scheduled_tasks.sql +47 -0
  916. synapse/storage/schema/main/delta/80/03_read_write_locks_triggers.sql.postgres +37 -0
  917. synapse/storage/schema/main/delta/80/04_read_write_locks_deadlock.sql.postgres +71 -0
  918. synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql +35 -0
  919. synapse/storage/schema/main/delta/82/04_add_indices_for_purging_rooms.sql +39 -0
  920. synapse/storage/schema/main/delta/82/05gaps.sql +44 -0
  921. synapse/storage/schema/main/delta/83/01_drop_old_tables.sql +43 -0
  922. synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite +17 -0
  923. synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql +34 -0
  924. synapse/storage/schema/main/delta/83/06_event_push_summary_room.sql +36 -0
  925. synapse/storage/schema/main/delta/84/01_auth_links_stats.sql.postgres +20 -0
  926. synapse/storage/schema/main/delta/84/02_auth_links_index.sql +16 -0
  927. synapse/storage/schema/main/delta/84/03_auth_links_analyze.sql.postgres +16 -0
  928. synapse/storage/schema/main/delta/84/04_access_token_index.sql +15 -0
  929. synapse/storage/schema/main/delta/85/01_add_suspended.sql +14 -0
  930. synapse/storage/schema/main/delta/85/02_add_instance_names.sql +27 -0
  931. synapse/storage/schema/main/delta/85/03_new_sequences.sql.postgres +54 -0
  932. synapse/storage/schema/main/delta/85/04_cleanup_device_federation_outbox.sql +15 -0
  933. synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql +16 -0
  934. synapse/storage/schema/main/delta/85/06_add_room_reports.sql +20 -0
  935. synapse/storage/schema/main/delta/86/01_authenticate_media.sql +15 -0
  936. synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql +15 -0
  937. synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql +169 -0
  938. synapse/storage/schema/main/delta/87/02_per_connection_state.sql +81 -0
  939. synapse/storage/schema/main/delta/87/03_current_state_index.sql +19 -0
  940. synapse/storage/schema/main/delta/88/01_add_delayed_events.sql +43 -0
  941. synapse/storage/schema/main/delta/88/01_custom_profile_fields.sql +15 -0
  942. synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql +21 -0
  943. synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql +18 -0
  944. synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql +18 -0
  945. synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres +19 -0
  946. synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite +19 -0
  947. synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql +20 -0
  948. synapse/storage/schema/main/delta/88/06_events_received_ts_index.sql +17 -0
  949. synapse/storage/schema/main/delta/89/01_sliding_sync_membership_snapshot_index.sql +15 -0
  950. synapse/storage/schema/main/delta/90/01_add_column_participant_room_memberships_table.sql +16 -0
  951. synapse/storage/schema/main/delta/91/01_media_hash.sql +28 -0
  952. synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres +16 -0
  953. synapse/storage/schema/main/delta/92/01_remove_trigger.sql.sqlite +16 -0
  954. synapse/storage/schema/main/delta/92/02_remove_populate_participant_bg_update.sql +17 -0
  955. synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql +16 -0
  956. synapse/storage/schema/main/delta/92/04_thread_subscriptions.sql +59 -0
  957. synapse/storage/schema/main/delta/92/04_thread_subscriptions_seq.sql.postgres +19 -0
  958. synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql +17 -0
  959. synapse/storage/schema/main/delta/92/05_thread_subscriptions_comments.sql.postgres +18 -0
  960. synapse/storage/schema/main/delta/92/06_device_federation_inbox_index.sql +16 -0
  961. synapse/storage/schema/main/delta/92/06_threads_last_sent_stream_ordering_comments.sql.postgres +24 -0
  962. synapse/storage/schema/main/delta/92/07_add_user_reports.sql +22 -0
  963. synapse/storage/schema/main/delta/92/07_event_txn_id_device_id_txn_id2.sql +15 -0
  964. synapse/storage/schema/main/delta/92/08_room_ban_redactions.sql +21 -0
  965. synapse/storage/schema/main/delta/92/08_thread_subscriptions_seq_fixup.sql.postgres +19 -0
  966. synapse/storage/schema/main/delta/92/09_thread_subscriptions_update.sql +20 -0
  967. synapse/storage/schema/main/delta/92/09_thread_subscriptions_update.sql.postgres +18 -0
  968. synapse/storage/schema/main/delta/93/01_add_delayed_events.sql +15 -0
  969. synapse/storage/schema/main/full_schemas/72/full.sql.postgres +1344 -0
  970. synapse/storage/schema/main/full_schemas/72/full.sql.sqlite +646 -0
  971. synapse/storage/schema/state/delta/23/drop_state_index.sql +35 -0
  972. synapse/storage/schema/state/delta/32/remove_state_indices.sql +38 -0
  973. synapse/storage/schema/state/delta/35/add_state_index.sql +36 -0
  974. synapse/storage/schema/state/delta/35/state.sql +41 -0
  975. synapse/storage/schema/state/delta/35/state_dedupe.sql +36 -0
  976. synapse/storage/schema/state/delta/47/state_group_seq.py +38 -0
  977. synapse/storage/schema/state/delta/56/state_group_room_idx.sql +36 -0
  978. synapse/storage/schema/state/delta/61/02state_groups_state_n_distinct.sql.postgres +34 -0
  979. synapse/storage/schema/state/delta/70/08_state_group_edges_unique.sql +36 -0
  980. synapse/storage/schema/state/delta/89/01_state_groups_deletion.sql +39 -0
  981. synapse/storage/schema/state/delta/90/02_delete_unreferenced_state_groups.sql +16 -0
  982. synapse/storage/schema/state/delta/90/03_remove_old_deletion_bg_update.sql +15 -0
  983. synapse/storage/schema/state/full_schemas/72/full.sql.postgres +30 -0
  984. synapse/storage/schema/state/full_schemas/72/full.sql.sqlite +20 -0
  985. synapse/storage/types.py +183 -0
  986. synapse/storage/util/__init__.py +20 -0
  987. synapse/storage/util/id_generators.py +928 -0
  988. synapse/storage/util/partial_state_events_tracker.py +194 -0
  989. synapse/storage/util/sequence.py +315 -0
  990. synapse/streams/__init__.py +43 -0
  991. synapse/streams/config.py +91 -0
  992. synapse/streams/events.py +203 -0
  993. synapse/synapse_rust/__init__.pyi +3 -0
  994. synapse/synapse_rust/acl.pyi +20 -0
  995. synapse/synapse_rust/events.pyi +136 -0
  996. synapse/synapse_rust/http_client.pyi +32 -0
  997. synapse/synapse_rust/push.pyi +86 -0
  998. synapse/synapse_rust/rendezvous.pyi +30 -0
  999. synapse/synapse_rust/segmenter.pyi +1 -0
  1000. synapse/synapse_rust.abi3.so +0 -0
  1001. synapse/types/__init__.py +1600 -0
  1002. synapse/types/handlers/__init__.py +93 -0
  1003. synapse/types/handlers/policy_server.py +16 -0
  1004. synapse/types/handlers/sliding_sync.py +908 -0
  1005. synapse/types/rest/__init__.py +25 -0
  1006. synapse/types/rest/client/__init__.py +413 -0
  1007. synapse/types/state.py +634 -0
  1008. synapse/types/storage/__init__.py +66 -0
  1009. synapse/util/__init__.py +169 -0
  1010. synapse/util/async_helpers.py +1045 -0
  1011. synapse/util/background_queue.py +142 -0
  1012. synapse/util/batching_queue.py +202 -0
  1013. synapse/util/caches/__init__.py +300 -0
  1014. synapse/util/caches/cached_call.py +143 -0
  1015. synapse/util/caches/deferred_cache.py +530 -0
  1016. synapse/util/caches/descriptors.py +692 -0
  1017. synapse/util/caches/dictionary_cache.py +346 -0
  1018. synapse/util/caches/expiringcache.py +249 -0
  1019. synapse/util/caches/lrucache.py +975 -0
  1020. synapse/util/caches/response_cache.py +322 -0
  1021. synapse/util/caches/stream_change_cache.py +370 -0
  1022. synapse/util/caches/treecache.py +189 -0
  1023. synapse/util/caches/ttlcache.py +197 -0
  1024. synapse/util/cancellation.py +63 -0
  1025. synapse/util/check_dependencies.py +335 -0
  1026. synapse/util/clock.py +567 -0
  1027. synapse/util/constants.py +22 -0
  1028. synapse/util/daemonize.py +165 -0
  1029. synapse/util/distributor.py +157 -0
  1030. synapse/util/events.py +134 -0
  1031. synapse/util/file_consumer.py +164 -0
  1032. synapse/util/frozenutils.py +57 -0
  1033. synapse/util/gai_resolver.py +178 -0
  1034. synapse/util/hash.py +38 -0
  1035. synapse/util/httpresourcetree.py +108 -0
  1036. synapse/util/iterutils.py +189 -0
  1037. synapse/util/json.py +56 -0
  1038. synapse/util/linked_list.py +156 -0
  1039. synapse/util/logcontext.py +46 -0
  1040. synapse/util/logformatter.py +28 -0
  1041. synapse/util/macaroons.py +325 -0
  1042. synapse/util/manhole.py +191 -0
  1043. synapse/util/metrics.py +339 -0
  1044. synapse/util/module_loader.py +116 -0
  1045. synapse/util/msisdn.py +51 -0
  1046. synapse/util/patch_inline_callbacks.py +250 -0
  1047. synapse/util/pydantic_models.py +63 -0
  1048. synapse/util/ratelimitutils.py +419 -0
  1049. synapse/util/retryutils.py +339 -0
  1050. synapse/util/rlimit.py +42 -0
  1051. synapse/util/rust.py +133 -0
  1052. synapse/util/sentinel.py +21 -0
  1053. synapse/util/stringutils.py +293 -0
  1054. synapse/util/task_scheduler.py +493 -0
  1055. synapse/util/templates.py +126 -0
  1056. synapse/util/threepids.py +123 -0
  1057. synapse/util/wheel_timer.py +112 -0
  1058. synapse/visibility.py +835 -0
@@ -0,0 +1,2633 @@
1
+ #
2
+ # This file is licensed under the Affero General Public License (AGPL) version 3.
3
+ #
4
+ # Copyright 2020 The Matrix.org Foundation C.I.C.
5
+ # Copyright 2014-2016 OpenMarket Ltd
6
+ # Copyright (C) 2023 New Vector, Ltd
7
+ #
8
+ # This program is free software: you can redistribute it and/or modify
9
+ # it under the terms of the GNU Affero General Public License as
10
+ # published by the Free Software Foundation, either version 3 of the
11
+ # License, or (at your option) any later version.
12
+ #
13
+ # See the GNU Affero General Public License for more details:
14
+ # <https://www.gnu.org/licenses/agpl-3.0.html>.
15
+ #
16
+ # Originally licensed under the Apache License, Version 2.0:
17
+ # <http://www.apache.org/licenses/LICENSE-2.0>.
18
+ #
19
+ # [This file includes modifications made by New Vector Limited]
20
+ #
21
+ #
22
+
23
+ """
24
+ This module is responsible for keeping track of presence status of local
25
+ and remote users.
26
+
27
+ The methods that define policy are:
28
+ - PresenceHandler._update_states
29
+ - PresenceHandler._handle_timeouts
30
+ - should_notify
31
+
32
+ # Tracking local presence
33
+
34
+ For local users, presence is tracked on a per-device basis. When a user has multiple
35
+ devices, the user's presence state is derived by coalescing the presence from each
36
+ device:
37
+
38
+ BUSY > ONLINE > UNAVAILABLE > OFFLINE
39
+
40
+ The time that each device was last active and last synced is tracked in order to
41
+ automatically downgrade a device's presence state:
42
+
43
+ A device may move from ONLINE -> UNAVAILABLE, if it has not been active for
44
+ a period of time.
45
+
46
+ A device may go from any state -> OFFLINE, if it is not active and has not
47
+ synced for a period of time.
48
+
49
+ The timeouts are handled using a wheel timer, which has coarse buckets. Timings
50
+ do not need to be exact.
51
+
52
+ Generally a device's presence state is updated whenever a user syncs (via the
53
+ set_presence parameter), when the presence API is called, or if "pro-active"
54
+ events occur, including:
55
+
56
+ * Sending an event, receipt, read marker.
57
+ * Updating typing status.
58
+
59
+ The busy state has special status: it is not downgraded by a call to
60
+ sync with a lower priority state *and* it takes a long period of time to transition
61
+ to offline.
62
+
63
+ # Persisting (and restoring) presence
64
+
65
+ For all users, presence is persisted on a per-user basis. Data is kept in-memory
66
+ and persisted periodically. When Synapse starts, each worker loads the current
67
+ presence state and then tracks the presence stream to keep itself up-to-date.
68
+
69
+ When restoring presence for local users, a pseudo-device is created to match the
70
+ user state; this device follows the normal timeout logic (see above) and will
71
+ automatically be replaced with any information from currently available devices.
72
+
73
+ """
74
+
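To make the coalescing rule above concrete, here is a minimal illustrative sketch; it is not part of this diff (the real logic lives in the _combine_device_states helper further down this file) and it assumes only the PresenceState constants imported below.

# Illustrative sketch only -- not part of the packaged file.
from synapse.api.constants import PresenceState

# BUSY > ONLINE > UNAVAILABLE > OFFLINE, as described in the docstring above.
_PRIORITY = {
    PresenceState.OFFLINE: 0,
    PresenceState.UNAVAILABLE: 1,
    PresenceState.ONLINE: 2,
    PresenceState.BUSY: 3,
}

def coalesce_device_presence(device_states):
    """Pick the highest-priority presence state across a user's devices."""
    best = PresenceState.OFFLINE
    for state in device_states:
        if _PRIORITY.get(state, 0) > _PRIORITY[best]:
            best = state
    return best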
75
+ import abc
76
+ import contextlib
77
+ import itertools
78
+ import logging
79
+ from bisect import bisect
80
+ from contextlib import contextmanager
81
+ from types import TracebackType
82
+ from typing import (
83
+ TYPE_CHECKING,
84
+ AbstractSet,
85
+ Any,
86
+ Callable,
87
+ Collection,
88
+ ContextManager,
89
+ Generator,
90
+ Iterable,
91
+ )
92
+
93
+ from prometheus_client import Counter
94
+
95
+ import synapse.metrics
96
+ from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState
97
+ from synapse.api.errors import SynapseError
98
+ from synapse.api.presence import UserDevicePresenceState, UserPresenceState
99
+ from synapse.appservice import ApplicationService
100
+ from synapse.events.presence_router import PresenceRouter
101
+ from synapse.logging.context import run_in_background
102
+ from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
103
+ from synapse.metrics.background_process_metrics import (
104
+ wrap_as_background_process,
105
+ )
106
+ from synapse.replication.http.presence import (
107
+ ReplicationBumpPresenceActiveTime,
108
+ ReplicationPresenceSetState,
109
+ )
110
+ from synapse.replication.http.streams import ReplicationGetStreamUpdates
111
+ from synapse.replication.tcp.commands import ClearUserSyncsCommand
112
+ from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream
113
+ from synapse.storage.databases.main import DataStore
114
+ from synapse.storage.databases.main.state_deltas import StateDelta
115
+ from synapse.streams import EventSource
116
+ from synapse.types import (
117
+ JsonDict,
118
+ StrCollection,
119
+ StreamKeyType,
120
+ UserID,
121
+ get_domain_from_id,
122
+ )
123
+ from synapse.util.async_helpers import Linearizer
124
+ from synapse.util.metrics import Measure
125
+ from synapse.util.wheel_timer import WheelTimer
126
+
127
+ if TYPE_CHECKING:
128
+ from synapse.server import HomeServer
129
+
130
+ logger = logging.getLogger(__name__)
131
+
132
+
133
+ notified_presence_counter = Counter(
134
+ "synapse_handler_presence_notified_presence", "", labelnames=[SERVER_NAME_LABEL]
135
+ )
136
+ federation_presence_out_counter = Counter(
137
+ "synapse_handler_presence_federation_presence_out",
138
+ "",
139
+ labelnames=[SERVER_NAME_LABEL],
140
+ )
141
+ presence_updates_counter = Counter(
142
+ "synapse_handler_presence_presence_updates", "", labelnames=[SERVER_NAME_LABEL]
143
+ )
144
+ timers_fired_counter = Counter(
145
+ "synapse_handler_presence_timers_fired", "", labelnames=[SERVER_NAME_LABEL]
146
+ )
147
+ federation_presence_counter = Counter(
148
+ "synapse_handler_presence_federation_presence", "", labelnames=[SERVER_NAME_LABEL]
149
+ )
150
+ bump_active_time_counter = Counter(
151
+ "synapse_handler_presence_bump_active_time", "", labelnames=[SERVER_NAME_LABEL]
152
+ )
153
+
154
+ get_updates_counter = Counter(
155
+ "synapse_handler_presence_get_updates", "", labelnames=["type", SERVER_NAME_LABEL]
156
+ )
157
+
158
+ notify_reason_counter = Counter(
159
+ "synapse_handler_presence_notify_reason",
160
+ "",
161
+ labelnames=["locality", "reason", SERVER_NAME_LABEL],
162
+ )
163
+ state_transition_counter = Counter(
164
+ "synapse_handler_presence_state_transition",
165
+ "",
166
+ labelnames=["locality", "from", "to", SERVER_NAME_LABEL],
167
+ )
168
+
169
+ presence_user_to_current_state_size_gauge = LaterGauge(
170
+ name="synapse_handlers_presence_user_to_current_state_size",
171
+ desc="",
172
+ labelnames=[SERVER_NAME_LABEL],
173
+ )
174
+
175
+ presence_wheel_timer_size_gauge = LaterGauge(
176
+ name="synapse_handlers_presence_wheel_timer_size",
177
+ desc="",
178
+ labelnames=[SERVER_NAME_LABEL],
179
+ )
180
+
181
+ # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
182
+ # "currently_active"
183
+ LAST_ACTIVE_GRANULARITY = 60 * 1000
184
+
185
+ # How long to wait for a new /events or /sync request before assuming
186
+ # the client has gone.
187
+ SYNC_ONLINE_TIMEOUT = 30 * 1000
188
+ # Busy status waits longer, but does eventually go offline.
189
+ BUSY_ONLINE_TIMEOUT = 60 * 60 * 1000
190
+
191
+ # How long to wait before marking the user as idle. Compared against last active
192
+ IDLE_TIMER = 5 * 60 * 1000
193
+
194
+ # How often we expect remote servers to resend us presence.
195
+ FEDERATION_TIMEOUT = 30 * 60 * 1000
196
+
197
+ # How often to resend presence to remote servers
198
+ FEDERATION_PING_INTERVAL = 25 * 60 * 1000
199
+
200
+ # How long we will wait before assuming that the syncs from an external process
201
+ # are dead.
202
+ EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000
203
+
204
+ # Delay before a worker tells the presence handler that a user has stopped
205
+ # syncing.
206
+ UPDATE_SYNCING_USERS_MS = 10 * 1000
207
+
208
+ assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
209
+
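As a rough, hedged illustration of how the constants above combine into the downgrade rules from the module docstring (this is not the handle_timeouts() logic defined later in this file; the BUSY special case, federation pings and in-flight syncs are ignored here):

# Illustrative sketch only -- timeout values mirror the module constants above.
from synapse.api.constants import PresenceState

IDLE_TIMER = 5 * 60 * 1000  # same value as the module constant above
SYNC_ONLINE_TIMEOUT = 30 * 1000  # same value as the module constant above

def sketch_downgrade(state, last_active_ts, last_sync_ts, now):
    # ONLINE -> UNAVAILABLE once the device has been inactive for IDLE_TIMER.
    if state == PresenceState.ONLINE and now - last_active_ts > IDLE_TIMER:
        state = PresenceState.UNAVAILABLE
    # Any state -> OFFLINE once the device has neither been active nor synced
    # for SYNC_ONLINE_TIMEOUT (simplified; BUSY actually waits much longer).
    if (
        state != PresenceState.OFFLINE
        and now - last_sync_ts > SYNC_ONLINE_TIMEOUT
        and now - last_active_ts > SYNC_ONLINE_TIMEOUT
    ):
        state = PresenceState.OFFLINE
    return state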
210
+
211
+ class BasePresenceHandler(abc.ABC):
212
+ """Parts of the PresenceHandler that are shared between workers and presence
213
+ writer"""
214
+
215
+ def __init__(self, hs: "HomeServer"):
216
+ self.hs = hs
217
+ self.clock = hs.get_clock()
218
+ self.store = hs.get_datastores().main
219
+ self._storage_controllers = hs.get_storage_controllers()
220
+ self.presence_router = hs.get_presence_router()
221
+ self.state = hs.get_state_handler()
222
+ self.is_mine_id = hs.is_mine_id
223
+
224
+ self._presence_enabled = hs.config.server.presence_enabled
225
+ self._track_presence = hs.config.server.track_presence
226
+
227
+ self._federation = None
228
+ if hs.should_send_federation():
229
+ self._federation = hs.get_federation_sender()
230
+
231
+ self._federation_queue = PresenceFederationQueue(hs, self)
232
+
233
+ self.VALID_PRESENCE: tuple[str, ...] = (
234
+ PresenceState.ONLINE,
235
+ PresenceState.UNAVAILABLE,
236
+ PresenceState.OFFLINE,
237
+ )
238
+
239
+ if hs.config.experimental.msc3026_enabled:
240
+ self.VALID_PRESENCE += (PresenceState.BUSY,)
241
+
242
+ active_presence = self.store.take_presence_startup_info()
243
+ # The combined status across all user devices.
244
+ self.user_to_current_state = {state.user_id: state for state in active_presence}
245
+
246
+ @abc.abstractmethod
247
+ async def user_syncing(
248
+ self,
249
+ user_id: str,
250
+ device_id: str | None,
251
+ affect_presence: bool,
252
+ presence_state: str,
253
+ ) -> ContextManager[None]:
254
+ """Returns a context manager that should surround any stream requests
255
+ from the user.
256
+
257
+ This allows us to keep track of who is currently streaming and who isn't
258
+ without having to have timers outside of this module to avoid flickering
259
+ when users disconnect/reconnect.
260
+
261
+ Args:
262
+ user_id: the user that is starting a sync
263
+ device_id: the user's device that is starting a sync
264
+ affect_presence: If false this function will be a no-op.
265
+ Useful for streams that are not associated with an actual
266
+ client that is being used by a user.
267
+ presence_state: The presence state indicated in the sync request
268
+ """
269
+
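A hedged usage sketch of the context-manager contract described above; the caller below is hypothetical (the real callers are the sync and events servlets), and only the user_syncing signature from this file is assumed.

# Hypothetical caller, for illustration of the contract only.
from synapse.api.constants import PresenceState

async def long_poll_with_presence(presence_handler, user_id, device_id):
    ctx = await presence_handler.user_syncing(
        user_id,
        device_id,
        affect_presence=True,
        presence_state=PresenceState.ONLINE,
    )
    with ctx:
        # While inside this block the (user, device) pair is counted as
        # syncing; it only starts timing out towards offline after the
        # block exits.
        ...  # long-poll for events here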
270
+ @abc.abstractmethod
271
+ def get_currently_syncing_users_for_replication(
272
+ self,
273
+ ) -> Iterable[tuple[str, str | None]]:
274
+ """Get an iterable of syncing users and devices on this worker, to send to the presence handler
275
+
276
+ This is called when a replication connection is established. It should return
277
+ a list of tuples of user ID & device ID, which are then sent as USER_SYNC commands
278
+ to inform the process handling presence about those users/devices.
279
+
280
+ Returns:
281
+ An iterable of tuples of user ID and device ID.
282
+ """
283
+
284
+ async def get_state(self, target_user: UserID) -> UserPresenceState:
285
+ results = await self.get_states([target_user.to_string()])
286
+ return results[0]
287
+
288
+ async def get_states(
289
+ self, target_user_ids: Iterable[str]
290
+ ) -> list[UserPresenceState]:
291
+ """Get the presence state for users."""
292
+
293
+ updates_d = await self.current_state_for_users(target_user_ids)
294
+ updates = list(updates_d.values())
295
+
296
+ for user_id in set(target_user_ids) - {u.user_id for u in updates}:
297
+ updates.append(UserPresenceState.default(user_id))
298
+
299
+ return updates
300
+
301
+ async def current_state_for_users(
302
+ self, user_ids: Iterable[str]
303
+ ) -> dict[str, UserPresenceState]:
304
+ """Get the current presence state for multiple users.
305
+
306
+ Returns:
307
+ A mapping of `user_id` -> `UserPresenceState`
308
+ """
309
+ states = {}
310
+ missing = []
311
+ for user_id in user_ids:
312
+ state = self.user_to_current_state.get(user_id, None)
313
+ if state:
314
+ states[user_id] = state
315
+ else:
316
+ missing.append(user_id)
317
+
318
+ if missing:
319
+ # There are things not in our in memory cache. Lets pull them out of
320
+ # the database.
321
+ res = await self.store.get_presence_for_users(missing)
322
+ states.update(res)
323
+
324
+ for user_id in missing:
325
+ # if user has no state in database, create the state
326
+ if not res.get(user_id, None):
327
+ new_state = UserPresenceState.default(user_id)
328
+ states[user_id] = new_state
329
+ self.user_to_current_state[user_id] = new_state
330
+
331
+ return states
332
+
333
+ async def current_state_for_user(self, user_id: str) -> UserPresenceState:
334
+ """Get the current presence state for a user."""
335
+ res = await self.current_state_for_users([user_id])
336
+ return res[user_id]
337
+
338
+ @abc.abstractmethod
339
+ async def set_state(
340
+ self,
341
+ target_user: UserID,
342
+ device_id: str | None,
343
+ state: JsonDict,
344
+ force_notify: bool = False,
345
+ is_sync: bool = False,
346
+ ) -> None:
347
+ """Set the presence state of the user.
348
+
349
+ Args:
350
+ target_user: The ID of the user to set the presence state of.
351
+ device_id: the device that the user is setting the presence state of.
352
+ state: The presence state as a JSON dictionary.
353
+ force_notify: Whether to force notification of the update to clients.
354
+ is_sync: True if this update was from a sync, which results in
355
+ *not* overriding a previously set BUSY status, updating the
356
+ user's last_user_sync_ts, and ignoring the "status_msg" field of
357
+ the `state` dict.
358
+ """
359
+
360
+ @abc.abstractmethod
361
+ async def bump_presence_active_time(
362
+ self, user: UserID, device_id: str | None
363
+ ) -> None:
364
+ """We've seen the user do something that indicates they're interacting
365
+ with the app.
366
+ """
367
+
368
+ async def update_external_syncs_row( # noqa: B027 (no-op by design)
369
+ self,
370
+ process_id: str,
371
+ user_id: str,
372
+ device_id: str | None,
373
+ is_syncing: bool,
374
+ sync_time_msec: int,
375
+ ) -> None:
376
+ """Update the syncing users for an external process as a delta.
377
+
378
+ This is a no-op when presence is handled by a different worker.
379
+
380
+ Args:
381
+ process_id: An identifier for the process the users are
382
+ syncing against. This allows synapse to process updates
383
+ as users start and stop syncing against a given process.
384
+ user_id: The user who has started or stopped syncing
385
+ device_id: The user's device that has started or stopped syncing
386
+ is_syncing: Whether or not the user is now syncing
387
+ sync_time_msec: Time in ms when the user was last syncing
388
+ """
389
+
390
+ async def update_external_syncs_clear( # noqa: B027 (no-op by design)
391
+ self, process_id: str
392
+ ) -> None:
393
+ """Marks all users that had been marked as syncing by a given process
394
+ as offline.
395
+
396
+ Used when the process has stopped/disappeared.
397
+
398
+ This is a no-op when presence is handled by a different worker.
399
+ """
400
+
401
+ async def process_replication_rows(
402
+ self, stream_name: str, instance_name: str, token: int, rows: list
403
+ ) -> None:
404
+ """Process streams received over replication."""
405
+ await self._federation_queue.process_replication_rows(
406
+ stream_name, instance_name, token, rows
407
+ )
408
+
409
+ def get_federation_queue(self) -> "PresenceFederationQueue":
410
+ """Get the presence federation queue."""
411
+ return self._federation_queue
412
+
413
+ async def maybe_send_presence_to_interested_destinations(
414
+ self, states: list[UserPresenceState]
415
+ ) -> None:
416
+ """If this instance is a federation sender, send the states to all
417
+ destinations that are interested. Filters out any states for remote
418
+ users.
419
+ """
420
+
421
+ if not self._federation:
422
+ return
423
+
424
+ states = [s for s in states if self.is_mine_id(s.user_id)]
425
+
426
+ if not states:
427
+ return
428
+
429
+ hosts_to_states = await get_interested_remotes(
430
+ self.store,
431
+ self.presence_router,
432
+ states,
433
+ )
434
+
435
+ for destinations, host_states in hosts_to_states:
436
+ await self._federation.send_presence_to_destinations(
437
+ host_states, destinations
438
+ )
439
+
440
+ async def send_full_presence_to_users(self, user_ids: StrCollection) -> None:
441
+ """
442
+ Adds to the list of users who should receive a full snapshot of presence
443
+ upon their next sync. Note that this only works for local users.
444
+
445
+ Then, grabs the current presence state for a given set of users and adds it
446
+ to the top of the presence stream.
447
+
448
+ Args:
449
+ user_ids: The IDs of the local users to send full presence to.
450
+ """
451
+ # Retrieve one of the users from the given set
452
+ if not user_ids:
453
+ raise Exception(
454
+ "send_full_presence_to_users must be called with at least one user"
455
+ )
456
+ user_id = next(iter(user_ids))
457
+
458
+ # Mark all users as receiving full presence on their next sync
459
+ await self.store.add_users_to_send_full_presence_to(user_ids)
460
+
461
+ # Add a new entry to the presence stream. Since we use stream tokens to determine whether a
462
+ # local user should receive a full snapshot of presence when they sync, we need to bump the
463
+ # presence stream so that subsequent syncs with no presence activity in between won't result
464
+ # in the client receiving multiple full snapshots of presence.
465
+ #
466
+ # If we bump the stream ID, then the user will get a higher stream token next sync, and thus
467
+ # correctly won't receive a second snapshot.
468
+
469
+ # Get the current presence state for one of the users (defaults to offline if not found)
470
+ current_presence_state = await self.get_state(UserID.from_string(user_id))
471
+
472
+ # Convert the UserPresenceState object into a serializable dict
473
+ state = {
474
+ "presence": current_presence_state.state,
475
+ "status_message": current_presence_state.status_msg,
476
+ }
477
+
478
+ # Copy the presence state to the tip of the presence stream.
479
+
480
+ # We set force_notify=True here so that this presence update is guaranteed to
481
+ # increment the presence stream ID (which resending the current user's presence
482
+ # otherwise would not do).
483
+ await self.set_state(
484
+ UserID.from_string(user_id), None, state, force_notify=True
485
+ )
486
+
487
+ async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
488
+ raise NotImplementedError(
489
+ "Attempting to check presence on a non-presence worker."
490
+ )
491
+
492
+
493
+ class _NullContextManager(ContextManager[None]):
494
+ """A context manager which does nothing."""
495
+
496
+ def __exit__(
497
+ self,
498
+ exc_type: type[BaseException] | None,
499
+ exc_val: BaseException | None,
500
+ exc_tb: TracebackType | None,
501
+ ) -> None:
502
+ pass
503
+
504
+
505
+ class WorkerPresenceHandler(BasePresenceHandler):
506
+ def __init__(self, hs: "HomeServer"):
507
+ super().__init__(hs)
508
+ self.server_name = hs.hostname
509
+ self._presence_writer_instance = hs.config.worker.writers.presence[0]
510
+
511
+ # Route presence EDUs to the right worker
512
+ hs.get_federation_registry().register_instances_for_edu(
513
+ EduTypes.PRESENCE,
514
+ hs.config.worker.writers.presence,
515
+ )
516
+
517
+ # The number of ongoing syncs on this process, by (user ID, device ID).
518
+ # Empty if _presence_enabled is false.
519
+ self._user_device_to_num_current_syncs: dict[tuple[str, str | None], int] = {}
520
+
521
+ self.notifier = hs.get_notifier()
522
+ self.instance_id = hs.get_instance_id()
523
+
524
+ # (user_id, device_id) -> last_sync_ms. Lists the devices that have stopped
525
+ # syncing but we haven't notified the presence writer of that yet
526
+ self._user_devices_going_offline: dict[tuple[str, str | None], int] = {}
527
+
528
+ self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
529
+ self._set_state_client = ReplicationPresenceSetState.make_client(hs)
530
+
531
+ self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS)
532
+
533
+ hs.register_async_shutdown_handler(
534
+ phase="before",
535
+ eventType="shutdown",
536
+ shutdown_func=self._on_shutdown,
537
+ )
538
+
539
+ @wrap_as_background_process("WorkerPresenceHandler._on_shutdown")
540
+ async def _on_shutdown(self) -> None:
541
+ if self._track_presence:
542
+ self.hs.get_replication_command_handler().send_command(
543
+ ClearUserSyncsCommand(self.instance_id)
544
+ )
545
+
546
+ def send_user_sync(
547
+ self,
548
+ user_id: str,
549
+ device_id: str | None,
550
+ is_syncing: bool,
551
+ last_sync_ms: int,
552
+ ) -> None:
553
+ if self._track_presence:
554
+ self.hs.get_replication_command_handler().send_user_sync(
555
+ self.instance_id, user_id, device_id, is_syncing, last_sync_ms
556
+ )
557
+
558
+ def mark_as_coming_online(self, user_id: str, device_id: str | None) -> None:
559
+ """A user has started syncing. Send a UserSync to the presence writer,
560
+ unless they had recently stopped syncing.
561
+ """
562
+ going_offline = self._user_devices_going_offline.pop((user_id, device_id), None)
563
+ if not going_offline:
564
+ # Safe to skip because we haven't yet told the presence writer they
565
+ # were offline
566
+ self.send_user_sync(user_id, device_id, True, self.clock.time_msec())
567
+
568
+ def mark_as_going_offline(self, user_id: str, device_id: str | None) -> None:
569
+ """A user has stopped syncing. We wait before notifying the presence
570
+ writer as it's likely they'll come back soon. This allows us to avoid
571
+ sending a stopped syncing immediately followed by a started syncing
572
+ notification to the presence writer
573
+ """
574
+ self._user_devices_going_offline[(user_id, device_id)] = self.clock.time_msec()
575
+
576
+ def send_stop_syncing(self) -> None:
577
+ """Check if there are any users who have stopped syncing a while ago and
578
+ haven't come back yet. If there are, poke the presence writer about them.
579
+ """
580
+ now = self.clock.time_msec()
581
+ for (user_id, device_id), last_sync_ms in list(
582
+ self._user_devices_going_offline.items()
583
+ ):
584
+ if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
585
+ self._user_devices_going_offline.pop((user_id, device_id), None)
586
+ self.send_user_sync(user_id, device_id, False, last_sync_ms)
587
+
588
+ async def user_syncing(
589
+ self,
590
+ user_id: str,
591
+ device_id: str | None,
592
+ affect_presence: bool,
593
+ presence_state: str,
594
+ ) -> ContextManager[None]:
595
+ """Record that a user is syncing.
596
+
597
+ Called by the sync and events servlets to record that a user has connected to
598
+ this worker and is waiting for some events.
599
+ """
600
+ if not affect_presence or not self._track_presence:
601
+ return _NullContextManager()
602
+
603
+ # Note that this causes last_active_ts to be incremented which is not
604
+ # what the spec wants.
605
+ await self.set_state(
606
+ UserID.from_string(user_id),
607
+ device_id,
608
+ state={"presence": presence_state},
609
+ is_sync=True,
610
+ )
611
+
612
+ curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0)
613
+ self._user_device_to_num_current_syncs[(user_id, device_id)] = curr_sync + 1
614
+
615
+ # If this is the first in-flight sync, notify replication
616
+ if self._user_device_to_num_current_syncs[(user_id, device_id)] == 1:
617
+ self.mark_as_coming_online(user_id, device_id)
618
+
619
+ def _end() -> None:
620
+ # We check that the user_id is in user_to_num_current_syncs because
621
+ # user_to_num_current_syncs may have been cleared if we are
622
+ # shutting down.
623
+ if (user_id, device_id) in self._user_device_to_num_current_syncs:
624
+ self._user_device_to_num_current_syncs[(user_id, device_id)] -= 1
625
+
626
+ # If there are no more in-flight syncs, notify replication
627
+ if self._user_device_to_num_current_syncs[(user_id, device_id)] == 0:
628
+ self.mark_as_going_offline(user_id, device_id)
629
+
630
+ @contextlib.contextmanager
631
+ def _user_syncing() -> Generator[None, None, None]:
632
+ try:
633
+ yield
634
+ finally:
635
+ _end()
636
+
637
+ return _user_syncing()
638
+
639
+ async def notify_from_replication(
640
+ self, states: list[UserPresenceState], stream_id: int
641
+ ) -> None:
642
+ parties = await get_interested_parties(self.store, self.presence_router, states)
643
+ room_ids_to_states, users_to_states = parties
644
+
645
+ self.notifier.on_new_event(
646
+ StreamKeyType.PRESENCE,
647
+ stream_id,
648
+ rooms=room_ids_to_states.keys(),
649
+ users=users_to_states.keys(),
650
+ )
651
+
652
+ async def process_replication_rows(
653
+ self, stream_name: str, instance_name: str, token: int, rows: list
654
+ ) -> None:
655
+ await super().process_replication_rows(stream_name, instance_name, token, rows)
656
+
657
+ if stream_name != PresenceStream.NAME:
658
+ return
659
+
660
+ states = [
661
+ UserPresenceState(
662
+ row.user_id,
663
+ row.state,
664
+ row.last_active_ts,
665
+ row.last_federation_update_ts,
666
+ row.last_user_sync_ts,
667
+ row.status_msg,
668
+ row.currently_active,
669
+ )
670
+ for row in rows
671
+ ]
672
+
673
+ # The list of states to notify sync streams and remote servers about.
674
+ # This is calculated by comparing the old and new states for each user
675
+ # using `should_notify(..)`.
676
+ #
677
+ # Note that this is necessary as the presence writer will periodically
678
+ # flush presence state changes that should not be notified about to the
679
+ # DB, and so will be sent over the replication stream.
680
+ state_to_notify = []
681
+
682
+ for new_state in states:
683
+ old_state = self.user_to_current_state.get(new_state.user_id)
684
+ self.user_to_current_state[new_state.user_id] = new_state
685
+ is_mine = self.is_mine_id(new_state.user_id)
686
+ if not old_state or should_notify(
687
+ old_state, new_state, is_mine, self.server_name
688
+ ):
689
+ state_to_notify.append(new_state)
690
+
691
+ stream_id = token
692
+ await self.notify_from_replication(state_to_notify, stream_id)
693
+
694
+ # If this is a federation sender, notify about presence updates.
695
+ await self.maybe_send_presence_to_interested_destinations(state_to_notify)
696
+
697
+ def get_currently_syncing_users_for_replication(
698
+ self,
699
+ ) -> Iterable[tuple[str, str | None]]:
700
+ return [
701
+ user_id_device_id
702
+ for user_id_device_id, count in self._user_device_to_num_current_syncs.items()
703
+ if count > 0
704
+ ]
705
+
706
+ async def set_state(
707
+ self,
708
+ target_user: UserID,
709
+ device_id: str | None,
710
+ state: JsonDict,
711
+ force_notify: bool = False,
712
+ is_sync: bool = False,
713
+ ) -> None:
714
+ """Set the presence state of the user.
715
+
716
+ Args:
717
+ target_user: The ID of the user to set the presence state of.
718
+ device_id: the device that the user is setting the presence state of.
719
+ state: The presence state as a JSON dictionary.
720
+ force_notify: Whether to force notification of the update to clients.
721
+ is_sync: True if this update was from a sync, which results in
722
+ *not* overriding a previously set BUSY status, updating the
723
+ user's last_user_sync_ts, and ignoring the "status_msg" field of
724
+ the `state` dict.
725
+ """
726
+ presence = state["presence"]
727
+
728
+ if presence not in self.VALID_PRESENCE:
729
+ raise SynapseError(400, "Invalid presence state")
730
+
731
+ user_id = target_user.to_string()
732
+
733
+ # If tracking of presence is disabled, no-op
734
+ if not self._track_presence:
735
+ return
736
+
737
+ # Proxy request to instance that writes presence
738
+ await self._set_state_client(
739
+ instance_name=self._presence_writer_instance,
740
+ user_id=user_id,
741
+ device_id=device_id,
742
+ state=state,
743
+ force_notify=force_notify,
744
+ is_sync=is_sync,
745
+ )
746
+
747
+ async def bump_presence_active_time(
748
+ self, user: UserID, device_id: str | None
749
+ ) -> None:
750
+ """We've seen the user do something that indicates they're interacting
751
+ with the app.
752
+ """
753
+ # If presence is disabled, no-op
754
+ if not self._track_presence:
755
+ return
756
+
757
+ # Proxy request to instance that writes presence
758
+ user_id = user.to_string()
759
+ await self._bump_active_client(
760
+ instance_name=self._presence_writer_instance,
761
+ user_id=user_id,
762
+ device_id=device_id,
763
+ )
764
+
765
+
766
+ class PresenceHandler(BasePresenceHandler):
767
+ def __init__(self, hs: "HomeServer"):
768
+ super().__init__(hs)
769
+ self.server_name = hs.hostname
770
+ self.wheel_timer: WheelTimer[str] = WheelTimer()
771
+ self.notifier = hs.get_notifier()
772
+
773
+ federation_registry = hs.get_federation_registry()
774
+
775
+ federation_registry.register_edu_handler(
776
+ EduTypes.PRESENCE, self.incoming_presence
777
+ )
778
+
779
+ presence_user_to_current_state_size_gauge.register_hook(
780
+ homeserver_instance_id=hs.get_instance_id(),
781
+ hook=lambda: {(self.server_name,): len(self.user_to_current_state)},
782
+ )
783
+
784
+ # The per-device presence state, maps user to devices to per-device presence state.
785
+ self._user_to_device_to_current_state: dict[
786
+ str, dict[str | None, UserDevicePresenceState]
787
+ ] = {}
788
+
789
+ now = self.clock.time_msec()
790
+ if self._track_presence:
791
+ for state in self.user_to_current_state.values():
792
+ # Create a pseudo-device to properly handle timeouts. This will
793
+ # be overridden by any "real" devices within SYNC_ONLINE_TIMEOUT.
794
+ pseudo_device_id = None
795
+ self._user_to_device_to_current_state[state.user_id] = {
796
+ pseudo_device_id: UserDevicePresenceState(
797
+ user_id=state.user_id,
798
+ device_id=pseudo_device_id,
799
+ state=state.state,
800
+ last_active_ts=state.last_active_ts,
801
+ last_sync_ts=state.last_user_sync_ts,
802
+ )
803
+ }
804
+
805
+ self.wheel_timer.insert(
806
+ now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
807
+ )
808
+ self.wheel_timer.insert(
809
+ now=now,
810
+ obj=state.user_id,
811
+ then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
812
+ )
813
+ if self.is_mine_id(state.user_id):
814
+ self.wheel_timer.insert(
815
+ now=now,
816
+ obj=state.user_id,
817
+ then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
818
+ )
819
+ else:
820
+ self.wheel_timer.insert(
821
+ now=now,
822
+ obj=state.user_id,
823
+ then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
824
+ )
825
+
826
+ # Set of users who have presence in the `user_to_current_state` that
827
+ # have not yet been persisted
828
+ self.unpersisted_users_changes: set[str] = set()
829
+
830
+ hs.register_async_shutdown_handler(
831
+ phase="before",
832
+ eventType="shutdown",
833
+ shutdown_func=self._on_shutdown,
834
+ )
835
+
836
+ # Keeps track of the number of *ongoing* syncs on this process. While
837
+ # this is non zero a user will never go offline.
838
+ self._user_device_to_num_current_syncs: dict[tuple[str, str | None], int] = {}
839
+
840
+ # Keeps track of the number of *ongoing* syncs on other processes.
841
+ #
842
+ # While any sync is ongoing on another process the user's device will never
843
+ # go offline.
844
+ #
845
+ # Each process has a unique identifier and an update frequency. If
846
+ # no update is received from that process within the update period then
847
+ # we assume that all the sync requests on that process have stopped.
848
+ # Stored as a dict from process_id to set of (user_id, device_id), and
849
+ # a dict of process_id to millisecond timestamp last updated.
850
+ self.external_process_to_current_syncs: dict[
851
+ str, set[tuple[str, str | None]]
852
+ ] = {}
853
+ self.external_process_last_updated_ms: dict[str, int] = {}
854
+
855
+ self.external_sync_linearizer = Linearizer(
856
+ name="external_sync_linearizer", clock=self.clock
857
+ )
858
+
859
+ if self._track_presence:
860
+ # Start a LoopingCall in 30s that fires every 5s.
861
+ # The initial delay is to allow disconnected clients a chance to
862
+ # reconnect before we treat them as offline.
863
+ self.clock.call_later(
864
+ 30,
865
+ self.clock.looping_call,
866
+ self._handle_timeouts,
867
+ 5000,
868
+ )
869
+
870
+ # Presence information is persisted, whether or not it is being tracked
871
+ # internally.
872
+ if self._presence_enabled:
873
+ self.clock.call_later(
874
+ 60,
875
+ self.clock.looping_call,
876
+ self._persist_unpersisted_changes,
877
+ 60 * 1000,
878
+ )
879
+
880
+ presence_wheel_timer_size_gauge.register_hook(
881
+ homeserver_instance_id=hs.get_instance_id(),
882
+ hook=lambda: {(self.server_name,): len(self.wheel_timer)},
883
+ )
884
+
885
+ # Used to handle sending of presence to newly joined users/servers
886
+ if self._track_presence:
887
+ self.notifier.add_replication_callback(self.notify_new_event)
888
+
889
+ # Presence is best effort and quickly heals itself, so let's just always
890
+ # stream from the current state when we restart.
891
+ self._event_pos = self.store.get_room_max_stream_ordering()
892
+ self._event_processing = False
893
+
894
+ @wrap_as_background_process("PresenceHandler._on_shutdown")
895
+ async def _on_shutdown(self) -> None:
896
+ """Gets called when shutting down. This lets us persist any updates that
897
+ we haven't yet persisted, e.g. updates that only change some internal
898
+ timers. This allows changes to persist across startup without having to
899
+ persist every single change.
900
+
901
+ If this does not run it simply means that some of the timers will fire
902
+ earlier than they should when synapse is restarted. This affect of this
903
+ is some spurious presence changes that will self-correct.
904
+ """
905
+ # If the DB pool has already terminated, don't try updating
906
+ if not self.store.db_pool.is_running():
907
+ return
908
+
909
+ logger.info(
910
+ "Performing _on_shutdown. Persisting %d unpersisted changes",
911
+ len(self.user_to_current_state),
912
+ )
913
+
914
+ if self.unpersisted_users_changes:
915
+ await self.store.update_presence(
916
+ [
917
+ self.user_to_current_state[user_id]
918
+ for user_id in self.unpersisted_users_changes
919
+ ]
920
+ )
921
+ logger.info("Finished _on_shutdown")
922
+
923
+ @wrap_as_background_process("persist_presence_changes")
924
+ async def _persist_unpersisted_changes(self) -> None:
925
+ """We periodically persist the unpersisted changes, as otherwise they
926
+ may stack up and slow down shutdown times.
927
+ """
928
+ unpersisted = self.unpersisted_users_changes
929
+ self.unpersisted_users_changes = set()
930
+
931
+ if unpersisted:
932
+ logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
933
+ await self.store.update_presence(
934
+ [self.user_to_current_state[user_id] for user_id in unpersisted]
935
+ )
936
+
937
+ async def _update_states(
938
+ self,
939
+ new_states: Iterable[UserPresenceState],
940
+ force_notify: bool = False,
941
+ ) -> None:
942
+ """Updates presence of users. Sets the appropriate timeouts. Pokes
943
+ the notifier and federation if and only if the changed presence state
944
+ should be sent to clients/servers.
945
+
946
+ Args:
947
+ new_states: The new user presence state updates to process.
948
+ force_notify: Whether to force notifying clients of this presence state update,
949
+ even if it doesn't change the state of a user's presence (e.g. online -> online).
950
+ This is currently used to bump the max presence stream ID without changing any
951
+ user's presence (see PresenceHandler.add_users_to_send_full_presence_to).
952
+ """
953
+ if not self._presence_enabled:
954
+ # We shouldn't get here if presence is disabled, but we check anyway
955
+ # to ensure that we don't a) send out presence federation and b)
956
+ # add things to the wheel timer that will never be handled.
957
+ logger.warning("Tried to update presence states when presence is disabled")
958
+ return
959
+
960
+ now = self.clock.time_msec()
961
+
962
+ with Measure(
963
+ self.clock, name="presence_update_states", server_name=self.server_name
964
+ ):
965
+ # NOTE: We purposefully don't await between now and when we've
966
+ # calculated what we want to do with the new states, to avoid races.
967
+
968
+ to_notify = {} # Changes we want to notify everyone about
969
+ to_federation_ping = {} # These need sending keep-alives
970
+
971
+ # Only bother handling the last presence change for each user
972
+ new_states_dict = {}
973
+ for new_state in new_states:
974
+ new_states_dict[new_state.user_id] = new_state
975
+ new_states = new_states_dict.values()
976
+
977
+ for new_state in new_states:
978
+ user_id = new_state.user_id
979
+
980
+ # It's fine to not hit the database here, as the only things not in
981
+ # the current state cache are OFFLINE states, where the only field
982
+ # of interest is last_active which is safe enough to assume is 0
983
+ # here.
984
+ prev_state = self.user_to_current_state.get(
985
+ user_id, UserPresenceState.default(user_id)
986
+ )
987
+
988
+ new_state, should_notify, should_ping = handle_update(
989
+ prev_state,
990
+ new_state,
991
+ is_mine=self.is_mine_id(user_id),
992
+ our_server_name=self.server_name,
993
+ wheel_timer=self.wheel_timer,
994
+ now=now,
995
+ # When overriding disabled presence, don't kick off all the
996
+ # wheel timers.
997
+ persist=not self._track_presence,
998
+ )
999
+
1000
+ if force_notify:
1001
+ should_notify = True
1002
+
1003
+ self.user_to_current_state[user_id] = new_state
1004
+
1005
+ if should_notify:
1006
+ to_notify[user_id] = new_state
1007
+ elif should_ping:
1008
+ to_federation_ping[user_id] = new_state
1009
+
1010
+ # TODO: We should probably ensure there are no races hereafter
1011
+
1012
+ presence_updates_counter.labels(
1013
+ **{SERVER_NAME_LABEL: self.server_name}
1014
+ ).inc(len(new_states))
1015
+
1016
+ if to_notify:
1017
+ notified_presence_counter.labels(
1018
+ **{SERVER_NAME_LABEL: self.server_name}
1019
+ ).inc(len(to_notify))
1020
+ await self._persist_and_notify(list(to_notify.values()))
1021
+
1022
+ self.unpersisted_users_changes |= {s.user_id for s in new_states}
1023
+ self.unpersisted_users_changes -= set(to_notify.keys())
1024
+
1025
+ # Check if we need to resend any presence states to remote hosts. We
1026
+ # only do this for states that haven't been updated in a while to
1027
+ # ensure that the remote host doesn't time the presence state out.
1028
+ #
1029
+ # Note that since these are states that have *not* been updated,
1030
+ # they won't get sent down the normal presence replication stream,
1031
+ # and so we have to explicitly send them via the federation stream.
1032
+ to_federation_ping = {
1033
+ user_id: state
1034
+ for user_id, state in to_federation_ping.items()
1035
+ if user_id not in to_notify
1036
+ }
1037
+ if to_federation_ping:
1038
+ federation_presence_out_counter.labels(
1039
+ **{SERVER_NAME_LABEL: self.server_name}
1040
+ ).inc(len(to_federation_ping))
1041
+
1042
+ hosts_to_states = await get_interested_remotes(
1043
+ self.store,
1044
+ self.presence_router,
1045
+ list(to_federation_ping.values()),
1046
+ )
1047
+
1048
+ for destinations, states in hosts_to_states:
1049
+ await self._federation_queue.send_presence_to_destinations(
1050
+ states, destinations
1051
+ )
1052
+
1053
+ @wrap_as_background_process("handle_presence_timeouts")
1054
+ async def _handle_timeouts(self) -> None:
1055
+ """Checks the presence of users that have timed out and updates as
1056
+ appropriate.
1057
+ """
1058
+ logger.debug("Handling presence timeouts")
1059
+ now = self.clock.time_msec()
1060
+
1061
+ # Fetch the list of users that *may* have timed out. Things may have
1062
+ # changed since the timeout was set, so we won't necessarily have to
1063
+ # take any action.
1064
+ users_to_check = set(self.wheel_timer.fetch(now))
1065
+
1066
+ # Check whether the lists of syncing processes from an external
1067
+ # process have expired.
1068
+ expired_process_ids = [
1069
+ process_id
1070
+ for process_id, last_update in self.external_process_last_updated_ms.items()
1071
+ if now - last_update > EXTERNAL_PROCESS_EXPIRY
1072
+ ]
1073
+ for process_id in expired_process_ids:
1074
+ # For each expired process drop tracking info and check the users
1075
+ # that were syncing on that process to see if they need to be timed
1076
+ # out.
1077
+ users_to_check.update(
1078
+ user_id
1079
+ for user_id, device_id in self.external_process_to_current_syncs.pop(
1080
+ process_id, ()
1081
+ )
1082
+ )
1083
+ self.external_process_last_updated_ms.pop(process_id)
1084
+
1085
+ states = [
1086
+ self.user_to_current_state.get(user_id, UserPresenceState.default(user_id))
1087
+ for user_id in users_to_check
1088
+ ]
1089
+
1090
+ timers_fired_counter.labels(**{SERVER_NAME_LABEL: self.server_name}).inc(
1091
+ len(states)
1092
+ )
1093
+
1094
+ # Set of user ID & device IDs which are currently syncing.
1095
+ syncing_user_devices = {
1096
+ user_id_device_id
1097
+ for user_id_device_id, count in self._user_device_to_num_current_syncs.items()
1098
+ if count
1099
+ }
1100
+ syncing_user_devices.update(
1101
+ itertools.chain(*self.external_process_to_current_syncs.values())
1102
+ )
1103
+
1104
+ changes = handle_timeouts(
1105
+ states,
1106
+ is_mine_fn=self.is_mine_id,
1107
+ syncing_user_devices=syncing_user_devices,
1108
+ user_to_devices=self._user_to_device_to_current_state,
1109
+ now=now,
1110
+ )
1111
+
1112
+ return await self._update_states(changes)
1113
+
1114
+ async def bump_presence_active_time(
1115
+ self, user: UserID, device_id: str | None
1116
+ ) -> None:
1117
+ """We've seen the user do something that indicates they're interacting
1118
+ with the app.
1119
+ """
1120
+ # If presence is disabled, no-op
1121
+ if not self._track_presence:
1122
+ return
1123
+
1124
+ user_id = user.to_string()
1125
+
1126
+ bump_active_time_counter.labels(**{SERVER_NAME_LABEL: self.server_name}).inc()
1127
+
1128
+ now = self.clock.time_msec()
1129
+
1130
+ # Update the device information & mark the device as online if it was
1131
+ # unavailable.
1132
+ devices = self._user_to_device_to_current_state.setdefault(user_id, {})
1133
+ device_state = devices.setdefault(
1134
+ device_id,
1135
+ UserDevicePresenceState.default(user_id, device_id),
1136
+ )
1137
+ device_state.last_active_ts = now
1138
+ if device_state.state == PresenceState.UNAVAILABLE:
1139
+ device_state.state = PresenceState.ONLINE
1140
+
1141
+ # Update the user state, this will always update last_active_ts and
1142
+ # might update the presence state.
1143
+ prev_state = await self.current_state_for_user(user_id)
1144
+ new_fields: dict[str, Any] = {
1145
+ "last_active_ts": now,
1146
+ "state": _combine_device_states(devices.values()),
1147
+ }
1148
+
1149
+ await self._update_states([prev_state.copy_and_replace(**new_fields)])
1150
+
1151
+ async def user_syncing(
1152
+ self,
1153
+ user_id: str,
1154
+ device_id: str | None,
1155
+ affect_presence: bool = True,
1156
+ presence_state: str = PresenceState.ONLINE,
1157
+ ) -> ContextManager[None]:
1158
+ """Returns a context manager that should surround any stream requests
1159
+ from the user.
1160
+
1161
+ This allows us to keep track of who is currently streaming and who isn't
1162
+ without having to have timers outside of this module to avoid flickering
1163
+ when users disconnect/reconnect.
1164
+
1165
+ Args:
1166
+ user_id: the user that is starting a sync
1167
+ device_id: the user's device that is starting a sync
1168
+ affect_presence: If false this function will be a no-op.
1169
+ Useful for streams that are not associated with an actual
1170
+ client that is being used by a user.
1171
+ presence_state: The presence state indicated in the sync request
1172
+ """
1173
+ if not affect_presence or not self._track_presence:
1174
+ return _NullContextManager()
1175
+
1176
+ curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0)
1177
+ self._user_device_to_num_current_syncs[(user_id, device_id)] = curr_sync + 1
1178
+
1179
+ # Note that this causes last_active_ts to be incremented which is not
1180
+ # what the spec wants.
1181
+ await self.set_state(
1182
+ UserID.from_string(user_id),
1183
+ device_id,
1184
+ state={"presence": presence_state},
1185
+ is_sync=True,
1186
+ )
1187
+
1188
+ async def _end() -> None:
1189
+ try:
1190
+ self._user_device_to_num_current_syncs[(user_id, device_id)] -= 1
1191
+
1192
+ prev_state = await self.current_state_for_user(user_id)
1193
+ await self._update_states(
1194
+ [
1195
+ prev_state.copy_and_replace(
1196
+ last_user_sync_ts=self.clock.time_msec()
1197
+ )
1198
+ ]
1199
+ )
1200
+ except Exception:
1201
+ logger.exception("Error updating presence after sync")
1202
+
1203
+ @contextmanager
1204
+ def _user_syncing() -> Generator[None, None, None]:
1205
+ try:
1206
+ yield
1207
+ finally:
1208
+ run_in_background(_end)
1209
+
1210
+ return _user_syncing()
1211
+
1212
+ def get_currently_syncing_users_for_replication(
1213
+ self,
1214
+ ) -> Iterable[tuple[str, str | None]]:
1215
+ # since we are the process handling presence, there is nothing to do here.
1216
+ return []
1217
+
1218
+ async def update_external_syncs_row(
1219
+ self,
1220
+ process_id: str,
1221
+ user_id: str,
1222
+ device_id: str | None,
1223
+ is_syncing: bool,
1224
+ sync_time_msec: int,
1225
+ ) -> None:
1226
+ """Update the syncing users for an external process as a delta.
1227
+
1228
+ Args:
1229
+ process_id: An identifier for the process the users are
1230
+ syncing against. This allows synapse to process updates
1231
+ as users start and stop syncing against a given process.
1232
+ user_id: The user who has started or stopped syncing
1233
+ device_id: The user's device that has started or stopped syncing
1234
+ is_syncing: Whether or not the user is now syncing
1235
+ sync_time_msec: Time in ms when the user was last syncing
1236
+ """
1237
+ async with self.external_sync_linearizer.queue(process_id):
1238
+ prev_state = await self.current_state_for_user(user_id)
1239
+
1240
+ process_presence = self.external_process_to_current_syncs.setdefault(
1241
+ process_id, set()
1242
+ )
1243
+
1244
+ # USER_SYNC is sent when a user's device starts or stops syncing on
1245
+ # a remote process. (But only for the initial and last sync for that
1246
+ # device.)
1247
+ #
1248
+ # When a device *starts* syncing it also calls set_state(...) which
1249
+ # will update the state, last_active_ts, and last_user_sync_ts.
1250
+ # Simply ensure the user & device is tracked as syncing in this case.
1251
+ #
1252
+ # When a device *stops* syncing, update the last_user_sync_ts and mark
1253
+ # them as no longer syncing. Note this doesn't quite match the
1254
+ # monolith behaviour, which updates last_user_sync_ts at the end of
1255
+ # every sync, not just the last in-flight sync.
1256
+ if is_syncing and (user_id, device_id) not in process_presence:
1257
+ process_presence.add((user_id, device_id))
1258
+ elif not is_syncing and (user_id, device_id) in process_presence:
1259
+ devices = self._user_to_device_to_current_state.setdefault(user_id, {})
1260
+ device_state = devices.setdefault(
1261
+ device_id, UserDevicePresenceState.default(user_id, device_id)
1262
+ )
1263
+ device_state.last_sync_ts = sync_time_msec
1264
+
1265
+ new_state = prev_state.copy_and_replace(
1266
+ last_user_sync_ts=sync_time_msec
1267
+ )
1268
+ await self._update_states([new_state])
1269
+
1270
+ process_presence.discard((user_id, device_id))
1271
+
1272
+ self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
1273
+
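As a hedged illustration of the delta API above (the process name, user, and device are made-up values), a sync worker reporting that a device has started syncing would call roughly:

    await presence_handler.update_external_syncs_row(
        process_id="generic_worker1",      # illustrative process identifier
        user_id="@alice:example.com",      # illustrative user
        device_id="DEVICEID",              # illustrative device
        is_syncing=True,
        sync_time_msec=clock.time_msec(),  # `clock` assumed to be in scope
    )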
1274
+ async def update_external_syncs_clear(self, process_id: str) -> None:
1275
+ """Marks all users that had been marked as syncing by a given process
1276
+ as offline.
1277
+
1278
+ Used when the process has stopped/disappeared.
1279
+ """
1280
+ async with self.external_sync_linearizer.queue(process_id):
1281
+ process_presence = self.external_process_to_current_syncs.pop(
1282
+ process_id, set()
1283
+ )
1284
+
1285
+ time_now_ms = self.clock.time_msec()
1286
+
1287
+ # Mark each device as having a last sync time.
1288
+ updated_users = set()
1289
+ for user_id, device_id in process_presence:
1290
+ device_state = self._user_to_device_to_current_state.setdefault(
1291
+ user_id, {}
1292
+ ).setdefault(
1293
+ device_id, UserDevicePresenceState.default(user_id, device_id)
1294
+ )
1295
+
1296
+ device_state.last_sync_ts = time_now_ms
1297
+ updated_users.add(user_id)
1298
+
1299
+ # Update each user (and insert into the appropriate timers to check if
1300
+ # they've gone offline).
1301
+ prev_states = await self.current_state_for_users(updated_users)
1302
+ await self._update_states(
1303
+ [
1304
+ prev_state.copy_and_replace(last_user_sync_ts=time_now_ms)
1305
+ for prev_state in prev_states.values()
1306
+ ]
1307
+ )
1308
+ self.external_process_last_updated_ms.pop(process_id, None)
1309
+
1310
+ async def _persist_and_notify(self, states: list[UserPresenceState]) -> None:
1311
+ """Persist states in the database, poke the notifier and send to
1312
+ interested remote servers
1313
+ """
1314
+ stream_id, max_token = await self.store.update_presence(states)
1315
+
1316
+ parties = await get_interested_parties(self.store, self.presence_router, states)
1317
+ room_ids_to_states, users_to_states = parties
1318
+
1319
+ self.notifier.on_new_event(
1320
+ StreamKeyType.PRESENCE,
1321
+ stream_id,
1322
+ rooms=room_ids_to_states.keys(),
1323
+ users=[UserID.from_string(u) for u in users_to_states],
1324
+ )
1325
+
1326
+ # We only want to poke the local federation sender, if any, as other
1327
+ # workers will receive the presence updates via the presence replication
1328
+ # stream (which is updated by `store.update_presence`).
1329
+ await self.maybe_send_presence_to_interested_destinations(states)
1330
+
1331
+ async def incoming_presence(self, origin: str, content: JsonDict) -> None:
1332
+ """Called when we receive a `m.presence` EDU from a remote server."""
1333
+ if not self._track_presence:
1334
+ return
1335
+
1336
+ now = self.clock.time_msec()
1337
+ updates = []
1338
+ for push in content.get("push", []):
1339
+ # A "push" contains a list of presence that we are probably interested
1340
+ # in.
1341
+ user_id = push.get("user_id", None)
1342
+ if not user_id:
1343
+ logger.info(
1344
+ "Got presence update from %r with no 'user_id': %r", origin, push
1345
+ )
1346
+ continue
1347
+
1348
+ if get_domain_from_id(user_id) != origin:
1349
+ logger.info(
1350
+ "Got presence update from %r with bad 'user_id': %r",
1351
+ origin,
1352
+ user_id,
1353
+ )
1354
+ continue
1355
+
1356
+ presence_state = push.get("presence", None)
1357
+ if not presence_state:
1358
+ logger.info(
1359
+ "Got presence update from %r with no 'presence_state': %r",
1360
+ origin,
1361
+ push,
1362
+ )
1363
+ continue
1364
+
1365
+ new_fields = {"state": presence_state, "last_federation_update_ts": now}
1366
+
1367
+ last_active_ago = push.get("last_active_ago", None)
1368
+ if last_active_ago is not None:
1369
+ new_fields["last_active_ts"] = now - last_active_ago
1370
+
1371
+ new_fields["status_msg"] = push.get("status_msg", None)
1372
+ new_fields["currently_active"] = push.get("currently_active", False)
1373
+
1374
+ prev_state = await self.current_state_for_user(user_id)
1375
+ updates.append(prev_state.copy_and_replace(**new_fields))
1376
+
1377
+ if updates:
1378
+ federation_presence_counter.labels(
1379
+ **{SERVER_NAME_LABEL: self.server_name}
1380
+ ).inc(len(updates))
1381
+ await self._update_states(updates)
1382
+
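For reference, a minimal `m.presence` EDU payload exercising the fields read above (all values illustrative) looks roughly like this:

    edu_content = {
        "push": [
            {
                "user_id": "@bob:remote.example",  # must belong to the sending server
                "presence": "online",
                "last_active_ago": 5000,           # ms since last activity
                "status_msg": "brb",
                "currently_active": True,
            }
        ]
    }
    await presence_handler.incoming_presence("remote.example", edu_content)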
1383
+ async def set_state(
1384
+ self,
1385
+ target_user: UserID,
1386
+ device_id: str | None,
1387
+ state: JsonDict,
1388
+ force_notify: bool = False,
1389
+ is_sync: bool = False,
1390
+ ) -> None:
1391
+ """Set the presence state of the user.
1392
+
1393
+ Args:
1394
+ target_user: The ID of the user to set the presence state of.
1395
+ device_id: the device that the user is setting the presence state of.
1396
+ state: The presence state as a JSON dictionary.
1397
+ force_notify: Whether to force notification of the update to clients.
1398
+ is_sync: True if this update was from a sync, which results in
1399
+ *not* overriding a previously set BUSY status, updating the
1400
+ user's last_user_sync_ts, and ignoring the "status_msg" field of
1401
+ the `state` dict.
1402
+ """
1403
+ status_msg = state.get("status_msg", None)
1404
+ presence = state["presence"]
1405
+
1406
+ if presence not in self.VALID_PRESENCE:
1407
+ raise SynapseError(400, "Invalid presence state")
1408
+
1409
+ # If presence is disabled, no-op
1410
+ if not self._track_presence:
1411
+ return
1412
+
1413
+ user_id = target_user.to_string()
1414
+ now = self.clock.time_msec()
1415
+
1416
+ prev_state = await self.current_state_for_user(user_id)
1417
+
1418
+ # Syncs do not override a previous presence of busy.
1419
+ #
1420
+ # TODO: This is a hack for lack of multi-device support. Unfortunately
1421
+ # removing this requires coordination with clients.
1422
+ if prev_state.state == PresenceState.BUSY and is_sync:
1423
+ presence = PresenceState.BUSY
1424
+
1425
+ # Update the device specific information.
1426
+ devices = self._user_to_device_to_current_state.setdefault(user_id, {})
1427
+ device_state = devices.setdefault(
1428
+ device_id,
1429
+ UserDevicePresenceState.default(user_id, device_id),
1430
+ )
1431
+ device_state.state = presence
1432
+ device_state.last_active_ts = now
1433
+ if is_sync:
1434
+ device_state.last_sync_ts = now
1435
+
1436
+ # Based on the state of each user's device calculate the new presence state.
1437
+ presence = _combine_device_states(devices.values())
1438
+
1439
+ new_fields: JsonDict = {"state": presence}
1440
+
1441
+ if presence == PresenceState.ONLINE or presence == PresenceState.BUSY:
1442
+ new_fields["last_active_ts"] = now
1443
+
1444
+ if is_sync:
1445
+ new_fields["last_user_sync_ts"] = now
1446
+ else:
1447
+ # Syncs do not override the status message.
1448
+ new_fields["status_msg"] = status_msg
1449
+
1450
+ await self._update_states(
1451
+ [prev_state.copy_and_replace(**new_fields)], force_notify=force_notify
1452
+ )
1453
+
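A hedged example of calling the method above (user, device, and status message are made up), e.g. when handling a client's presence PUT request:

    await presence_handler.set_state(
        UserID.from_string("@alice:example.com"),
        "DEVICEID",
        {"presence": "unavailable", "status_msg": "In a meeting"},
    )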
1454
+ async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
1455
+ """Returns whether a user can see another user's presence."""
1456
+ observer_room_ids = await self.store.get_rooms_for_user(
1457
+ observer_user.to_string()
1458
+ )
1459
+ observed_room_ids = await self.store.get_rooms_for_user(
1460
+ observed_user.to_string()
1461
+ )
1462
+
1463
+ if observer_room_ids & observed_room_ids:
1464
+ return True
1465
+
1466
+ return False
1467
+
1468
+ async def get_all_presence_updates(
1469
+ self, instance_name: str, last_id: int, current_id: int, limit: int
1470
+ ) -> tuple[list[tuple[int, list]], int, bool]:
1471
+ """
1472
+ Gets a list of presence update rows from between the given stream ids.
1473
+ Each row has:
1474
+ - stream_id(int)
1475
+ - user_id(str)
1476
+ - state(str)
1477
+ - last_active_ts(int)
1478
+ - last_federation_update_ts(int)
1479
+ - last_user_sync_ts(int)
1480
+ - status_msg(str)
1481
+ - currently_active(bool)
1482
+
1483
+ Args:
1484
+ instance_name: The writer we want to fetch updates from. Unused
1485
+ here since there is only ever one writer.
1486
+ last_id: The token to fetch updates from. Exclusive.
1487
+ current_id: The token to fetch updates up to. Inclusive.
1488
+ limit: The requested limit for the number of rows to return. The
1489
+ function may return more or fewer rows.
1490
+
1491
+ Returns:
1492
+ A tuple consisting of: the updates, a token to use to fetch
1493
+ subsequent updates, and whether we returned fewer rows than exists
1494
+ between the requested tokens due to the limit.
1495
+
1496
+ The token returned can be used in a subsequent call to this
1497
+ function to get further updates.
1498
+
1499
+ The updates are a list of 2-tuples of stream ID and the row data
1500
+ """
1501
+
1502
+ # TODO(markjh): replicate the unpersisted changes.
1503
+ # This could use the in-memory stores for recent changes.
1504
+ rows = await self.store.get_all_presence_updates(
1505
+ instance_name, last_id, current_id, limit
1506
+ )
1507
+ return rows
1508
+
1509
+ def notify_new_event(self) -> None:
1510
+ """Called when new events have happened. Handles users and servers
1511
+ joining rooms that require being sent presence.
1512
+ """
1513
+
1514
+ if self._event_processing:
1515
+ return
1516
+
1517
+ async def _process_presence() -> None:
1518
+ assert not self._event_processing
1519
+
1520
+ self._event_processing = True
1521
+ try:
1522
+ await self._unsafe_process()
1523
+ finally:
1524
+ self._event_processing = False
1525
+
1526
+ self.hs.run_as_background_process(
1527
+ "presence.notify_new_event", _process_presence
1528
+ )
1529
+
1530
+ async def _unsafe_process(self) -> None:
1531
+ # Loop round handling deltas until we're up to date
1532
+ while True:
1533
+ with Measure(
1534
+ self.clock, name="presence_delta", server_name=self.server_name
1535
+ ):
1536
+ room_max_stream_ordering = self.store.get_room_max_stream_ordering()
1537
+ if self._event_pos >= room_max_stream_ordering:
1538
+ return
1539
+
1540
+ logger.debug(
1541
+ "Processing presence stats %s->%s",
1542
+ self._event_pos,
1543
+ room_max_stream_ordering,
1544
+ )
1545
+ (
1546
+ max_pos,
1547
+ deltas,
1548
+ ) = await self._storage_controllers.state.get_current_state_deltas(
1549
+ self._event_pos, room_max_stream_ordering
1550
+ )
1551
+
1552
+ # We may get multiple deltas for different rooms, but we want to
1553
+ # handle them on a room by room basis, so we batch them up by
1554
+ # room.
1555
+ deltas_by_room: dict[str, list[StateDelta]] = {}
1556
+ for delta in deltas:
1557
+ deltas_by_room.setdefault(delta.room_id, []).append(delta)
1558
+
1559
+ for room_id, deltas_for_room in deltas_by_room.items():
1560
+ await self._handle_state_delta(room_id, deltas_for_room)
1561
+
1562
+ self._event_pos = max_pos
1563
+
1564
+ # Expose current event processing position to prometheus
1565
+ synapse.metrics.event_processing_positions.labels(
1566
+ name="presence", **{SERVER_NAME_LABEL: self.server_name}
1567
+ ).set(max_pos)
1568
+
1569
+ async def _handle_state_delta(self, room_id: str, deltas: list[StateDelta]) -> None:
1570
+ """Process current state deltas for the room to find new joins that need
1571
+ to be handled.
1572
+ """
1573
+
1574
+ # Sets of newly joined users. Note that if the local server is
1575
+ # joining a remote room for the first time we'll see both the joining
1576
+ # user and all remote users as newly joined.
1577
+ newly_joined_users = set()
1578
+
1579
+ for delta in deltas:
1580
+ assert room_id == delta.room_id
1581
+
1582
+ logger.debug(
1583
+ "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
1584
+ )
1585
+
1586
+ # Drop any event that isn't a membership join
1587
+ if delta.event_type != EventTypes.Member:
1588
+ continue
1589
+
1590
+ if delta.event_id is None:
1591
+ # state has been deleted, so this is not a join. We only care about
1592
+ # joins.
1593
+ continue
1594
+
1595
+ event = await self.store.get_event(delta.event_id, allow_none=True)
1596
+ if not event or event.content.get("membership") != Membership.JOIN:
1597
+ # We only care about joins
1598
+ continue
1599
+
1600
+ if delta.prev_event_id:
1601
+ prev_event = await self.store.get_event(
1602
+ delta.prev_event_id, allow_none=True
1603
+ )
1604
+ if (
1605
+ prev_event
1606
+ and prev_event.content.get("membership") == Membership.JOIN
1607
+ ):
1608
+ # Ignore changes to join events.
1609
+ continue
1610
+
1611
+ newly_joined_users.add(delta.state_key)
1612
+
1613
+ if not newly_joined_users:
1614
+ # If nobody has joined then there's nothing to do.
1615
+ return
1616
+
1617
+ # We want to send:
1618
+ # 1. presence states of all local users in the room to newly joined
1619
+ # remote servers
1620
+ # 2. presence states of newly joined users to all remote servers in
1621
+ # the room.
1622
+ #
1623
+ # TODO: Only send presence states to remote hosts that don't already
1624
+ # have them (because they already share rooms).
1625
+
1626
+ # Get all the users who were already in the room, by fetching the
1627
+ # current users in the room and removing the newly joined users.
1628
+ users = await self.store.get_users_in_room(room_id)
1629
+ prev_users = set(users) - newly_joined_users
1630
+
1631
+ # Construct sets for all the local users and remote hosts that were
1632
+ # already in the room
1633
+ prev_local_users = []
1634
+ prev_remote_hosts = set()
1635
+ for user_id in prev_users:
1636
+ if self.is_mine_id(user_id):
1637
+ prev_local_users.append(user_id)
1638
+ else:
1639
+ prev_remote_hosts.add(get_domain_from_id(user_id))
1640
+
1641
+ # Similarly, construct sets for all the local users and remote hosts
1642
+ # that were *not* already in the room. Care needs to be taken when
1643
+ # calculating the remote hosts, as a host may have already been in the
1644
+ # room even if there is a newly joined user from that host.
1645
+ newly_joined_local_users = []
1646
+ newly_joined_remote_hosts = set()
1647
+ for user_id in newly_joined_users:
1648
+ if self.is_mine_id(user_id):
1649
+ newly_joined_local_users.append(user_id)
1650
+ else:
1651
+ host = get_domain_from_id(user_id)
1652
+ if host not in prev_remote_hosts:
1653
+ newly_joined_remote_hosts.add(host)
1654
+
1655
+ # Send presence states of all local users in the room to newly joined
1656
+ # remote servers. (We actually only send states for local users already
1657
+ # in the room, as we'll send states for newly joined local users below.)
1658
+ if prev_local_users and newly_joined_remote_hosts:
1659
+ local_states = await self.current_state_for_users(prev_local_users)
1660
+
1661
+ # Filter out old presence, i.e. offline presence states where
1662
+ # the user hasn't been active for a week. We can change this
1663
+ # depending on what we want the UX to be, but at the least we
1664
+ # should filter out offline presence where the state is just the
1665
+ # default state.
1666
+ now = self.clock.time_msec()
1667
+ states = [
1668
+ state
1669
+ for state in local_states.values()
1670
+ if state.state != PresenceState.OFFLINE
1671
+ or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000
1672
+ or state.status_msg is not None
1673
+ ]
1674
+
1675
+ await self._federation_queue.send_presence_to_destinations(
1676
+ destinations=newly_joined_remote_hosts,
1677
+ states=states,
1678
+ )
1679
+
1680
+ # Send presence states of newly joined users to all remote servers in
1681
+ # the room
1682
+ if newly_joined_local_users and (
1683
+ prev_remote_hosts or newly_joined_remote_hosts
1684
+ ):
1685
+ local_states = await self.current_state_for_users(newly_joined_local_users)
1686
+ await self._federation_queue.send_presence_to_destinations(
1687
+ destinations=prev_remote_hosts | newly_joined_remote_hosts,
1688
+ states=list(local_states.values()),
1689
+ )
1690
+
1691
+
1692
+ def should_notify(
1693
+ old_state: UserPresenceState,
1694
+ new_state: UserPresenceState,
1695
+ is_mine: bool,
1696
+ our_server_name: str,
1697
+ ) -> bool:
1698
+ """Decides if a presence state change should be sent to interested parties."""
1699
+ user_location = "remote"
1700
+ if is_mine:
1701
+ user_location = "local"
1702
+
1703
+ if old_state == new_state:
1704
+ return False
1705
+
1706
+ if old_state.status_msg != new_state.status_msg:
1707
+ notify_reason_counter.labels(
1708
+ locality=user_location,
1709
+ reason="status_msg_change",
1710
+ **{SERVER_NAME_LABEL: our_server_name},
1711
+ ).inc()
1712
+ return True
1713
+
1714
+ if old_state.state != new_state.state:
1715
+ notify_reason_counter.labels(
1716
+ locality=user_location,
1717
+ reason="state_change",
1718
+ **{SERVER_NAME_LABEL: our_server_name},
1719
+ ).inc()
1720
+ state_transition_counter.labels(
1721
+ **{
1722
+ "locality": user_location,
1723
+ # `from` is a reserved word in Python so we have to label it this way if
1724
+ # we want to use keyword args.
1725
+ "from": old_state.state,
1726
+ "to": new_state.state,
1727
+ SERVER_NAME_LABEL: our_server_name,
1728
+ },
1729
+ ).inc()
1730
+ return True
1731
+
1732
+ if old_state.state == PresenceState.ONLINE:
1733
+ if new_state.currently_active != old_state.currently_active:
1734
+ notify_reason_counter.labels(
1735
+ locality=user_location,
1736
+ reason="current_active_change",
1737
+ **{SERVER_NAME_LABEL: our_server_name},
1738
+ ).inc()
1739
+ return True
1740
+
1741
+ if (
1742
+ new_state.last_active_ts - old_state.last_active_ts
1743
+ > LAST_ACTIVE_GRANULARITY
1744
+ ):
1745
+ # Only notify about last active bumps if we're not currently active
1746
+ if not new_state.currently_active:
1747
+ notify_reason_counter.labels(
1748
+ locality=user_location,
1749
+ reason="last_active_change_online",
1750
+ **{SERVER_NAME_LABEL: our_server_name},
1751
+ ).inc()
1752
+ return True
1753
+
1754
+ elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
1755
+ # Always notify for a transition where last active gets bumped.
1756
+ notify_reason_counter.labels(
1757
+ locality=user_location,
1758
+ reason="last_active_change_not_online",
1759
+ **{SERVER_NAME_LABEL: our_server_name},
1760
+ ).inc()
1761
+ return True
1762
+
1763
+ return False
1764
+
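A small sanity sketch of the rules above (assuming a `UserPresenceState.default(...)` constructor analogous to the `UserDevicePresenceState.default(...)` used elsewhere in this module): a status-message change alone is enough to notify, while an identical state is not.

    old = UserPresenceState.default("@alice:example.com")   # assumed helper
    new = old.copy_and_replace(status_msg="away from keyboard")
    assert should_notify(old, new, is_mine=True, our_server_name="example.com")
    assert not should_notify(old, old, is_mine=True, our_server_name="example.com")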
1765
+
1766
+ def format_user_presence_state(
1767
+ state: UserPresenceState, now: int, include_user_id: bool = True
1768
+ ) -> JsonDict:
1769
+ """Convert UserPresenceState to a JSON format that can be sent down to clients
1770
+ and to other servers.
1771
+
1772
+ Args:
1773
+ state: The user presence state to format.
1774
+ now: The current timestamp since the epoch in ms.
1775
+ include_user_id: Whether to include `user_id` in the returned dictionary.
1776
+ As this function can be used both to format presence updates for client /sync
1777
+ responses and for federation /send requests, only the latter needs to include
1778
+ the `user_id` field.
1779
+
1780
+ Returns:
1781
+ A JSON dictionary with the following keys:
1782
+ * presence: The presence state as a str.
1783
+ * user_id: Optional. Included if `include_user_id` is truthy. The canonical
1784
+ Matrix ID of the user.
1785
+ * last_active_ago: Optional. Included if `last_active_ts` is set on `state`.
1786
+ How many milliseconds ago the user was last active.
1787
+ * status_msg: Optional. Included if `status_msg` is set on `state`. The user's
1788
+ status.
1789
+ * currently_active: Optional. Included only if `state.state` is "online".
1790
+
1791
+ Example:
1792
+
1793
+ {
1794
+ "presence": "online",
1795
+ "user_id": "@alice:example.com",
1796
+ "last_active_ago": 16783813918,
1797
+ "status_msg": "Hello world!",
1798
+ "currently_active": True
1799
+ }
1800
+ """
1801
+ content: JsonDict = {"presence": state.state}
1802
+ if include_user_id:
1803
+ content["user_id"] = state.user_id
1804
+ if state.last_active_ts:
1805
+ content["last_active_ago"] = now - state.last_active_ts
1806
+ if state.status_msg:
1807
+ content["status_msg"] = state.status_msg
1808
+ if state.state == PresenceState.ONLINE:
1809
+ content["currently_active"] = state.currently_active
1810
+
1811
+ return content
1812
+
1813
+
1814
+ class PresenceEventSource(EventSource[int, UserPresenceState]):
1815
+ def __init__(self, hs: "HomeServer"):
1816
+ # We can't call get_presence_handler here because there's a cycle:
1817
+ #
1818
+ # Presence -> Notifier -> PresenceEventSource -> Presence
1819
+ #
1820
+ # Same with get_presence_router:
1821
+ #
1822
+ # AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler
1823
+ self.server_name = hs.hostname
1824
+ self.get_presence_handler = hs.get_presence_handler
1825
+ self.get_presence_router = hs.get_presence_router
1826
+ self.clock = hs.get_clock()
1828
+ self.store = hs.get_datastores().main
1829
+
1830
+ async def get_new_events(
1831
+ self,
1832
+ user: UserID,
1833
+ from_key: int | None,
1834
+ # Having a default limit doesn't match the EventSource API, but some
1835
+ # callers do not provide it. It is unused in this class.
1836
+ limit: int = 0,
1837
+ room_ids: StrCollection | None = None,
1838
+ is_guest: bool = False,
1839
+ explicit_room_id: str | None = None,
1840
+ include_offline: bool = True,
1841
+ service: ApplicationService | None = None,
1842
+ ) -> tuple[list[UserPresenceState], int]:
1843
+ # The process for getting presence events is:
1844
+ # 1. Get the rooms the user is in.
1845
+ # 2. Get the list of users in the rooms.
1846
+ # 3. Get the list of users that are in the user's presence list.
1847
+ # 4. If there is a from_key set, cross reference the list of users
1848
+ # with the `presence_stream_cache` to see which ones we actually
1849
+ # need to check.
1850
+ # 5. Load current state for the users.
1851
+ #
1852
+ # We don't try and limit the presence updates by the current token, as
1853
+ # sending down the rare duplicate is not a concern.
1854
+
1855
+ user_id = user.to_string()
1856
+ stream_change_cache = self.store.presence_stream_cache
1857
+
1858
+ with Measure(
1859
+ self.clock, name="presence.get_new_events", server_name=self.server_name
1860
+ ):
1861
+ if from_key is not None:
1862
+ from_key = int(from_key)
1863
+
1864
+ # Check if this user should receive all current, online user presence. We only
1865
+ # bother to do this if from_key is set, as otherwise the user will receive all
1866
+ # user presence anyways.
1867
+ if await self.store.should_user_receive_full_presence_with_token(
1868
+ user_id, from_key
1869
+ ):
1870
+ # This user has been specified by a module to receive all current, online
1871
+ # user presence. Removing from_key and setting include_offline to false
1872
+ will effectively do this.
1873
+ from_key = None
1874
+ include_offline = False
1875
+
1876
+ max_token = self.store.get_current_presence_token()
1877
+ if from_key == max_token:
1878
+ # This is necessary as due to the way stream ID generators work
1879
+ # we may get updates that have a stream ID greater than the max
1880
+ # token (e.g. max_token is N but stream generator may return
1881
+ # results for N+2, due to N+1 not having finished being
1882
+ # persisted yet).
1883
+ #
1884
+ # This is usually fine, as it just means that we may send down
1885
+ # some presence updates multiple times. However, we need to be
1886
+ # careful that the sync stream either actually does make some
1887
+ # progress or doesn't return, otherwise clients will end up
1888
+ # tight looping calling /sync due to it immediately returning
1889
+ # the same token repeatedly.
1890
+ #
1891
+ # Hence this guard where we just return nothing so that the sync
1892
+ # doesn't return. C.f. https://github.com/matrix-org/synapse/issues/5503.
1893
+ return [], max_token
1894
+
1895
+ # Figure out which other users this user should explicitly receive
1896
+ # updates for
1897
+ additional_users_interested_in = (
1898
+ await self.get_presence_router().get_interested_users(user.to_string())
1899
+ )
1900
+
1901
+ # We have a set of users that we're interested in the presence of. We want to
1902
+ # cross-reference that with the users that have actually changed their presence.
1903
+
1904
+ # Check whether this user should see all user updates
1905
+
1906
+ if additional_users_interested_in == PresenceRouter.ALL_USERS:
1907
+ # Provide presence state for all users
1908
+ presence_updates = await self._filter_all_presence_updates_for_user(
1909
+ user_id, include_offline, from_key
1910
+ )
1911
+
1912
+ return presence_updates, max_token
1913
+
1914
+ # Make mypy happy. users_interested_in should now be a set
1915
+ assert not isinstance(additional_users_interested_in, str)
1916
+
1917
+ # We always care about our own presence.
1918
+ additional_users_interested_in.add(user_id)
1919
+
1920
+ if explicit_room_id:
1921
+ user_ids = await self.store.get_users_in_room(explicit_room_id)
1922
+ additional_users_interested_in.update(user_ids)
1923
+
1924
+ # The set of users that we're interested in and that have had a presence update.
1925
+ # We'll actually pull the presence updates for these users at the end.
1926
+ interested_and_updated_users: StrCollection
1927
+
1928
+ if from_key is not None:
1929
+ # First get all users that have had a presence update
1930
+ result = stream_change_cache.get_all_entities_changed(from_key)
1931
+
1932
+ # Cross-reference users we're interested in with those that have had updates.
1933
+ if result.hit:
1934
+ updated_users = result.entities
1935
+
1936
+ # If we have the full list of changes for presence we can
1937
+ # simply check which ones share a room with the user.
1938
+ get_updates_counter.labels(
1939
+ type="stream",
1940
+ **{SERVER_NAME_LABEL: self.server_name},
1941
+ ).inc()
1942
+
1943
+ sharing_users = await self.store.do_users_share_a_room(
1944
+ user_id, updated_users
1945
+ )
1946
+
1947
+ interested_and_updated_users = (
1948
+ sharing_users.union(additional_users_interested_in)
1949
+ ).intersection(updated_users)
1950
+
1951
+ else:
1952
+ # Too many possible updates. Find all users we can see and check
1953
+ # if any of them have changed.
1954
+ get_updates_counter.labels(
1955
+ type="full",
1956
+ **{SERVER_NAME_LABEL: self.server_name},
1957
+ ).inc()
1958
+
1959
+ users_interested_in = (
1960
+ await self.store.get_users_who_share_room_with_user(user_id)
1961
+ )
1962
+ users_interested_in.update(additional_users_interested_in)
1963
+
1964
+ interested_and_updated_users = (
1965
+ stream_change_cache.get_entities_changed(
1966
+ users_interested_in, from_key
1967
+ )
1968
+ )
1969
+ else:
1970
+ # No from_key has been specified. Return the presence for all users
1971
+ # this user is interested in
1972
+ interested_and_updated_users = (
1973
+ await self.store.get_users_who_share_room_with_user(user_id)
1974
+ )
1975
+ interested_and_updated_users.update(additional_users_interested_in)
1976
+
1977
+ # Retrieve the current presence state for each user
1978
+ users_to_state = await self.get_presence_handler().current_state_for_users(
1979
+ interested_and_updated_users
1980
+ )
1981
+ presence_updates = list(users_to_state.values())
1982
+
1983
+ if not include_offline:
1984
+ # Filter out offline presence states
1985
+ presence_updates = self._filter_offline_presence_state(presence_updates)
1986
+
1987
+ return presence_updates, max_token
1988
+
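A hedged sketch of how the sync code might consume this event source (`presence_source` and `last_presence_key` are assumed names):

    updates, new_key = await presence_source.get_new_events(
        user=UserID.from_string("@alice:example.com"),
        from_key=last_presence_key,   # None on an initial sync
        include_offline=False,
    )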
1989
+ async def _filter_all_presence_updates_for_user(
1990
+ self,
1991
+ user_id: str,
1992
+ include_offline: bool,
1993
+ from_key: int | None = None,
1994
+ ) -> list[UserPresenceState]:
1995
+ """
1996
+ Computes the presence updates a user should receive.
1997
+
1998
+ First pulls presence updates from the database. Then consults PresenceRouter
1999
+ for whether any updates should be excluded by user ID.
2000
+
2001
+ Args:
2002
+ user_id: The User ID of the user to compute presence updates for.
2003
+ include_offline: Whether to include offline presence states from the results.
2004
+ from_key: The minimum stream ID of updates to pull from the database
2005
+ before filtering.
2006
+
2007
+ Returns:
2008
+ A list of presence states for the given user to receive.
2009
+ """
2010
+ updated_users = None
2011
+ if from_key:
2012
+ # Only return updates since the last sync
2013
+ result = self.store.presence_stream_cache.get_all_entities_changed(from_key)
2014
+ if result.hit:
2015
+ updated_users = result.entities
2016
+
2017
+ if updated_users is not None:
2018
+ # Get the actual presence update for each change
2019
+ users_to_state = await self.get_presence_handler().current_state_for_users(
2020
+ updated_users
2021
+ )
2022
+ presence_updates = list(users_to_state.values())
2023
+
2024
+ if not include_offline:
2025
+ # Filter out offline states
2026
+ presence_updates = self._filter_offline_presence_state(presence_updates)
2027
+ else:
2028
+ users_to_state = await self.store.get_presence_for_all_users(
2029
+ include_offline=include_offline
2030
+ )
2031
+
2032
+ presence_updates = list(users_to_state.values())
2033
+
2034
+ # TODO: This feels wildly inefficient, and it's unfortunate we need to ask the
2035
+ # module for information on a number of users when we then only take the info
2036
+ # for a single user
2037
+
2038
+ # Filter through the presence router
2039
+ users_to_state_set = await self.get_presence_router().get_users_for_states(
2040
+ presence_updates
2041
+ )
2042
+
2043
+ # We only want the mapping for the syncing user
2044
+ presence_updates = list(users_to_state_set[user_id])
2045
+
2046
+ # Return presence information for all users
2047
+ return presence_updates
2048
+
2049
+ def _filter_offline_presence_state(
2050
+ self, presence_updates: Iterable[UserPresenceState]
2051
+ ) -> list[UserPresenceState]:
2052
+ """Given an iterable containing user presence updates, return a list with any offline
2053
+ presence states removed.
2054
+
2055
+ Args:
2056
+ presence_updates: Presence states to filter
2057
+
2058
+ Returns:
2059
+ A new list with any offline presence states removed.
2060
+ """
2061
+ return [
2062
+ update
2063
+ for update in presence_updates
2064
+ if update.state != PresenceState.OFFLINE
2065
+ ]
2066
+
2067
+ def get_current_key(self) -> int:
2068
+ return self.store.get_current_presence_token()
2069
+
2070
+
2071
+ def handle_timeouts(
2072
+ user_states: list[UserPresenceState],
2073
+ is_mine_fn: Callable[[str], bool],
2074
+ syncing_user_devices: AbstractSet[tuple[str, str | None]],
2075
+ user_to_devices: dict[str, dict[str | None, UserDevicePresenceState]],
2076
+ now: int,
2077
+ ) -> list[UserPresenceState]:
2078
+ """Checks the presence of users that have timed out and updates as
2079
+ appropriate.
2080
+
2081
+ Args:
2082
+ user_states: List of UserPresenceState's to check.
2083
+ is_mine_fn: Function that returns if a user_id is ours
2084
+ syncing_user_devices: A set of (user ID, device ID) tuples with active syncs.
2085
+ user_to_devices: A map of user ID to device ID to UserDevicePresenceState.
2086
+ now: Current time in ms.
2087
+
2088
+ Returns:
2089
+ List of UserPresenceState updates
2090
+ """
2091
+ changes = {} # Actual changes we need to notify people about
2092
+
2093
+ for state in user_states:
2094
+ user_id = state.user_id
2095
+ is_mine = is_mine_fn(user_id)
2096
+
2097
+ new_state = handle_timeout(
2098
+ state,
2099
+ is_mine,
2100
+ syncing_user_devices,
2101
+ user_to_devices.get(user_id, {}),
2102
+ now,
2103
+ )
2104
+ if new_state:
2105
+ changes[state.user_id] = new_state
2106
+
2107
+ return list(changes.values())
2108
+
2109
+
2110
+ def handle_timeout(
2111
+ state: UserPresenceState,
2112
+ is_mine: bool,
2113
+ syncing_device_ids: AbstractSet[tuple[str, str | None]],
2114
+ user_devices: dict[str | None, UserDevicePresenceState],
2115
+ now: int,
2116
+ ) -> UserPresenceState | None:
2117
+ """Checks the presence of the user to see if any of the timers have elapsed
2118
+
2119
+ Args:
2120
+ state: UserPresenceState to check.
2121
+ is_mine: Whether the user is ours
2122
+ syncing_device_ids: A set of (user ID, device ID) tuples with active syncs.
2123
+ user_devices: A map of device ID to UserDevicePresenceState.
2124
+ now: Current time in ms.
2125
+
2126
+ Returns:
2127
+ A UserPresenceState update or None if no update.
2128
+ """
2129
+ if state.state == PresenceState.OFFLINE:
2130
+ # No timeouts are associated with offline states.
2131
+ return None
2132
+
2133
+ changed = False
2134
+
2135
+ if is_mine:
2136
+ # Check per-device whether the device should be considered idle or offline
2137
+ # due to timeouts.
2138
+ device_changed = False
2139
+ offline_devices = []
2140
+ for device_id, device_state in user_devices.items():
2141
+ if device_state.state == PresenceState.ONLINE:
2142
+ if now - device_state.last_active_ts > IDLE_TIMER:
2143
+ # Currently online, but last activity ages ago so auto
2144
+ # idle
2145
+ device_state.state = PresenceState.UNAVAILABLE
2146
+ device_changed = True
2147
+
2148
+ # If there have been no syncs for a while (and none ongoing),
2149
+ # set presence to offline.
2150
+ if (state.user_id, device_id) not in syncing_device_ids:
2151
+ # If the user has done something recently but hasn't synced,
2152
+ # don't set them as offline.
2153
+ sync_or_active = max(
2154
+ device_state.last_sync_ts, device_state.last_active_ts
2155
+ )
2156
+
2157
+ # Implementations aren't meant to timeout a device with a busy
2158
+ # state, but it needs to timeout *eventually* or else the user
2159
+ # will be stuck in that state.
2160
+ online_timeout = (
2161
+ BUSY_ONLINE_TIMEOUT
2162
+ if device_state.state == PresenceState.BUSY
2163
+ else SYNC_ONLINE_TIMEOUT
2164
+ )
2165
+ if now - sync_or_active > online_timeout:
2166
+ # Mark the device as going offline.
2167
+ offline_devices.append(device_id)
2168
+ device_changed = True
2169
+
2170
+ # Offline devices are not needed and do not add information.
2171
+ for device_id in offline_devices:
2172
+ user_devices.pop(device_id)
2173
+
2174
+ # If the presence state of the devices changed, then (maybe) update
2175
+ # the user's overall presence state.
2176
+ if device_changed:
2177
+ new_presence = _combine_device_states(user_devices.values())
2178
+ if new_presence != state.state:
2179
+ state = state.copy_and_replace(state=new_presence)
2180
+ changed = True
2181
+
2182
+ if now - state.last_active_ts > LAST_ACTIVE_GRANULARITY:
2183
+ # So that we send down a notification that we've
2184
+ # stopped updating.
2185
+ changed = True
2186
+
2187
+ if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL:
2188
+ # Need to send ping to other servers to ensure they don't
2189
+ # timeout and set us to offline
2190
+ changed = True
2191
+ else:
2192
+ # We expect to be poked occasionally by the other side.
2193
+ # This is to protect against forgetful/buggy servers, so that
2194
+ # no one gets stuck online forever.
2195
+ if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
2196
+ # The other side seems to have disappeared.
2197
+ state = state.copy_and_replace(state=PresenceState.OFFLINE)
2198
+ changed = True
2199
+
2200
+ return state if changed else None
2201
+
2202
+
2203
+ def handle_update(
2204
+ prev_state: UserPresenceState,
2205
+ new_state: UserPresenceState,
2206
+ is_mine: bool,
2207
+ our_server_name: str,
2208
+ wheel_timer: WheelTimer,
2209
+ now: int,
2210
+ persist: bool,
2211
+ ) -> tuple[UserPresenceState, bool, bool]:
2212
+ """Given a presence update:
2213
+ 1. Add any appropriate timers.
2214
+ 2. Check if we should notify anyone.
2215
+
2216
+ Args:
2217
+ prev_state
2218
+ new_state
2219
+ is_mine: Whether the user is ours
2220
+ our_server_name: The homeserver name of our server (`hs.hostname`)
2221
+ wheel_timer
2222
+ now: Time now in ms
2223
+ persist: True if this state should persist until another update occurs.
2224
+ Skips insertion into wheel timers.
2225
+
2226
+ Returns:
2227
+ 3-tuple: `(new_state, persist_and_notify, federation_ping)` where:
2228
+ - new_state: is the state to actually persist
2229
+ - persist_and_notify: whether to persist and notify people
2230
+ - federation_ping: whether we should send a ping over federation
2231
+ """
2232
+ user_id = new_state.user_id
2233
+
2234
+ persist_and_notify = False
2235
+ federation_ping = False
2236
+
2237
+ # If the users are ours then we want to set up a bunch of timers
2238
+ # to time things out.
2239
+ if is_mine:
2240
+ if new_state.state == PresenceState.ONLINE:
2241
+ # Idle timer
2242
+ if not persist:
2243
+ wheel_timer.insert(
2244
+ now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER
2245
+ )
2246
+
2247
+ active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY
2248
+ new_state = new_state.copy_and_replace(currently_active=active)
2249
+
2250
+ if active and not persist:
2251
+ wheel_timer.insert(
2252
+ now=now,
2253
+ obj=user_id,
2254
+ then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
2255
+ )
2256
+
2257
+ if new_state.state != PresenceState.OFFLINE:
2258
+ # User has stopped syncing
2259
+ if not persist:
2260
+ wheel_timer.insert(
2261
+ now=now,
2262
+ obj=user_id,
2263
+ then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
2264
+ )
2265
+
2266
+ last_federate = new_state.last_federation_update_ts
2267
+ if now - last_federate > FEDERATION_PING_INTERVAL:
2268
+ # Been a while since we've poked remote servers
2269
+ new_state = new_state.copy_and_replace(last_federation_update_ts=now)
2270
+ federation_ping = True
2271
+
2272
+ if new_state.state == PresenceState.BUSY and not persist:
2273
+ wheel_timer.insert(
2274
+ now=now,
2275
+ obj=user_id,
2276
+ then=new_state.last_user_sync_ts + BUSY_ONLINE_TIMEOUT,
2277
+ )
2278
+
2279
+ else:
2280
+ # An update for a remote user was received.
2281
+ if not persist:
2282
+ wheel_timer.insert(
2283
+ now=now,
2284
+ obj=user_id,
2285
+ then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT,
2286
+ )
2287
+
2288
+ # Check whether the change was something worth notifying about
2289
+ if should_notify(prev_state, new_state, is_mine, our_server_name):
2290
+ new_state = new_state.copy_and_replace(last_federation_update_ts=now)
2291
+ persist_and_notify = True
2292
+
2293
+ return new_state, persist_and_notify, federation_ping
2294
+
2295
+
2296
+ PRESENCE_BY_PRIORITY = {
2297
+ PresenceState.BUSY: 4,
2298
+ PresenceState.ONLINE: 3,
2299
+ PresenceState.UNAVAILABLE: 2,
2300
+ PresenceState.OFFLINE: 1,
2301
+ }
2302
+
2303
+
2304
+ def _combine_device_states(
2305
+ device_states: Iterable[UserDevicePresenceState],
2306
+ ) -> str:
2307
+ """
2308
+ Find the device to use presence information from.
2309
+
2310
+ Orders devices by priority, then last_active_ts.
2311
+
2312
+ Args:
2313
+ device_states: An iterable of device presence states
2314
+
2315
+ Return:
2316
+ The combined presence state.
2317
+ """
2318
+
2319
+ # Based on (all) the user's devices calculate the new presence state.
2320
+ presence = PresenceState.OFFLINE
2321
+ last_active_ts = -1
2322
+
2323
+ # Find the device to use the presence state of based on the presence priority,
2324
+ # but tie-break with how recently the device has been seen.
2325
+ for device_state in device_states:
2326
+ if (PRESENCE_BY_PRIORITY[device_state.state], device_state.last_active_ts) > (
2327
+ PRESENCE_BY_PRIORITY[presence],
2328
+ last_active_ts,
2329
+ ):
2330
+ presence = device_state.state
2331
+ last_active_ts = device_state.last_active_ts
2332
+
2333
+ return presence
2334
+
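A worked example of the priority/recency ordering above (device values are illustrative): BUSY outranks ONLINE regardless of which device was seen more recently.

    phone = UserDevicePresenceState.default("@alice:example.com", "phone")
    phone.state, phone.last_active_ts = PresenceState.ONLINE, 2000
    laptop = UserDevicePresenceState.default("@alice:example.com", "laptop")
    laptop.state, laptop.last_active_ts = PresenceState.BUSY, 1000
    assert _combine_device_states([phone, laptop]) == PresenceState.BUSY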
2335
+
2336
+ async def get_interested_parties(
2337
+ store: DataStore, presence_router: PresenceRouter, states: list[UserPresenceState]
2338
+ ) -> tuple[dict[str, list[UserPresenceState]], dict[str, list[UserPresenceState]]]:
2339
+ """Given a list of states return which entities (rooms, users)
2340
+ are interested in the given states.
2341
+
2342
+ Args:
2343
+ store: The homeserver's data store.
2344
+ presence_router: A module for augmenting the destinations for presence updates.
2345
+ states: A list of incoming user presence updates.
2346
+
2347
+ Returns:
2348
+ A 2-tuple of `(room_ids_to_states, users_to_states)`,
2349
+ with each item being a dict of `entity_name` -> `[UserPresenceState]`
2350
+ """
2351
+ room_ids_to_states: dict[str, list[UserPresenceState]] = {}
2352
+ users_to_states: dict[str, list[UserPresenceState]] = {}
2353
+ for state in states:
2354
+ room_ids = await store.get_rooms_for_user(state.user_id)
2355
+ for room_id in room_ids:
2356
+ room_ids_to_states.setdefault(room_id, []).append(state)
2357
+
2358
+ # Always notify self
2359
+ users_to_states.setdefault(state.user_id, []).append(state)
2360
+
2361
+ # Ask a presence routing module for any additional parties if one
2362
+ # is loaded.
2363
+ router_users_to_states = await presence_router.get_users_for_states(states)
2364
+
2365
+ # Update the dictionaries with additional destinations and state to send
2366
+ for user_id, user_states in router_users_to_states.items():
2367
+ users_to_states.setdefault(user_id, []).extend(user_states)
2368
+
2369
+ return room_ids_to_states, users_to_states
2370
+
2371
+
2372
+ async def get_interested_remotes(
2373
+ store: DataStore,
2374
+ presence_router: PresenceRouter,
2375
+ states: list[UserPresenceState],
2376
+ ) -> list[tuple[StrCollection, Collection[UserPresenceState]]]:
2377
+ """Given a list of presence states figure out which remote servers
2378
+ should be sent which.
2379
+
2380
+ All the presence states should be for local users only.
2381
+
2382
+ Args:
2383
+ store: The homeserver's data store.
2384
+ presence_router: A module for augmenting the destinations for presence updates.
2385
+ states: A list of incoming user presence updates.
2386
+
2387
+ Returns:
2388
+ A list of 2-tuples of (destinations, presence states) to send to those destinations.
2389
+ """
2390
+ hosts_and_states: list[tuple[StrCollection, Collection[UserPresenceState]]] = []
2391
+
2392
+ # First we look up the rooms each user is in (as well as any explicit
2393
+ # subscriptions), then for each distinct room we look up the remote
2394
+ # hosts in those rooms.
2395
+ for state in states:
2396
+ room_ids = await store.get_rooms_for_user(state.user_id)
2397
+ hosts: set[str] = set()
2398
+ for room_id in room_ids:
2399
+ room_hosts = await store.get_current_hosts_in_room(room_id)
2400
+ hosts.update(room_hosts)
2401
+ hosts_and_states.append((hosts, [state]))
2402
+
2403
+ # Ask a presence routing module for any additional parties if one
2404
+ # is loaded.
2405
+ router_users_to_states = await presence_router.get_users_for_states(states)
2406
+
2407
+ for user_id, user_states in router_users_to_states.items():
2408
+ host = get_domain_from_id(user_id)
2409
+ hosts_and_states.append(([host], user_states))
2410
+
2411
+ return hosts_and_states
2412
+
2413
+
2414
+ class PresenceFederationQueue:
2415
+ """Handles sending ad hoc presence updates over federation, which are *not*
2416
+ due to state updates (that get handled via the presence stream), e.g.
2417
+ federation pings and sending existing presence states to newly joined hosts.
2418
+
2419
+ Only the last N minutes will be queued, so if a federation sender instance
2420
+ is down for longer than that, some updates will be dropped. This is OK as presence
2421
+ is ephemeral, and so it will self correct eventually.
2422
+
2423
+ On workers the class tracks the last received position of the stream from
2424
+ replication, and handles querying for missed updates over HTTP replication,
2425
+ c.f. `get_current_token` and `get_replication_rows`.
2426
+ """
2427
+
2428
+ # How long to keep entries in the queue for. Workers that are down for
2429
+ # longer than this duration will miss out on older updates.
2430
+ _KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000
2431
+
2432
+ # How often to check if we can expire entries from the queue.
2433
+ _CLEAR_ITEMS_EVERY_MS = 60 * 1000
2434
+
2435
+ def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler):
2436
+ self._clock = hs.get_clock()
2437
+ self._notifier = hs.get_notifier()
2438
+ self._instance_name = hs.get_instance_name()
2439
+ self._presence_handler = presence_handler
2440
+ self._repl_client = ReplicationGetStreamUpdates.make_client(hs)
2441
+
2442
+ # Should we keep a queue of recent presence updates? We only bother if
2443
+ # another process may be handling federation sending.
2444
+ self._queue_presence_updates = True
2445
+
2446
+ # Whether this instance is a presence writer.
2447
+ self._presence_writer = self._instance_name in hs.config.worker.writers.presence
2448
+
2449
+ # The FederationSender instance, if this process sends federation traffic directly.
2450
+ self._federation = None
2451
+
2452
+ if hs.should_send_federation():
2453
+ self._federation = hs.get_federation_sender()
2454
+
2455
+ # We don't bother queuing up presence states if only this instance
2456
+ # is sending federation.
2457
+ if hs.config.worker.federation_shard_config.instances == [
2458
+ self._instance_name
2459
+ ]:
2460
+ self._queue_presence_updates = False
2461
+
2462
+ # The queue of recently queued updates as tuples of: `(timestamp,
2463
+ # stream_id, destinations, user_ids)`. We don't store the full states
2464
+ # for efficiency, and remote workers will already have the full states
2465
+ # cached.
2466
+ self._queue: list[tuple[int, int, StrCollection, set[str]]] = []
2467
+
2468
+ self._next_id = 1
2469
+
2470
+ # Map from instance name to current token
2471
+ self._current_tokens: dict[str, int] = {}
2472
+
2473
+ if self._queue_presence_updates:
2474
+ self._clock.looping_call(self._clear_queue, self._CLEAR_ITEMS_EVERY_MS)
2475
+
2476
+ def _clear_queue(self) -> None:
2477
+ """Clear out older entries from the queue."""
2478
+ clear_before = self._clock.time_msec() - self._KEEP_ITEMS_IN_QUEUE_FOR_MS
2479
+
2480
+ # The queue is sorted by timestamp, so we can bisect to find the right
2481
+ # place to purge before. Note that we are searching using a 1-tuple with
2482
+ # the time, which does The Right Thing since the queue is a tuple where
2483
+ # the first item is a timestamp.
2484
+ index = bisect(self._queue, (clear_before,))
2485
+ self._queue = self._queue[index:]
2486
+
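A tiny self-contained illustration of the bisect-on-a-1-tuple trick used in `_clear_queue` (queue contents are made up):

    from bisect import bisect

    # Entries are (timestamp, stream_id, destinations, user_ids).
    queue = [(100, 1, ("hs1",), {"@a:x"}), (200, 2, ("hs2",), {"@b:x"})]
    # Drop everything queued before t=150: (150,) sorts after the first entry
    # and before the second, so bisect returns 1 and only the newer entry stays.
    queue = queue[bisect(queue, (150,)):]
    assert queue == [(200, 2, ("hs2",), {"@b:x"})]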
2487
+ async def send_presence_to_destinations(
2488
+ self, states: Collection[UserPresenceState], destinations: StrCollection
2489
+ ) -> None:
2490
+ """Send the presence states to the given destinations.
2491
+
2492
+ Will forward to the local federation sender (if there is one) and queue
2493
+ to send over replication (if there are other federation sender instances).
2494
+
2495
+ Must only be called on the presence writer process.
2496
+ """
2497
+
2498
+ # This should only be called on a presence writer.
2499
+ assert self._presence_writer
2500
+
2501
+ if not states or not destinations:
2502
+ # Ignore calls which either don't have any new states or don't need
2503
+ # to be sent anywhere.
2504
+ return
2505
+
2506
+ if self._federation:
2507
+ await self._federation.send_presence_to_destinations(
2508
+ states=states,
2509
+ destinations=destinations,
2510
+ )
2511
+
2512
+ if not self._queue_presence_updates:
2513
+ return
2514
+
2515
+ now = self._clock.time_msec()
2516
+
2517
+ stream_id = self._next_id
2518
+ self._next_id += 1
2519
+
2520
+ self._queue.append((now, stream_id, destinations, {s.user_id for s in states}))
2521
+
2522
+ self._notifier.notify_replication()
2523
+
2524
+ def get_current_token(self, instance_name: str) -> int:
2525
+ """Get the current position of the stream.
2526
+
2527
+ On workers this returns the last stream ID received from replication.
2528
+ """
2529
+ if instance_name == self._instance_name:
2530
+ return self._next_id - 1
2531
+ else:
2532
+ return self._current_tokens.get(instance_name, 0)
2533
+
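A hedged illustration of the token semantics above (assumes this instance is the presence writer, queuing is enabled, and nothing has been queued yet; `queue`, `instance_name`, and `state` are assumed names):

    assert queue.get_current_token(instance_name) == 0   # _next_id starts at 1
    await queue.send_presence_to_destinations(states=[state], destinations=["hs1"])
    assert queue.get_current_token(instance_name) == 1   # one batch == one stream ID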
2534
+ async def get_replication_rows(
2535
+ self,
2536
+ instance_name: str,
2537
+ from_token: int,
2538
+ upto_token: int,
2539
+ target_row_count: int,
2540
+ ) -> tuple[list[tuple[int, tuple[str, str]]], int, bool]:
2541
+ """Get all the updates between the two tokens.
2542
+
2543
+ We return rows in the form of `(destination, user_id)` to keep the size
2544
+ of each row bounded (rather than returning the sets in a row).
2545
+
2546
+ On workers this will query the presence writer process via HTTP replication.
2547
+ """
2548
+ if instance_name != self._instance_name:
2549
+ # If not local we query over http replication from the presence
2550
+ # writer
2551
+ result = await self._repl_client(
2552
+ instance_name=instance_name,
2553
+ stream_name=PresenceFederationStream.NAME,
2554
+ from_token=from_token,
2555
+ upto_token=upto_token,
2556
+ )
2557
+ return result["updates"], result["upto_token"], result["limited"]
2558
+
2559
+ # If the from_token is the current token then there's nothing to return
2560
+ # and we can trivially no-op.
2561
+ if from_token == self._next_id - 1:
2562
+ return [], upto_token, False
2563
+
2564
+ # We can find the correct position in the queue by noting that there is
2565
+ # exactly one entry per stream ID, and that the last entry has an ID of
2566
+ # `self._next_id - 1`, so we can count backwards from the end.
2567
+ #
2568
+ # Since we are returning all states in the range `from_token < stream_id
2569
+ # <= upto_token` we look for the index with a `stream_id` of `from_token
2570
+ # + 1`.
2571
+ #
2572
+ # Since the start of the queue is periodically truncated we need to
2573
+ # handle the case where `from_token` stream ID has already been dropped.
2574
+ start_idx = max(from_token + 1 - self._next_id, -len(self._queue))
2575
+
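# A worked example of the index arithmetic above (numbers are illustrative):
# with _next_id = 11 the queue holds stream IDs 1..10 at indices -10..-1, so
# for from_token = 7 we want IDs 8..10 and start_idx = 7 + 1 - 11 = -3, i.e.
# the last three entries; the max(..., -len(self._queue)) clamp covers the
# case where older entries have already been truncated.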
2576
+ to_send: list[tuple[int, tuple[str, str]]] = []
2577
+ limited = False
2578
+ new_id = upto_token
2579
+ for _, stream_id, destinations, user_ids in self._queue[start_idx:]:
2580
+ if stream_id <= from_token:
2581
+ # Paranoia check that we are actually only sending states that
2582
+ # have a stream_id strictly greater than from_token. We should
2583
+ # never hit this.
2584
+ logger.warning(
2585
+ "Tried returning presence federation stream ID: %d less than from_token: %d (next_id: %d, len: %d)",
2586
+ stream_id,
2587
+ from_token,
2588
+ self._next_id,
2589
+ len(self._queue),
2590
+ )
2591
+ continue
2592
+
2593
+ if stream_id > upto_token:
2594
+ break
2595
+
2596
+ new_id = stream_id
2597
+
2598
+ to_send.extend(
2599
+ (stream_id, (destination, user_id))
2600
+ for destination in destinations
2601
+ for user_id in user_ids
2602
+ )
2603
+
2604
+ if len(to_send) > target_row_count:
2605
+ limited = True
2606
+ break
2607
+
2608
+ return to_send, new_id, limited
2609
+
2610
+ async def process_replication_rows(
2611
+ self, stream_name: str, instance_name: str, token: int, rows: list
2612
+ ) -> None:
2613
+ if stream_name != PresenceFederationStream.NAME:
2614
+ return
2615
+
2616
+ # We keep track of the current tokens (so that we can catch up with anything we missed after a disconnect)
2617
+ self._current_tokens[instance_name] = token
2618
+
2619
+ # If we're a federation sender we pull out the presence states to send
2620
+ # and forward them on.
2621
+ if not self._federation:
2622
+ return
2623
+
2624
+ hosts_to_users: dict[str, set[str]] = {}
2625
+ for row in rows:
2626
+ hosts_to_users.setdefault(row.destination, set()).add(row.user_id)
2627
+
2628
+ for host, user_ids in hosts_to_users.items():
2629
+ states = await self._presence_handler.current_state_for_users(user_ids)
2630
+ await self._federation.send_presence_to_destinations(
2631
+ states=states.values(),
2632
+ destinations=[host],
2633
+ )