protein-quest 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,534 @@
+ """Module for fetching AlphaFold data."""
+
+ import logging
+ from asyncio import Semaphore
+ from collections.abc import AsyncGenerator, Iterable
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Literal, cast, get_args
+
+ import aiofiles
+ from aiofiles.ospath import exists
+ from aiohttp_retry import RetryClient
+ from tqdm.asyncio import tqdm
+ from yarl import URL
+
+ from protein_quest.alphafold.entry_summary import EntrySummary
+ from protein_quest.converter import converter
+ from protein_quest.utils import Cacher, PassthroughCacher, friendly_session, retrieve_files, run_async
+
+ logger = logging.getLogger(__name__)
+
+
+ DownloadableFormat = Literal[
+     "summary",
+     "bcif",
+     "cif",
+     "pdb",
+     "paeDoc",
+     "amAnnotations",
+     "amAnnotationsHg19",
+     "amAnnotationsHg38",
+     "msa",
+     "plddtDoc",
+ ]
+ """Types of formats that can be downloaded from the AlphaFold web service."""
+
+ downloadable_formats: set[DownloadableFormat] = set(get_args(DownloadableFormat))
+ """Set of formats that can be downloaded from the AlphaFold web service."""
+
+ UrlFileNamePair = tuple[URL, str]
+ """A tuple of a URL and a filename."""
+ UrlFileNamePairsOfFormats = dict[DownloadableFormat, UrlFileNamePair]
+ """A mapping of DownloadableFormat to UrlFileNamePair."""
+
+
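+ # As an illustration (the accession and version below are hypothetical), a
+ # UrlFileNamePair for the "cif" format would look like:
+ #
+ #     (URL("https://alphafold.ebi.ac.uk/files/AF-Q5VSL9-F1-model_v6.cif"), "AF-Q5VSL9-F1-model_v6.cif")
+
+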
+ def _camel_to_snake_case(name: str) -> str:
+     """Convert a camelCase string to snake_case."""
+     return "".join(["_" + c.lower() if c.isupper() else c for c in name]).lstrip("_")
+
+
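+ # A quick sanity check of the helper above, written as comments so the module
+ # has no import-time side effects:
+ #
+ #     _camel_to_snake_case("amAnnotationsHg19")  # -> "am_annotations_hg19"
+ #     _camel_to_snake_case("paeDoc")             # -> "pae_doc"
+
+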
+ @dataclass
+ class AlphaFoldEntry:
+     """AlphaFold entry with summary object and optionally local files.
+
+     See https://alphafold.ebi.ac.uk/api-docs for more details on the summary data structure.
+     """
+
+     uniprot_accession: str
+     summary: EntrySummary | None = None
+     summary_file: Path | None = None
+     bcif_file: Path | None = None
+     cif_file: Path | None = None
+     pdb_file: Path | None = None
+     pae_doc_file: Path | None = None
+     am_annotations_file: Path | None = None
+     am_annotations_hg19_file: Path | None = None
+     am_annotations_hg38_file: Path | None = None
+     msa_file: Path | None = None
+     plddt_doc_file: Path | None = None
+
+     @classmethod
+     def format2attr(cls, dl_format: DownloadableFormat) -> str:
+         """Get the attribute name for a specific download format.
+
+         Args:
+             dl_format: The format for which to get the attribute name.
+
+         Returns:
+             The attribute name corresponding to the download format.
+
+         Raises:
+             ValueError: If the format is not valid.
+         """
+         if dl_format not in downloadable_formats:
+             msg = f"Invalid format: {dl_format}. Valid formats are: {downloadable_formats}"
+             raise ValueError(msg)
+         return _camel_to_snake_case(dl_format) + "_file"
+
+     def by_format(self, dl_format: DownloadableFormat) -> Path | None:
+         """Get the file path for a specific format.
+
+         Args:
+             dl_format: The format for which to get the file path.
+
+         Returns:
+             The file path corresponding to the download format,
+             or None if the file is not set.
+
+         Raises:
+             ValueError: If the format is not valid.
+         """
+         attr = self.format2attr(dl_format)
+         return getattr(self, attr, None)
+
+     def nr_of_files(self) -> int:
+         """Number of `*_file` attributes that are set.
+
+         Returns:
+             The number of `*_file` attributes that are not None.
+         """
+         return sum(1 for attr in vars(self) if attr.endswith("_file") and getattr(self, attr) is not None)
+
+     def relative_to(self, session_dir: Path) -> "AlphaFoldEntry":
+         """Convert the paths in this AlphaFoldEntry to be relative to the session directory.
+
+         Args:
+             session_dir: The session directory to which the paths should be made relative.
+
+         Returns:
+             A new AlphaFoldEntry instance with paths relative to the session directory.
+         """
+         return AlphaFoldEntry(
+             uniprot_accession=self.uniprot_accession,
+             summary=self.summary,
+             summary_file=self.summary_file.relative_to(session_dir) if self.summary_file else None,
+             bcif_file=self.bcif_file.relative_to(session_dir) if self.bcif_file else None,
+             cif_file=self.cif_file.relative_to(session_dir) if self.cif_file else None,
+             pdb_file=self.pdb_file.relative_to(session_dir) if self.pdb_file else None,
+             pae_doc_file=self.pae_doc_file.relative_to(session_dir) if self.pae_doc_file else None,
+             am_annotations_file=(
+                 self.am_annotations_file.relative_to(session_dir) if self.am_annotations_file else None
+             ),
+             am_annotations_hg19_file=(
+                 self.am_annotations_hg19_file.relative_to(session_dir) if self.am_annotations_hg19_file else None
+             ),
+             am_annotations_hg38_file=(
+                 self.am_annotations_hg38_file.relative_to(session_dir) if self.am_annotations_hg38_file else None
+             ),
+             msa_file=self.msa_file.relative_to(session_dir) if self.msa_file else None,
+             plddt_doc_file=self.plddt_doc_file.relative_to(session_dir) if self.plddt_doc_file else None,
+         )
+
+
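+ # A small illustration of the format <-> attribute mapping of the class above
+ # (values are hypothetical, shown as comments to avoid import-time side effects):
+ #
+ #     entry = AlphaFoldEntry(uniprot_accession="Q5VSL9", pdb_file=Path("AF-Q5VSL9-F1-model_v6.pdb"))
+ #     AlphaFoldEntry.format2attr("pdb")  # -> "pdb_file"
+ #     entry.by_format("pdb")             # -> Path("AF-Q5VSL9-F1-model_v6.pdb")
+ #     entry.nr_of_files()                # -> 1
+
+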
+ async def fetch_summary(
+     qualifier: str, session: RetryClient, semaphore: Semaphore, save_dir: Path | None, cacher: Cacher
+ ) -> list[EntrySummary]:
+     """Fetches a summary from the AlphaFold database for a given qualifier.
+
+     Args:
+         qualifier: The UniProt accession of the protein or entry to fetch.
+             For example `Q5VSL9`.
+         session: An asynchronous HTTP client session with retry capabilities.
+         semaphore: A semaphore to limit the number of concurrent requests.
+         save_dir: An optional directory to save the fetched summary as a JSON file.
+             If set and the summary exists on disk, the summary is loaded from disk
+             instead of being fetched from the API.
+             If not set, the summary is not saved to disk and is always fetched from the API.
+         cacher: A cacher to use for caching the fetched summary. Only used if save_dir is not None.
+
+     Returns:
+         A list of EntrySummary objects representing the fetched summary.
+         When the qualifier has multiple isoforms, multiple summaries are returned;
+         otherwise a list with a single summary is returned.
+     """
+     url = f"https://alphafold.ebi.ac.uk/api/prediction/{qualifier}"
+     fn: Path | None = None
+     if save_dir is not None:
+         fn = save_dir / f"{qualifier}.json"
+         if await exists(fn):
+             logger.debug(f"File {fn} already exists. Skipping download from {url}.")
+             async with aiofiles.open(fn, "rb") as f:
+                 raw_data = await f.read()
+             return converter.loads(raw_data, list[EntrySummary])
+         cached_file = await cacher.copy_from_cache(fn)
+         if cached_file is not None:
+             logger.debug(f"Using cached file {cached_file} for summary of {qualifier}.")
+             async with aiofiles.open(cached_file, "rb") as f:
+                 raw_data = await f.read()
+             return converter.loads(raw_data, list[EntrySummary])
+     async with semaphore, session.get(url) as response:
+         response.raise_for_status()
+         raw_data = await response.content.read()
+         if fn is not None:
+             await cacher.write_bytes(fn, raw_data)
+         return converter.loads(raw_data, list[EntrySummary])
+
+
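+ # A minimal sketch of calling fetch_summary directly (normally fetch_summaries
+ # below does this wiring for you; friendly_session and PassthroughCacher are
+ # the same helpers imported at the top of this module):
+ #
+ #     async def get_one(qualifier: str) -> list[EntrySummary]:
+ #         async with friendly_session() as session:
+ #             return await fetch_summary(qualifier, session, Semaphore(1), None, PassthroughCacher())
+
+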
+ async def fetch_summaries(
+     qualifiers: Iterable[str],
+     save_dir: Path | None = None,
+     max_parallel_downloads: int = 5,
+     cacher: Cacher | None = None,
+ ) -> AsyncGenerator[tuple[str, EntrySummary]]:
+     """Fetch summaries from the AlphaFold database for multiple qualifiers.
+
+     Args:
+         qualifiers: The UniProt accessions to fetch summaries for.
+         save_dir: An optional directory to save the fetched summaries as JSON files.
+         max_parallel_downloads: The maximum number of parallel downloads.
+         cacher: A cacher to use for caching the fetched summaries.
+
+     Yields:
+         Tuples of (qualifier, summary).
+     """
+     # Materialize the iterable, as it is consumed twice below.
+     qualifiers = list(qualifiers)
+     semaphore = Semaphore(max_parallel_downloads)
+     if save_dir is not None:
+         save_dir.mkdir(parents=True, exist_ok=True)
+     if cacher is None:
+         cacher = PassthroughCacher()
+     async with friendly_session() as session:
+         tasks = [fetch_summary(qualifier, session, semaphore, save_dir, cacher) for qualifier in qualifiers]
+         summaries_per_qualifier: list[list[EntrySummary]] = await tqdm.gather(
+             *tasks, desc="Fetching Alphafold summaries"
+         )
+         for qualifier, summaries in zip(qualifiers, summaries_per_qualifier, strict=True):
+             for summary in summaries:
+                 yield qualifier, summary
+
+
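+ # A minimal usage sketch (the accession and directory are illustrative; run
+ # inside an event loop, e.g. with asyncio.run):
+ #
+ #     async def main() -> None:
+ #         async for qualifier, summary in fetch_summaries(["Q5VSL9"], save_dir=Path("summaries")):
+ #             print(qualifier, summary.uniprotAccession)
+
+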
+ async def _fetch_many_async_with_summary(
+     uniprot_accessions: Iterable[str],
+     save_dir: Path,
+     formats: set[DownloadableFormat],
+     max_parallel_downloads: int = 5,
+     cacher: Cacher | None = None,
+     gzip_files: bool = False,
+     all_isoforms: bool = False,
+ ) -> AsyncGenerator[AlphaFoldEntry]:
+     save_dir_for_summaries = save_dir if "summary" in formats else None
+
+     summaries = [
+         s
+         async for s in fetch_summaries(
+             uniprot_accessions, save_dir_for_summaries, max_parallel_downloads=max_parallel_downloads, cacher=cacher
+         )
+         # Filter out isoforms if all_isoforms is False.
+         # For example, O60481 is canonical and O60481-2 is an isoform, so the isoform is skipped.
+         if all_isoforms or s[0] == s[1].uniprotAccession
+     ]
+     files = files_to_download(formats, summaries, gzip_files)
+
+     await retrieve_files(
+         files,
+         save_dir,
+         desc="Downloading AlphaFold files",
+         max_parallel_downloads=max_parallel_downloads,
+         cacher=cacher,
+         gzip_files=gzip_files,
+     )
+
+     gzext = ".gz" if gzip_files else ""
+     for uniprot_accession, summary in summaries:
+         yield AlphaFoldEntry(
+             uniprot_accession=uniprot_accession,
+             summary=summary,
+             summary_file=save_dir / f"{uniprot_accession}.json" if save_dir_for_summaries is not None else None,
+             bcif_file=save_dir / (summary.bcifUrl.name + gzext) if "bcif" in formats else None,
+             cif_file=save_dir / (summary.cifUrl.name + gzext) if "cif" in formats else None,
+             pdb_file=save_dir / (summary.pdbUrl.name + gzext) if "pdb" in formats else None,
+             pae_doc_file=save_dir / (summary.paeDocUrl.name + gzext) if "paeDoc" in formats else None,
+             am_annotations_file=(
+                 save_dir / (summary.amAnnotationsUrl.name + gzext)
+                 if "amAnnotations" in formats and summary.amAnnotationsUrl
+                 else None
+             ),
+             am_annotations_hg19_file=(
+                 save_dir / (summary.amAnnotationsHg19Url.name + gzext)
+                 if "amAnnotationsHg19" in formats and summary.amAnnotationsHg19Url
+                 else None
+             ),
+             am_annotations_hg38_file=(
+                 save_dir / (summary.amAnnotationsHg38Url.name + gzext)
+                 if "amAnnotationsHg38" in formats and summary.amAnnotationsHg38Url
+                 else None
+             ),
+             msa_file=(save_dir / (summary.msaUrl.name + gzext) if "msa" in formats and summary.msaUrl else None),
+             plddt_doc_file=(
+                 save_dir / (summary.plddtDocUrl.name + gzext) if "plddtDoc" in formats and summary.plddtDocUrl else None
+             ),
+         )
+
+
+ def files_to_download(
+     formats: set[DownloadableFormat], summaries: Iterable[tuple[str, EntrySummary]], gzip_files: bool
+ ) -> set[UrlFileNamePair]:
+     """Build the set of (URL, filename) pairs to download for the given summaries.
+
+     Args:
+         formats: The formats to download. The `summary` format is skipped,
+             as summaries are fetched separately via the API.
+         summaries: Pairs of (qualifier, summary) to collect download URLs from.
+         gzip_files: Whether a `.gz` suffix should be appended to the local filenames.
+
+     Returns:
+         A set of UrlFileNamePair tuples.
+
+     Raises:
+         ValueError: If an invalid format is specified.
+     """
+     if not (set(formats) <= downloadable_formats):
+         msg = (
+             f"Invalid format(s) specified: {set(formats) - downloadable_formats}. "
+             f"Valid formats are: {downloadable_formats}"
+         )
+         raise ValueError(msg)
+
+     url_filename_pairs: set[UrlFileNamePair] = set()
+     for _, summary in summaries:
+         for fmt in formats:
+             if fmt == "summary":
+                 # Summaries are already handled in fetch_summary.
+                 continue
+             url = cast("URL | None", getattr(summary, f"{fmt}Url", None))
+             if url is None:
+                 logger.warning(f"Summary {summary.modelEntityId} does not have a URL for format '{fmt}'. Skipping.")
+                 continue
+             fn = url.name + (".gz" if gzip_files else "")
+             url_filename_pairs.add((url, fn))
+     return url_filename_pairs
+
+
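+ # For illustration: for a summary whose pdbUrl ends in AF-Q5VSL9-F1-model_v6.pdb
+ # (a hypothetical entry), formats={"pdb"} yields a pair whose URL is unchanged
+ # while only the local filename gains the .gz suffix:
+ #
+ #     files_to_download({"pdb"}, summaries, gzip_files=True)
+ #     # -> {(URL("https://alphafold.ebi.ac.uk/files/AF-Q5VSL9-F1-model_v6.pdb"), "AF-Q5VSL9-F1-model_v6.pdb.gz")}
+
+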
+ async def fetch_alphafold_db_version() -> str:
+     """Fetch the current version of the AlphaFold database.
+
+     Returns:
+         The current version of the AlphaFold database as a string. For example: "6".
+     """
+     url = "https://ftp.ebi.ac.uk/pub/databases/alphafold/accession_ids.csv"
+     headers = {"Range": "bytes=0-200"}
+     logger.debug(f"Detecting AlphaFold DB version from head of {url}")
+     async with friendly_session() as session, session.get(url, headers=headers) as response:
+         response.raise_for_status()
+         raw = await response.content.read(200)
+         text = raw.decode("utf-8")
+         # Read the version from the last column of an early data row
+         # (the final row of the fetched head may be truncated).
+         line = text.splitlines()[1]
+         version = line.split(",")[-1]
+         logger.debug(f"Found current AlphaFold DB version is '{version}'")
+         return version
+
+
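+ # The rows of accession_ids.csv are assumed to look roughly like the line
+ # below, with the database version as the last column (the exact accession
+ # and residue range here are illustrative):
+ #
+ #     A0A009IHW8,1,330,AF-A0A009IHW8-F1,4
+
+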
+ def _files_for_alphafold_entry(
+     uniprot_accession: str,
+     formats: set[DownloadableFormat],
+     db_version: str,
+     gzip_files: bool,
+ ) -> UrlFileNamePairsOfFormats:
+     templates: dict[DownloadableFormat, URL] = {
+         "bcif": URL(f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-model_v{db_version}.bcif"),
+         "cif": URL(f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-model_v{db_version}.cif"),
+         "pdb": URL(f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-model_v{db_version}.pdb"),
+         "paeDoc": URL(
+             f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-predicted_aligned_error_v{db_version}.json"
+         ),
+         "amAnnotations": URL(f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-aa-substitutions.csv"),
+         "amAnnotationsHg19": URL(f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-hg19.csv"),
+         "amAnnotationsHg38": URL(f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-hg38.csv"),
+         "msa": URL(f"https://alphafold.ebi.ac.uk/files/msa/AF-{uniprot_accession}-F1-msa_v{db_version}.a3m"),
+         "plddtDoc": URL(f"https://alphafold.ebi.ac.uk/files/AF-{uniprot_accession}-F1-confidence_v{db_version}.json"),
+     }
+     url_filename_pairs: UrlFileNamePairsOfFormats = {}
+     for fmt in formats:
+         if fmt == "summary":
+             # Summaries are downloaded separately, as they come from the API instead of static files.
+             continue
+         if fmt not in templates:
+             logger.warning(f"No URL template found for format '{fmt}'. Skipping.")
+             continue
+         url = templates[fmt]
+         fn = url.name
+         if gzip_files:
+             fn += ".gz"
+         url_filename_pairs[fmt] = (url, fn)
+     return url_filename_pairs
+
+
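+ # For example (hypothetical accession, db_version="6"), requesting only the
+ # "pdb" format produces:
+ #
+ #     _files_for_alphafold_entry("Q5VSL9", {"pdb"}, "6", gzip_files=False)
+ #     # -> {"pdb": (URL("https://alphafold.ebi.ac.uk/files/AF-Q5VSL9-F1-model_v6.pdb"), "AF-Q5VSL9-F1-model_v6.pdb")}
+
+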
+ def files_for_alphafold_entries(
+     uniprot_accessions: Iterable[str],
+     formats: set[DownloadableFormat],
+     db_version: str,
+     gzip_files: bool,
+ ) -> dict[str, UrlFileNamePairsOfFormats]:
+     """Get the files to download for multiple AlphaFold entries.
+
+     Args:
+         uniprot_accessions: The UniProt accessions to get files for.
+         formats: A set of formats to download.
+         db_version: The version of the AlphaFold database to use.
+         gzip_files: Whether to download gzipped files. Otherwise downloads uncompressed files.
+
+     Returns:
+         A mapping of UniProt accession to a mapping of DownloadableFormat to UrlFileNamePair.
+     """
+     return {
+         uniprot_accession: _files_for_alphafold_entry(
+             uniprot_accession, formats=formats, db_version=db_version, gzip_files=gzip_files
+         )
+         for uniprot_accession in uniprot_accessions
+     }
+
+
+ async def _fetch_many_async_without_summary(
+     uniprot_accessions: Iterable[str],
+     save_dir: Path,
+     formats: set[DownloadableFormat],
+     db_version: str | None = None,
+     max_parallel_downloads: int = 5,
+     cacher: Cacher | None = None,
+     gzip_files: bool = False,
+ ) -> AsyncGenerator[AlphaFoldEntry]:
+     # Materialize the iterable, as it is consumed multiple times below.
+     uniprot_accessions = list(uniprot_accessions)
+     if db_version is None:
+         db_version = await fetch_alphafold_db_version()
+     nested_files = files_for_alphafold_entries(
+         uniprot_accessions, formats=formats, db_version=db_version, gzip_files=gzip_files
+     )
+     files: set[UrlFileNamePair] = set()
+     for uniprot_accession in uniprot_accessions:
+         files.update(nested_files[uniprot_accession].values())
+
+     retrieved_files = await retrieve_files(
+         files,
+         save_dir,
+         desc="Downloading AlphaFold files",
+         max_parallel_downloads=max_parallel_downloads,
+         cacher=cacher,
+         gzip_files=gzip_files,
+         raise_for_not_found=False,
+     )
+
+     retrieved_files_set = set(retrieved_files)
+     for uniprot_accession in uniprot_accessions:
+         entry = AlphaFoldEntry(uniprot_accession=uniprot_accession)
+
+         for af_format, url_filename_pair in nested_files[uniprot_accession].items():
+             _, filename = url_filename_pair
+             filepath = save_dir / filename
+             if filepath in retrieved_files_set:
+                 attr = AlphaFoldEntry.format2attr(af_format)
+                 setattr(entry, attr, filepath)
+             # else: the file was not found (404) during download, so the attribute stays None
+
+         yield entry
+
+
+ def fetch_many_async(
+     uniprot_accessions: Iterable[str],
+     save_dir: Path,
+     formats: set[DownloadableFormat],
+     db_version: str | None = None,
+     max_parallel_downloads: int = 5,
+     cacher: Cacher | None = None,
+     gzip_files: bool = False,
+     all_isoforms: bool = False,
+ ) -> AsyncGenerator[AlphaFoldEntry]:
+     """Asynchronously fetches summaries and/or files from the
+     [AlphaFold Protein Structure Database](https://alphafold.ebi.ac.uk/).
+
+     Args:
+         uniprot_accessions: The UniProt accessions to fetch.
+         save_dir: The directory to save the fetched files to.
+         formats: A set of formats to download.
+             If `summary` is in the set, summaries are fetched using the API endpoint
+             and the other files are then downloaded using static file URLs.
+             If `summary` is not in the set, all files are downloaded using static file
+             URLs only.
+         db_version: The version of the AlphaFold database to use. If None, the latest version will be used.
+         max_parallel_downloads: The maximum number of parallel downloads.
+         cacher: A cacher to use for caching the fetched files.
+         gzip_files: Whether to gzip the downloaded files.
+             Summaries are never gzipped.
+         all_isoforms: Whether to yield all isoforms of each UniProt entry.
+             When False, only the canonical sequence per UniProt entry is yielded.
+
+     Yields:
+         An AlphaFoldEntry dataclass per entry, containing the summary and the paths
+         of the downloaded files.
+
+     Raises:
+         ValueError: If the 'formats' set is empty.
+         ValueError: If all_isoforms is True and 'summary' is not in the 'formats' set.
+     """
+     if len(formats) == 0:
+         msg = "At least one format must be specified. The 'formats' argument is empty."
+         raise ValueError(msg)
+     if "summary" in formats:
+         if db_version is not None:
+             logger.warning("db_version is ignored when 'summary' is in 'formats' set. Always uses latest version.")
+         return _fetch_many_async_with_summary(
+             uniprot_accessions,
+             save_dir,
+             formats,
+             max_parallel_downloads=max_parallel_downloads,
+             cacher=cacher,
+             gzip_files=gzip_files,
+             all_isoforms=all_isoforms,
+         )
+     if all_isoforms:
+         msg = "Cannot fetch all isoforms when 'summary' is not in 'formats' set."
+         raise ValueError(msg)
+     return _fetch_many_async_without_summary(
+         uniprot_accessions,
+         save_dir,
+         formats,
+         db_version=db_version,
+         max_parallel_downloads=max_parallel_downloads,
+         cacher=cacher,
+         gzip_files=gzip_files,
+     )
+
+
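+ # A minimal async usage sketch (accession and directory are illustrative):
+ #
+ #     async def main() -> None:
+ #         async for entry in fetch_many_async(["Q5VSL9"], Path("downloads"), formats={"summary", "cif"}):
+ #             print(entry.uniprot_accession, entry.cif_file)
+
+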
+ def fetch_many(
+     uniprot_accessions: Iterable[str],
+     save_dir: Path,
+     formats: set[DownloadableFormat],
+     db_version: str | None = None,
+     max_parallel_downloads: int = 5,
+     cacher: Cacher | None = None,
+     gzip_files: bool = False,
+     all_isoforms: bool = False,
+ ) -> list[AlphaFoldEntry]:
+     """Synchronously fetches summaries and/or files like cif from the AlphaFold Protein Structure Database.
+
+     Args:
+         uniprot_accessions: The UniProt accessions to fetch.
+         save_dir: The directory to save the fetched files to.
+         formats: A set of formats to download.
+             If `summary` is in the set, summaries are fetched using the API endpoint
+             and the other files are then downloaded using static file URLs.
+             If `summary` is not in the set, all files are downloaded using static file
+             URLs only. Excluding 'summary' is much faster, as it avoids slow API calls.
+         db_version: The version of the AlphaFold database to use. If None, the latest version will be used.
+         max_parallel_downloads: The maximum number of parallel downloads.
+         cacher: A cacher to use for caching the fetched files.
+         gzip_files: Whether to gzip the downloaded files.
+             Summaries are never gzipped.
+         all_isoforms: Whether to return all isoforms of each UniProt entry.
+             When False, only the canonical sequence per UniProt entry is returned.
+
+     Returns:
+         A list of AlphaFoldEntry dataclasses containing the summary and the paths
+         of the downloaded files.
+     """
+
+     async def gather_entries():
+         return [
+             entry
+             async for entry in fetch_many_async(
+                 uniprot_accessions,
+                 save_dir,
+                 formats,
+                 db_version=db_version,
+                 max_parallel_downloads=max_parallel_downloads,
+                 cacher=cacher,
+                 gzip_files=gzip_files,
+                 all_isoforms=all_isoforms,
+             )
+         ]
+
+     return run_async(gather_entries())
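+
+
+ # A minimal synchronous usage sketch (accession and directory are illustrative):
+ #
+ #     entries = fetch_many(["Q5VSL9"], Path("downloads"), formats={"summary", "pdb"})
+ #     for entry in entries:
+ #         print(entry.uniprot_accession, entry.pdb_file, entry.nr_of_files())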