mgnify-pipelines-toolkit 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of mgnify-pipelines-toolkit might be problematic.

mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
+import shutil
+from shutil import SameFileError

 # Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
@@ -33,6 +35,7 @@ from mgnify_pipelines_toolkit.schemas.schemas import (
     AmpliconNonINSDCPassedRunsSchema,
     TaxonSchema,
     PR2TaxonSchema,
+    validate_dataframe,
 )

 logging.basicConfig(level=logging.DEBUG)
@@ -127,9 +130,9 @@ def parse_one_tax_file(
     # Two different schemas used for validation depending on the database
     # because PR2 schema has different taxonomic ranks than the standard
     if len(long_tax_ranks) == 8:
-        TaxonSchema(res_df)
+        validate_dataframe(res_df, TaxonSchema, str(tax_file))
     elif len(long_tax_ranks) == 9:
-        PR2TaxonSchema(res_df)
+        validate_dataframe(res_df, PR2TaxonSchema, str(tax_file))

     res_df["full_taxon"] = res_df.iloc[:, 1:].apply(
         lambda x: ";".join(x).strip(";"), axis=1
@@ -205,9 +208,7 @@ def generate_db_summary(
             amp_region_dict[amp_region].append(amp_region_df)

     for amp_region, amp_region_dfs in amp_region_dict.items():
-        if (
-            len(amp_region_dfs) > 1
-        ):  # Need at least two analyses with this amp_region to bother with the summary
+        if amp_region_dfs:
             amp_res_df = amp_region_dfs[0]
             for amp_df in amp_region_dfs[1:]:
                 amp_res_df = amp_res_df.join(amp_df, how="outer")
@@ -319,9 +320,7 @@ def summarise_analyses(
             if tax_file:
                 tax_files[run_acc] = tax_file

-        if (
-            len(tax_files) > 1
-        ):  # If at least two analyses have results from the current DB, generate a study-level summary for it
+        if tax_files:
             generate_db_summary(db_label, tax_files, output_prefix)

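Both relaxed checks above mean a study summary is now produced even when only a single analysis has results for a given amplified region or database: with one DataFrame in the list, the outer-join loop simply never runs. A minimal, illustrative pandas sketch (not package code):

import pandas as pd

amp_region_dfs = [pd.DataFrame({"run1": [3, 5]}, index=["taxA", "taxB"])]  # only one analysis

if amp_region_dfs:                        # previously: if len(amp_region_dfs) > 1
    amp_res_df = amp_region_dfs[0]
    for amp_df in amp_region_dfs[1:]:     # empty slice, so the loop body never runs
        amp_res_df = amp_res_df.join(amp_df, how="outer")
    print(amp_res_df)                     # identical to the single input DataFrame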
@@ -356,12 +355,12 @@ def merge_summaries(analyses_dir: str, output_prefix: str) -> None:
     :type output_prefix: str
     """

-    # TODO: The way we grab all the summaries might change depending on how the prefect side does things
     all_study_summaries = glob.glob(f"{analyses_dir}/*_study_summary.tsv")

     summaries_dict = organise_study_summaries(all_study_summaries)

     for db_label, summaries in summaries_dict.items():
+        merged_summary_name = f"{output_prefix}_{db_label}_study_summary.tsv"
         if len(summaries) > 1:
             res_df = pd.read_csv(summaries[0], sep="\t", index_col=0)
             for summary in summaries[1:]:
@@ -372,10 +371,18 @@ def merge_summaries(analyses_dir: str, output_prefix: str) -> None:

             res_df = res_df.reindex(sorted(res_df.columns), axis=1)
             res_df.to_csv(
-                f"{output_prefix}_{db_label}_study_summary.tsv",
+                merged_summary_name,
                 sep="\t",
                 index_label="taxonomy",
             )
+        elif len(summaries) == 1:
+            logging.info(
+                f"Only one summary ({summaries[0]}) so will use that as {merged_summary_name}"
+            )
+            try:
+                shutil.copyfile(summaries[0], merged_summary_name)
+            except SameFileError:
+                pass


 if __name__ == "__main__":
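The new elif branch copies the lone summary to the merged filename and ignores shutil.SameFileError, which shutil.copyfile raises when source and destination refer to the same file. A self-contained sketch of that behaviour (illustrative only; the file name below is hypothetical):

import shutil
import tempfile
from pathlib import Path
from shutil import SameFileError

tmp_dir = Path(tempfile.mkdtemp())
only_summary = tmp_dir / "ERP000001_DB_study_summary.tsv"   # hypothetical lone summary
only_summary.write_text("taxonomy\trun1\nBacteria\t42\n")

merged_summary_name = only_summary  # merged name happens to match the single input

try:
    shutil.copyfile(only_summary, merged_summary_name)
except SameFileError:
    pass  # file is already in place; nothing to copy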

mgnify_pipelines_toolkit/schemas/schemas.py

@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-
 # Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,11 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import logging
 import re

 from enum import Enum
-from typing import ClassVar, Optional
+from typing import ClassVar, Optional, Type
+
+import pandas as pd
 import pandera as pa
+from pandera.typing.common import DataFrameBase

 from pydantic import (
     Field,
@@ -215,3 +218,18 @@ class PR2TaxonSchema(pa.DataFrameModel):

         dtype = PydanticModel(PR2TaxonRecord)
         coerce = True
+
+
+def validate_dataframe(
+    df: pd.DataFrame, schema: Type[pa.DataFrameModel], df_metadata: str
+) -> DataFrameBase:
+    """
+    Validate a pandas dataframe using a pandera schema.
+    df_metadata will be shown in logs on failure: example, the TSV filename from which the df was read.
+    """
+    try:
+        dfs = schema.validate(df, lazy=True)
+    except pa.errors.SchemaErrors as e:
+        logging.error(f"{schema.__name__} validation failure for {df_metadata}")
+        raise e
+    return dfs
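The new validate_dataframe helper wraps pandera's lazy validation so every failing check is collected before SchemaErrors is raised, and the df_metadata string (for example the TSV path the dataframe was read from) is logged so the offending file can be identified. A minimal usage sketch with a hypothetical schema (not one of the package's models):

import logging

import pandas as pd
import pandera as pa
from pandera.typing import Series


class ToyTaxonSchema(pa.DataFrameModel):  # hypothetical stand-in schema
    Kingdom: Series[str]
    Count: Series[int] = pa.Field(ge=0)


df = pd.DataFrame({"Kingdom": ["Bacteria", "Archaea"], "Count": [10, -1]})

try:
    ToyTaxonSchema.validate(df, lazy=True)  # lazy=True collects all errors first
except pa.errors.SchemaErrors:
    logging.error("ToyTaxonSchema validation failure for toy_taxa.tsv")  # made-up file name
    raise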

mgnify_pipelines_toolkit-1.0.0.dist-info/METADATA → mgnify_pipelines_toolkit-1.0.1.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mgnify_pipelines_toolkit
-Version: 1.0.0
+Version: 1.0.1
 Summary: Collection of scripts and tools for MGnify pipelines
 Author-email: MGnify team <metagenomics-help@ebi.ac.uk>
 License: Apache Software License 2.0

mgnify_pipelines_toolkit-1.0.0.dist-info/RECORD → mgnify_pipelines_toolkit-1.0.1.dist-info/RECORD

@@ -29,20 +29,20 @@ mgnify_pipelines_toolkit/analysis/shared/get_subunits_coords.py,sha256=EH5RyzesL
 mgnify_pipelines_toolkit/analysis/shared/library_strategy_check.py,sha256=6Ck2NhwRWw66GctUtKDdPT5fwJhWFR_YOZq-Vxwoa8A,1996
 mgnify_pipelines_toolkit/analysis/shared/mapseq2biom.py,sha256=7-U0DN1joVu0ifLOoDUK2Pfqy8rb1RDKT6khVg3jky0,5559
 mgnify_pipelines_toolkit/analysis/shared/markergene_study_summary.py,sha256=sKAo_rKEyVAZXSaIFMkpSoYZxiWwXMA3XDA6Z-hbHgg,7904
-mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py,sha256=SosRFtW2PWr3dzvLEvYHQFZgGFX0LkQe30sGl3ozThA,13685
+mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py,sha256=OOqKaQmKGAya6_BZgfcWBZSVlmZ918PQTVMv6KwGIns,13827
 mgnify_pipelines_toolkit/constants/db_labels.py,sha256=omPINMylAjO2PxeFhSk2MbYNcGZH3P82optSlMey3dw,858
 mgnify_pipelines_toolkit/constants/regex_ambiguous_bases.py,sha256=7nEOODQq35y9wx9YnvJuo29oBpwTpXg_kIbf_t7N4TQ,1093
 mgnify_pipelines_toolkit/constants/regex_fasta_header.py,sha256=G-xrc9b8zdmPTaOICD2b3RCVeFAEOVkfRkIfotQ7gek,1193
 mgnify_pipelines_toolkit/constants/tax_ranks.py,sha256=kMq__kOJcbiwsgolkdvb-XLo3WMnJdEXgedjUyMOYjI,1081
 mgnify_pipelines_toolkit/constants/thresholds.py,sha256=guDE7c4KrVJEfg_AcO_cQoJM6LGGaRlmo_U2i8d4N7g,1157
 mgnify_pipelines_toolkit/constants/var_region_coordinates.py,sha256=0bM4MwarFiM5yTcp5AbAmQ0o-q-gWy7kknir9zJ9R0A,1312
-mgnify_pipelines_toolkit/schemas/schemas.py,sha256=Iwps_YtOrIzCuADBgjjJU5VSKb4G0OQZLJfvwRNGN3A,7103
+mgnify_pipelines_toolkit/schemas/schemas.py,sha256=pnH8LUH8i2ACNvFNWyG-n-eIHZcI5O9UDYulkh43mec,7692
 mgnify_pipelines_toolkit/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mgnify_pipelines_toolkit/utils/fasta_to_delimited.py,sha256=lgYIR1S4crURY7C7nFtgE6QMV4u4zCNsUrVkcRnsEEo,3996
 mgnify_pipelines_toolkit/utils/get_mpt_version.py,sha256=aS9bWrC9CP7tpxoEVg6eEYt18-pmjG7fJl5Mchz4YOU,798
-mgnify_pipelines_toolkit-1.0.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-mgnify_pipelines_toolkit-1.0.0.dist-info/METADATA,sha256=46IhEb_9fA1DuCMiQDWyc3yv4EcoZ9KhZ77hWBmjHjA,6181
-mgnify_pipelines_toolkit-1.0.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
-mgnify_pipelines_toolkit-1.0.0.dist-info/entry_points.txt,sha256=cTTjlAPQafv9uLrsV4PUGWZgU61qaY8j6uvu0FEpO4A,2309
-mgnify_pipelines_toolkit-1.0.0.dist-info/top_level.txt,sha256=xA_wC7C01V3VwuDnqwRM2QYeJJ45WtvF6LVav4tYxuE,25
-mgnify_pipelines_toolkit-1.0.0.dist-info/RECORD,,
+mgnify_pipelines_toolkit-1.0.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mgnify_pipelines_toolkit-1.0.1.dist-info/METADATA,sha256=3xW9nS84AonTMO6tWU03fii6CqyV5-oa7pa4XrlYvWE,6181
+mgnify_pipelines_toolkit-1.0.1.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+mgnify_pipelines_toolkit-1.0.1.dist-info/entry_points.txt,sha256=MsQXFdzL_dd7-2V6kHtA-QCf_iSQ-FmDcB9nZMLzJ98,2301
+mgnify_pipelines_toolkit-1.0.1.dist-info/top_level.txt,sha256=xA_wC7C01V3VwuDnqwRM2QYeJJ45WtvF6LVav4tYxuE,25
+mgnify_pipelines_toolkit-1.0.1.dist-info/RECORD,,
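For reference, each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with the trailing "=" padding stripped; that is why the two edited modules get new hashes and sizes above. A sketch of how such a row is computed (illustrative only, not package tooling):

import base64
import hashlib
from pathlib import Path


def record_row(path: str) -> str:
    # Hash the file, base64-encode the digest (urlsafe, no padding), append the size.
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"


# e.g. record_row("mgnify_pipelines_toolkit/schemas/schemas.py")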

mgnify_pipelines_toolkit-1.0.0.dist-info/entry_points.txt → mgnify_pipelines_toolkit-1.0.1.dist-info/entry_points.txt

@@ -4,7 +4,7 @@ are_there_primers = mgnify_pipelines_toolkit.analysis.amplicon.are_there_primers
 assess_inflection_point_mcp = mgnify_pipelines_toolkit.analysis.amplicon.assess_inflection_point_mcp:main
 assess_mcp_proportions = mgnify_pipelines_toolkit.analysis.amplicon.assess_mcp_proportions:main
 classify_var_regions = mgnify_pipelines_toolkit.analysis.amplicon.classify_var_regions:main
-combined_gene_caller_merge = mgnify_pipelines_toolkit.analysis.assembly.combined_gene_caller_merge:combine_main
+combined_gene_caller_merge = mgnify_pipelines_toolkit.analysis.assembly.combined_gene_caller_merge:main
 dwc_summary_generator = mgnify_pipelines_toolkit.analysis.assembly.dwc_summary_generator:main
 fasta_to_delimited = mgnify_pipelines_toolkit.utils.fasta_to_delimited:main
 fastq_suffix_header_check = mgnify_pipelines_toolkit.analysis.shared.fastq_suffix_header_check:main
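A console_scripts line of the form name = package.module:func makes the installed name command import package.module and call func(); the change above points the combined_gene_caller_merge command at main instead of combine_main. A sketch of resolving the entry point by hand (illustrative only; assumes Python 3.10+ and the package installed):

from importlib.metadata import entry_points

# Select all console-script entry points, then pick this package's command.
eps = entry_points(group="console_scripts")
ep = next(e for e in eps if e.name == "combined_gene_caller_merge")
print(ep.value)   # with 1.0.1 installed: ...analysis.assembly.combined_gene_caller_merge:main
command = ep.load()  # imports the module and returns the callable the command runs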