gsrap 0.8.2__tar.gz → 0.9.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {gsrap-0.8.2 → gsrap-0.9.0}/PKG-INFO +1 -1
- {gsrap-0.8.2 → gsrap-0.9.0}/pyproject.toml +1 -1
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/.ipynb_checkpoints/__init__-checkpoint.py +2 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/__init__.py +2 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/kegg_compound_to_others.pickle +0 -0
- gsrap-0.9.0/src/gsrap/assets/kegg_reaction_to_others.pickle +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/downloads-checkpoint.py +96 -4
- gsrap-0.9.0/src/gsrap/commons/.ipynb_checkpoints/escherutils-checkpoint.py +108 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/excelhub-checkpoint.py +2 -2
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/downloads.py +96 -4
- gsrap-0.9.0/src/gsrap/commons/escherutils.py +108 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/excelhub.py +2 -2
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/getmaps/.ipynb_checkpoints/getmaps-checkpoint.py +14 -5
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/getmaps/.ipynb_checkpoints/kdown-checkpoint.py +75 -4
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/getmaps/getmaps.py +14 -5
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/getmaps/kdown.py +75 -4
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/annotation-checkpoint.py +9 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py +45 -11
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/manual-checkpoint.py +10 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py +40 -19
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/repeating-checkpoint.py +2 -2
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/annotation.py +9 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/completeness.py +45 -11
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/manual.py +10 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/parsedb.py +40 -19
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/repeating.py +2 -2
- gsrap-0.8.2/src/gsrap/assets/kegg_reaction_to_others.pickle +0 -0
- gsrap-0.8.2/src/gsrap/commons/.ipynb_checkpoints/escherutils-checkpoint.py +0 -37
- gsrap-0.8.2/src/gsrap/commons/escherutils.py +0 -37
- {gsrap-0.8.2 → gsrap-0.9.0}/LICENSE.txt +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/README.md +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM1-checkpoint.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM2A-checkpoint.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM3B-checkpoint.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM4A-checkpoint.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/PM1.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/PM2A.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/PM3B.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/PM4A.csv +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/__init__.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/biomass-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/coeffs-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/figures-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/fluxbal-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/keggutils-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/logutils-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/medium-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/memoteutils-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/metrics-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/sbmlutils-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/__init__.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/biomass.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/coeffs.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/figures.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/fluxbal.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/keggutils.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/logutils.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/medium.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/memoteutils.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/metrics.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/sbmlutils.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/getmaps/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/getmaps/__init__.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/biologcuration-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/gapfill-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/gapfillutils-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/polishing-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/pruner-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/__init__.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/biologcuration.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/gapfill.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/gapfillutils.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/mkmodel.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/polishing.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/mkmodel/pruner.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/cycles-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/introduce-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/__init__.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/cycles.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/parsedb/introduce.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/biosynth-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/cnps-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/essentialgenes-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/growthfactors-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/precursors-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/runsims-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/singleomission-checkpoint.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/__init__.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/biosynth.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/cnps.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/essentialgenes.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/growthfactors.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/precursors.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/runsims.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/simplegrowth.py +0 -0
- {gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/runsims/singleomission.py +0 -0
{gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/__init__.py (identical hunk in .ipynb_checkpoints/__init__-checkpoint.py)

@@ -75,12 +75,14 @@ def main():
     parsedb_parser.add_argument("-z", "--initialize", metavar='', type=str, default='-', help="Initialize the universe on the provided medium. By default, the first medium in --media is used. Use 'none' to avoid initialization.")
     parsedb_parser.add_argument("--precursors", action='store_true', help="Verify biosynthesis of biomass precursors and show blocked ones.")
     parsedb_parser.add_argument("--biosynth", action='store_true', help="Check biosynthesis of all metabolites and detect dead-ends.")
+    parsedb_parser.add_argument("-t", "--taxon", metavar='', type=str, default='-', help="High-level taxon of interest. If provided, it must follow the syntax '{level}:{name}', where {level} is 'kingdom' or 'phylum'.")
     parsedb_parser.add_argument("-e", "--eggnog", nargs='+', metavar='', type=str, default='-', help="Path to the optional eggnog-mapper annotation table(s).")
     parsedb_parser.add_argument("-k", "--keggorg", metavar='', type=str, default='-', help="A single KEGG Organism code. If provided, it takes precedence over --eggnog.")
     parsedb_parser.add_argument("--goodbefore", metavar='', type=str, default='-', help="Syntax is {pure_mid}-{rid1}-{rid2}. From top to bottom, build the universe until reaction {rid1}, transport {rid2} and metabolite {pure_mid} are reached.")
     parsedb_parser.add_argument("--onlyauthor", metavar='', type=str, default='-', help="Build the universe by parsing contents of the specified author ID only. Contents affected by --goodbefore are parsed anyway.")
     parsedb_parser.add_argument("--nofigs", action='store_true', help="Do not generate figures.")
     parsedb_parser.add_argument("-j", "--justparse", action='store_true', help="Just parse the database without performing extra activities (saves time during universe expansion).")
+    parsedb_parser.add_argument("-d", "--keepdisconn", action='store_true', help="Do not remove disconnected metabolites.")
 
 
 
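For orientation, a minimal sketch of the two options this hunk adds to the parsedb subcommand, exercised with a stand-alone argparse parser. Only the two add_argument calls are taken from the hunk above; the parent parser, the remaining options and the example values are assumptions.

import argparse

# Stand-alone stand-in for the parsedb subparser; only -t/--taxon and
# -d/--keepdisconn are copied from the hunk, everything else is omitted.
parser = argparse.ArgumentParser(prog="gsrap parsedb")
parser.add_argument("-t", "--taxon", metavar='', type=str, default='-')
parser.add_argument("-d", "--keepdisconn", action='store_true')

args = parser.parse_args(["-t", "phylum:Bacteroidota", "-d"])
print(args.taxon)        # phylum:Bacteroidota
print(args.keepdisconn)  # True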
{gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/assets/kegg_compound_to_others.pickle
Binary file

gsrap-0.9.0/src/gsrap/assets/kegg_reaction_to_others.pickle
Binary file
{gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/downloads.py (identical hunk in .ipynb_checkpoints/downloads-checkpoint.py)

@@ -243,7 +243,99 @@ def format_expansion(logger, eggnog):
 
 
 
-
-
-
-
+def check_taxon(logger, taxon, idcollection_dict):
+
+
+    # verify presence of needed assets
+    if 'ko_to_taxa' not in idcollection_dict.keys():
+        logger.error(f"Asset 'ko_to_taxa' not found in 'gsrap.maps'. Please update 'gsrap.maps' with 'gsrap getmaps'.")
+        return 1
+
+
+    # extract level and name
+    try: level, name = taxon.split(':')
+    except:
+        logger.error(f"Provided --taxon is not well formatted: '{taxon}'.")
+        return 1
+
+
+    # compute available levels and check
+    avail_levels = set(['kingdom', 'phylum'])
+    if level not in avail_levels:
+        logger.error(f"Provided level is not acceptable: '{level}' (see --taxon). Acceptable levels are {avail_levels}.")
+        return 1
+
+
+    # compute available taxa at input level
+    avail_taxa_at_level = set()
+    ko_to_taxa = idcollection_dict['ko_to_taxa']
+    for ko in ko_to_taxa.keys():
+        for taxon_name in ko_to_taxa[ko][level]:
+            avail_taxa_at_level.add(taxon_name)
+    if name not in avail_taxa_at_level:
+        logger.error(f"Provided taxon name is not acceptable: '{name}' (see --taxon). Acceptable taxon names for level '{level}' are {avail_taxa_at_level}.")
+        return 1
+
+
+    """
+    sorted(list(df.query("kingdom == 'Bacteria'")['phylum'].unique()))
+    ['Acidobacteriota',
+     'Actinomycetota',
+     'Alphaproteobacteria',
+     'Aquificota',
+     'Armatimonadota',
+     'Atribacterota',
+     'Bacilli',
+     'Bacteria incertae sedis',
+     'Bacteroidota',
+     'Balneolota',
+     'Bdellovibrionota',
+     'Betaproteobacteria',
+     'Caldisericota',
+     'Calditrichota',
+     'Campylobacterota',
+     'Chlamydiota',
+     'Chlorobiota',
+     'Chloroflexota',
+     'Chrysiogenota',
+     'Cloacimonadota',
+     'Clostridia',
+     'Coprothermobacterota',
+     'Cyanobacteriota',
+     'Deferribacterota',
+     'Deinococcota',
+     'Deltaproteobacteria',
+     'Dictyoglomota',
+     'Elusimicrobiota',
+     'Enterobacteria',
+     'Fibrobacterota',
+     'Fidelibacterota',
+     'Fusobacteriota',
+     'Gemmatimonadota',
+     'Ignavibacteriota',
+     'Kiritimatiellota',
+     'Lentisphaerota',
+     'Melainabacteria',
+     'Mycoplasmatota',
+     'Myxococcota',
+     'Nitrospinota',
+     'Nitrospirota',
+     'Omnitrophota',
+     'Planctomycetota',
+     'Rhodothermota',
+     'Spirochaetota',
+     'Synergistota',
+     'Thermodesulfobacteriota',
+     'Thermodesulfobiota',
+     'Thermomicrobiota',
+     'Thermosulfidibacterota',
+     'Thermotogota',
+     'Verrucomicrobiota',
+     'Vulcanimicrobiota',
+     'other Bacillota',
+     'other Gammaproteobacteria',
+     'other Pseudomonadota',
+     'unclassified Bacteria']
+    """
+
+    return 0
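A minimal sketch of how the new check_taxon() helper could be exercised. The import path follows the file layout above; the logger setup and the toy 'ko_to_taxa' asset (whose KO -> {level -> taxon names} shape is inferred from the loops in the hunk) are assumptions, with hypothetical values.

import logging
from gsrap.commons.downloads import check_taxon  # import path assumed from the layout above

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("gsrap")

# Toy stand-in for the 'ko_to_taxa' asset normally shipped in 'gsrap.maps';
# keys and values here are hypothetical.
idcollection_dict = {
    'ko_to_taxa': {
        'K00001': {'kingdom': {'Bacteria'}, 'phylum': {'Bacteroidota', 'Pseudomonadota'}},
    }
}

print(check_taxon(logger, 'phylum:Bacteroidota', idcollection_dict))   # 0: accepted
print(check_taxon(logger, 'order:Bacteroidales', idcollection_dict))   # 1: 'order' is not an accepted level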
gsrap-0.9.0/src/gsrap/commons/escherutils.py (new file; identical to .ipynb_checkpoints/escherutils-checkpoint.py)

@@ -0,0 +1,108 @@
+import warnings
+import logging
+
+
+import cobra
+
+
+
+def print_json_tree(data, level=0, max_level=2):
+    # explore contents of a json object
+
+    if level > max_level:
+        return
+    indent = ' ' * level
+    if isinstance(data, dict):
+        for key, value in data.items():
+            print(f"{indent}{key}")
+            print_tree(value, level + 1, max_level)
+    elif isinstance(data, list):
+        for i, item in enumerate(data):
+            print(f"{indent}[{i}]")
+            print_tree(item, level + 1, max_level)
+
+
+
+def count_undrawn_rids(logger, universe, lastmap, focus):
+
+
+    rids = set([r.id for r in universe.reactions])
+
+    drawn_rids = set()
+    for key, value in lastmap['json'][1]['reactions'].items():
+        drawn_rids.add(value['bigg_id'])
+
+
+    remainings = rids - drawn_rids
+    filename = lastmap['filename']
+    logger.debug(f"Last universal map version detected: '{filename}'.")
+    if len(remainings) > 0:
+        logger.warning(f"Our universal map is {len(remainings)} reactions behind. Please draw!")
+        if focus == '-':
+            logger.warning(f"Drawing is eased when using '--focus'...")
+    else:
+        logger.info(f"Our universal map is {len(remainings)} reactions behind. Thank you ♥")
+
+
+
+def count_undrawn_rids_focus(logger, universe, lastmap, focus, outdir):
+
+
+    # get modeled reads for this --focus:
+    rids = set()
+    try: gr = universe.groups.get_by_id(focus)
+    except:
+        logger.warning(f"Group '{focus}' not found!")
+        return
+    for r in gr.members:
+        rids.add(r.id)
+
+
+    # get rids on Escher:
+    drawn_rids = set()
+    for key, value in lastmap['json'][1]['reactions'].items():
+        drawn_rids.add(value['bigg_id'])
+
+
+    # get remaining rids for this map:
+    remainings = rids - drawn_rids
+    remainings_krs = set()
+    for rid in remainings:
+        r = universe.reactions.get_by_id(rid)
+        krs = r.annotation['kegg.reaction']
+        for kr in krs:
+            remainings_krs.add(kr)
+
+
+    if len(remainings) > 0:
+        if focus != 'transport':
+            logger.warning(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind: {' '.join(list(remainings_krs))}.")
+        else:
+            logger.warning(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind.")  # usually no kegg codes for tranport reactions
+
+
+        # subset the universe to ease the drawing:
+        universe_focus = universe.copy()
+        to_remove = [r for r in universe_focus.reactions if r.id not in rids]
+
+
+        # trick to avoid the WARNING "cobra/core/group.py:147: UserWarning: need to pass in a list"
+        # triggered when trying to remove reactions that are included in groups.
+        with warnings.catch_warnings():  # temporarily suppress warnings for this block
+            warnings.simplefilter("ignore")  # ignore all warnings
+            cobra_logger = logging.getLogger("cobra.util.solver")
+            old_level = cobra_logger.level
+            cobra_logger.setLevel(logging.ERROR)
+
+            universe_focus.remove_reactions(to_remove, remove_orphans=True)
+
+            # restore original behaviour:
+            cobra_logger.setLevel(old_level)
+
+
+        # save the subset for drawing in Escher!
+        logger.info(f"Writing '{outdir}/{focus}.json' to ease your drawing workflow...")
+        cobra.io.save_json_model(universe_focus, f'{outdir}/{focus}.json')
+    else:
+        logger.info(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind. Thank you ♥")
+
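A minimal sketch of how count_undrawn_rids() could be driven. The import path follows the layout above; the toy cobra model and the stripped-down 'lastmap' dictionary (only the fields the function reads: 'filename' and 'json'[1]['reactions'][...]['bigg_id']) are assumptions with hypothetical identifiers.

import logging
import cobra
from gsrap.commons.escherutils import count_undrawn_rids  # import path assumed from the layout above

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("gsrap")

# Toy universe with a single reaction (hypothetical identifiers).
universe = cobra.Model('toy_universe')
g6p = cobra.Metabolite('g6p_c')
f6p = cobra.Metabolite('f6p_c')
pgi = cobra.Reaction('PGI')
pgi.add_metabolites({g6p: -1.0, f6p: 1.0})
universe.add_reactions([pgi])

# Stand-in for a parsed Escher map; only the fields read by the function are filled.
lastmap = {
    'filename': 'universal_map_v1.json',  # hypothetical
    'json': [{}, {'reactions': {'0': {'bigg_id': 'PGI'}}}],
}

count_undrawn_rids(logger, universe, lastmap, '-')  # every modeled reaction is already drawn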
{gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/commons/excelhub.py (identical hunks in .ipynb_checkpoints/excelhub-checkpoint.py)

@@ -148,7 +148,7 @@ def write_excel_model(model, filepath, nofigs, memote_results_dict, df_E, df_B,
         else: df_T.append(row_dict)
 
     for g in model.genes:
-        row_dict = {'gid': g.id, 'involved_in': '; '.join([r.id for r in g.reactions])}
+        row_dict = {'gid': g.id, 'name': g.name, 'involved_in': '; '.join([r.id for r in g.reactions])}
 
         for db in g.annotation.keys():
            annots = g.annotation[db]
@@ -171,7 +171,7 @@ def write_excel_model(model, filepath, nofigs, memote_results_dict, df_E, df_B,
     df_R = df_R[df_R_first_cols + sorted([c for c in df_R.columns if c not in df_R_first_cols])]
     df_T = df_T[df_R_first_cols + sorted([c for c in df_T.columns if c not in df_R_first_cols])]
     df_A = df_A[df_R_first_cols + sorted([c for c in df_A.columns if c not in df_R_first_cols])]
-    df_G_first_cols = ['gid', 'involved_in']
+    df_G_first_cols = ['gid', 'name', 'involved_in']
     df_G = df_G[df_G_first_cols + sorted([c for c in df_G.columns if c not in df_G_first_cols])]
 
 
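The genes sheet now carries a 'name' column right after 'gid'. A small standalone sketch of the column-reordering idiom used above, on toy data (pandas assumed, values hypothetical):

import pandas as pd

# Toy gene table; values are hypothetical.
df_G = pd.DataFrame([
    {'gid': 'b2388', 'involved_in': 'HEX1', 'ncbigene': '946858', 'name': 'glk'},
])

df_G_first_cols = ['gid', 'name', 'involved_in']
df_G = df_G[df_G_first_cols + sorted([c for c in df_G.columns if c not in df_G_first_cols])]
print(df_G.columns.tolist())  # ['gid', 'name', 'involved_in', 'ncbigene']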
{gsrap-0.8.2 → gsrap-0.9.0}/src/gsrap/getmaps/getmaps.py (an identical change set is listed for the .ipynb_checkpoints copy)

@@ -4,6 +4,7 @@ import pickle
 
 
 from .kdown import download_raw_txtfiles
+from .kdown import create_dict_keggorg
 from .kdown import create_dict_ko
 from .kdown import create_dict_c
 from .kdown import create_dict_r
@@ -20,13 +21,19 @@ def do_kdown(logger, outdir, usecache, keeptmp):
     logger.info(f"Respectfully retrieving metabolic information from KEGG. Raw data are being saved into '{outdir}/kdown/'. Be patient, could take a couple of days...")
     os.makedirs(f'{outdir}/kdown/', exist_ok=True)
 
+
     response = download_raw_txtfiles(logger, outdir, usecache)
     if type(response) == int: return 1
     else: RELEASE_kegg = response
 
+
 
     logger.info("Parsing downloaded KEGG information...")
-
+
+    response = create_dict_keggorg(logger, outdir)
+    if type(response) == int: return 1
+    else: dict_keggorg = response
+
     response = create_dict_ko(logger, outdir)
     if type(response) == int: return 1
     else: dict_ko = response
@@ -49,7 +56,7 @@ def do_kdown(logger, outdir, usecache, keeptmp):
 
 
     # create 'idcollection_dict' and 'summary_dict' dictionaries
-    idcollection_dict = create_idcollection_dict(dict_ko, dict_c, dict_r, dict_map, dict_md)
+    idcollection_dict = create_idcollection_dict(dict_keggorg, dict_ko, dict_c, dict_r, dict_map, dict_md)
     summary_dict = create_summary_dict(dict_c, dict_r, dict_map, dict_md)
 
 
@@ -57,7 +64,6 @@ def do_kdown(logger, outdir, usecache, keeptmp):
 
 
 
-
 def main(args, logger):
 
 
@@ -67,7 +73,7 @@ def main(args, logger):
     os.makedirs(f'{args.outdir}/', exist_ok=True)
 
 
-    # KEGG
+    # KEGG download
     response = do_kdown(logger, args.outdir, args.usecache, args.keeptmp)
     if type(response) == int: return 1
     else: RELEASE_kegg, idcollection_dict, summary_dict = response[0], response[1], response[2]
@@ -76,7 +82,9 @@ def main(args, logger):
     # create 'gsrap.maps':
     with open(f'{args.outdir}/gsrap.maps', 'wb') as wb_handler:
         pickle.dump({
-            'RELEASE_kegg': RELEASE_kegg,
+            'RELEASE_kegg': RELEASE_kegg,
+            'idcollection_dict': idcollection_dict,
+            'summary_dict': summary_dict,
         }, wb_handler)
     logger.info(f"'{args.outdir}/gsrap.maps' created!")
 
@@ -87,4 +95,5 @@ def main(args, logger):
         logger.info(f"Temporary raw files deleted!")
 
 
+
     return 0
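With the hunk above, 'gsrap.maps' now pickles three entries instead of one. A minimal sketch of reading them back; the output directory is whatever was passed to 'gsrap getmaps', so the path below is hypothetical.

import pickle

# 'gsrap.maps' is written by main() above into the chosen --outdir;
# the path used here is hypothetical.
with open('out/gsrap.maps', 'rb') as rb_handler:
    maps = pickle.load(rb_handler)

print(maps['RELEASE_kegg'])
print(sorted(maps['idcollection_dict'].keys()))  # expected to include 'ko_to_taxa', consumed by check_taxon()
print(sorted(maps['summary_dict'].keys()))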