gsrap 0.8.3__tar.gz → 0.9.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. {gsrap-0.8.3 → gsrap-0.9.0}/PKG-INFO +1 -1
  2. {gsrap-0.8.3 → gsrap-0.9.0}/pyproject.toml +1 -1
  3. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/.ipynb_checkpoints/__init__-checkpoint.py +1 -0
  4. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/__init__.py +1 -0
  5. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/kegg_compound_to_others.pickle +0 -0
  6. gsrap-0.9.0/src/gsrap/assets/kegg_reaction_to_others.pickle +0 -0
  7. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/downloads-checkpoint.py +62 -1
  8. gsrap-0.9.0/src/gsrap/commons/.ipynb_checkpoints/escherutils-checkpoint.py +108 -0
  9. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/downloads.py +62 -1
  10. gsrap-0.9.0/src/gsrap/commons/escherutils.py +108 -0
  11. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/annotation-checkpoint.py +9 -0
  12. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py +13 -6
  13. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/manual-checkpoint.py +10 -0
  14. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py +17 -7
  15. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/repeating-checkpoint.py +2 -2
  16. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/annotation.py +9 -0
  17. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/completeness.py +13 -6
  18. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/manual.py +10 -0
  19. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/parsedb.py +17 -7
  20. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/repeating.py +2 -2
  21. gsrap-0.8.3/src/gsrap/assets/kegg_reaction_to_others.pickle +0 -0
  22. gsrap-0.8.3/src/gsrap/commons/.ipynb_checkpoints/escherutils-checkpoint.py +0 -37
  23. gsrap-0.8.3/src/gsrap/commons/escherutils.py +0 -37
  24. {gsrap-0.8.3 → gsrap-0.9.0}/LICENSE.txt +0 -0
  25. {gsrap-0.8.3 → gsrap-0.9.0}/README.md +0 -0
  26. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM1-checkpoint.csv +0 -0
  27. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM2A-checkpoint.csv +0 -0
  28. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM3B-checkpoint.csv +0 -0
  29. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/.ipynb_checkpoints/PM4A-checkpoint.csv +0 -0
  30. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/PM1.csv +0 -0
  31. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/PM2A.csv +0 -0
  32. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/PM3B.csv +0 -0
  33. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/PM4A.csv +0 -0
  34. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/assets/__init__.py +0 -0
  35. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  36. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/biomass-checkpoint.py +0 -0
  37. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/coeffs-checkpoint.py +0 -0
  38. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/excelhub-checkpoint.py +0 -0
  39. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/figures-checkpoint.py +0 -0
  40. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/fluxbal-checkpoint.py +0 -0
  41. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/keggutils-checkpoint.py +0 -0
  42. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/logutils-checkpoint.py +0 -0
  43. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/medium-checkpoint.py +0 -0
  44. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/memoteutils-checkpoint.py +0 -0
  45. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/metrics-checkpoint.py +0 -0
  46. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/.ipynb_checkpoints/sbmlutils-checkpoint.py +0 -0
  47. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/__init__.py +0 -0
  48. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/biomass.py +0 -0
  49. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/coeffs.py +0 -0
  50. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/excelhub.py +0 -0
  51. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/figures.py +0 -0
  52. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/fluxbal.py +0 -0
  53. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/keggutils.py +0 -0
  54. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/logutils.py +0 -0
  55. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/medium.py +0 -0
  56. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/memoteutils.py +0 -0
  57. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/metrics.py +0 -0
  58. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/commons/sbmlutils.py +0 -0
  59. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/getmaps/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  60. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/getmaps/.ipynb_checkpoints/getmaps-checkpoint.py +0 -0
  61. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/getmaps/.ipynb_checkpoints/kdown-checkpoint.py +0 -0
  62. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/getmaps/__init__.py +0 -0
  63. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/getmaps/getmaps.py +0 -0
  64. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/getmaps/kdown.py +0 -0
  65. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  66. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/biologcuration-checkpoint.py +0 -0
  67. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/gapfill-checkpoint.py +0 -0
  68. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/gapfillutils-checkpoint.py +0 -0
  69. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py +0 -0
  70. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/polishing-checkpoint.py +0 -0
  71. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/.ipynb_checkpoints/pruner-checkpoint.py +0 -0
  72. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/__init__.py +0 -0
  73. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/biologcuration.py +0 -0
  74. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/gapfill.py +0 -0
  75. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/gapfillutils.py +0 -0
  76. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/mkmodel.py +0 -0
  77. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/polishing.py +0 -0
  78. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/mkmodel/pruner.py +0 -0
  79. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  80. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/cycles-checkpoint.py +0 -0
  81. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/.ipynb_checkpoints/introduce-checkpoint.py +0 -0
  82. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/__init__.py +0 -0
  83. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/cycles.py +0 -0
  84. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/parsedb/introduce.py +0 -0
  85. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  86. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/biosynth-checkpoint.py +0 -0
  87. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/cnps-checkpoint.py +0 -0
  88. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/essentialgenes-checkpoint.py +0 -0
  89. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/growthfactors-checkpoint.py +0 -0
  90. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/precursors-checkpoint.py +0 -0
  91. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/runsims-checkpoint.py +0 -0
  92. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py +0 -0
  93. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/.ipynb_checkpoints/singleomission-checkpoint.py +0 -0
  94. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/__init__.py +0 -0
  95. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/biosynth.py +0 -0
  96. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/cnps.py +0 -0
  97. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/essentialgenes.py +0 -0
  98. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/growthfactors.py +0 -0
  99. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/precursors.py +0 -0
  100. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/runsims.py +0 -0
  101. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/simplegrowth.py +0 -0
  102. {gsrap-0.8.3 → gsrap-0.9.0}/src/gsrap/runsims/singleomission.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: gsrap
- Version: 0.8.3
+ Version: 0.9.0
 Summary:
 License: GNU General Public License v3.0
 Author: Gioele Lazzari
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gsrap"
- version = "0.8.3"
+ version = "0.9.0"
 description = ""
 authors = ["Gioele Lazzari"]
 license = "GNU General Public License v3.0"
@@ -82,6 +82,7 @@ def main():
 parsedb_parser.add_argument("--onlyauthor", metavar='', type=str, default='-', help="Build the universe by parsing contents of the specified author ID only. Contents affected by --goodbefore are parsed anyway.")
 parsedb_parser.add_argument("--nofigs", action='store_true', help="Do not generate figures.")
 parsedb_parser.add_argument("-j", "--justparse", action='store_true', help="Just parse the database without performing extra activities (saves time during universe expansion).")
+ parsedb_parser.add_argument("-d", "--keepdisconn", action='store_true', help="Do not remove disconnected metabolites.")
 
 
 
@@ -82,6 +82,7 @@ def main():
 parsedb_parser.add_argument("--onlyauthor", metavar='', type=str, default='-', help="Build the universe by parsing contents of the specified author ID only. Contents affected by --goodbefore are parsed anyway.")
 parsedb_parser.add_argument("--nofigs", action='store_true', help="Do not generate figures.")
 parsedb_parser.add_argument("-j", "--justparse", action='store_true', help="Just parse the database without performing extra activities (saves time during universe expansion).")
+ parsedb_parser.add_argument("-d", "--keepdisconn", action='store_true', help="Do not remove disconnected metabolites.")
 
 
 
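The two identical hunks above (matching the +1/-0 entries for __init__.py and its .ipynb_checkpoints copy in the file list) add a -d/--keepdisconn switch to the parsedb subcommand. A minimal sketch of how the flag would be wired and consumed; the console-script name and the surrounding parser setup are assumptions, only the add_argument() call is taken from the diff:

    # hypothetical invocation:  gsrap parsedb --keepdisconn ...
    import argparse

    parser = argparse.ArgumentParser(prog="gsrap")
    subparsers = parser.add_subparsers(dest="command")
    parsedb_parser = subparsers.add_parser("parsedb")
    parsedb_parser.add_argument("-j", "--justparse", action="store_true",
                                help="Just parse the database without performing extra activities.")
    parsedb_parser.add_argument("-d", "--keepdisconn", action="store_true",
                                help="Do not remove disconnected metabolites.")

    args = parser.parse_args(["parsedb", "--keepdisconn"])
    print(args.keepdisconn)  # True: downstream code will skip remove_disconnected()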
@@ -275,6 +275,67 @@ def check_taxon(logger, taxon, idcollection_dict):
 if name not in avail_taxa_at_level:
 logger.error(f"Provided taxon name is not acceptable: '{name}' (see --taxon). Acceptable taxon names for level '{level}' are {avail_taxa_at_level}.")
 return 1
-
+
+
+ """
+ sorted(list(df.query("kingdom == 'Bacteria'")['phylum'].unique()))
+ ['Acidobacteriota',
+ 'Actinomycetota',
+ 'Alphaproteobacteria',
+ 'Aquificota',
+ 'Armatimonadota',
+ 'Atribacterota',
+ 'Bacilli',
+ 'Bacteria incertae sedis',
+ 'Bacteroidota',
+ 'Balneolota',
+ 'Bdellovibrionota',
+ 'Betaproteobacteria',
+ 'Caldisericota',
+ 'Calditrichota',
+ 'Campylobacterota',
+ 'Chlamydiota',
+ 'Chlorobiota',
+ 'Chloroflexota',
+ 'Chrysiogenota',
+ 'Cloacimonadota',
+ 'Clostridia',
+ 'Coprothermobacterota',
+ 'Cyanobacteriota',
+ 'Deferribacterota',
+ 'Deinococcota',
+ 'Deltaproteobacteria',
+ 'Dictyoglomota',
+ 'Elusimicrobiota',
+ 'Enterobacteria',
+ 'Fibrobacterota',
+ 'Fidelibacterota',
+ 'Fusobacteriota',
+ 'Gemmatimonadota',
+ 'Ignavibacteriota',
+ 'Kiritimatiellota',
+ 'Lentisphaerota',
+ 'Melainabacteria',
+ 'Mycoplasmatota',
+ 'Myxococcota',
+ 'Nitrospinota',
+ 'Nitrospirota',
+ 'Omnitrophota',
+ 'Planctomycetota',
+ 'Rhodothermota',
+ 'Spirochaetota',
+ 'Synergistota',
+ 'Thermodesulfobacteriota',
+ 'Thermodesulfobiota',
+ 'Thermomicrobiota',
+ 'Thermosulfidibacterota',
+ 'Thermotogota',
+ 'Verrucomicrobiota',
+ 'Vulcanimicrobiota',
+ 'other Bacillota',
+ 'other Gammaproteobacteria',
+ 'other Pseudomonadota',
+ 'unclassified Bacteria']
+ """
 
 return 0
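The new docstring above records how the list of acceptable phylum names was obtained from a taxonomy table. A small illustration of that expression; the DataFrame below is made up, in gsrap the real table presumably comes from the parsed database:

    import pandas as pnd

    # toy taxonomy table standing in for the real one
    df = pnd.DataFrame({
        "kingdom": ["Bacteria", "Bacteria", "Archaea"],
        "phylum":  ["Bacillota", "Pseudomonadota", "Euryarchaeota"],
    })

    # same expression as in the docstring: unique bacterial phyla, sorted
    print(sorted(list(df.query("kingdom == 'Bacteria'")['phylum'].unique())))
    # ['Bacillota', 'Pseudomonadota']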
@@ -0,0 +1,108 @@
+ import warnings
+ import logging
+
+
+ import cobra
+
+
+
+ def print_json_tree(data, level=0, max_level=2):
+     # explore contents of a json object
+
+     if level > max_level:
+         return
+     indent = ' ' * level
+     if isinstance(data, dict):
+         for key, value in data.items():
+             print(f"{indent}{key}")
+             print_tree(value, level + 1, max_level)
+     elif isinstance(data, list):
+         for i, item in enumerate(data):
+             print(f"{indent}[{i}]")
+             print_tree(item, level + 1, max_level)
+
+
+
+ def count_undrawn_rids(logger, universe, lastmap, focus):
+
+
+     rids = set([r.id for r in universe.reactions])
+
+     drawn_rids = set()
+     for key, value in lastmap['json'][1]['reactions'].items():
+         drawn_rids.add(value['bigg_id'])
+
+
+     remainings = rids - drawn_rids
+     filename = lastmap['filename']
+     logger.debug(f"Last universal map version detected: '{filename}'.")
+     if len(remainings) > 0:
+         logger.warning(f"Our universal map is {len(remainings)} reactions behind. Please draw!")
+         if focus == '-':
+             logger.warning(f"Drawing is eased when using '--focus'...")
+     else:
+         logger.info(f"Our universal map is {len(remainings)} reactions behind. Thank you ♥")
+
+
+
+ def count_undrawn_rids_focus(logger, universe, lastmap, focus, outdir):
+
+
+     # get modeled reads for this --focus:
+     rids = set()
+     try: gr = universe.groups.get_by_id(focus)
+     except:
+         logger.warning(f"Group '{focus}' not found!")
+         return
+     for r in gr.members:
+         rids.add(r.id)
+
+
+     # get rids on Escher:
+     drawn_rids = set()
+     for key, value in lastmap['json'][1]['reactions'].items():
+         drawn_rids.add(value['bigg_id'])
+
+
+     # get remaining rids for this map:
+     remainings = rids - drawn_rids
+     remainings_krs = set()
+     for rid in remainings:
+         r = universe.reactions.get_by_id(rid)
+         krs = r.annotation['kegg.reaction']
+         for kr in krs:
+             remainings_krs.add(kr)
+
+
+     if len(remainings) > 0:
+         if focus != 'transport':
+             logger.warning(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind: {' '.join(list(remainings_krs))}.")
+         else:
+             logger.warning(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind.") # usually no kegg codes for tranport reactions
+
+
+         # subset the universe to ease the drawing:
+         universe_focus = universe.copy()
+         to_remove = [r for r in universe_focus.reactions if r.id not in rids]
+
+
+         # trick to avoid the WARNING "cobra/core/group.py:147: UserWarning: need to pass in a list"
+         # triggered when trying to remove reactions that are included in groups.
+         with warnings.catch_warnings(): # temporarily suppress warnings for this block
+             warnings.simplefilter("ignore") # ignore all warnings
+             cobra_logger = logging.getLogger("cobra.util.solver")
+             old_level = cobra_logger.level
+             cobra_logger.setLevel(logging.ERROR)
+
+             universe_focus.remove_reactions(to_remove,remove_orphans=True)
+
+             # restore original behaviour:
+             cobra_logger.setLevel(old_level)
+
+
+         # save the subset for drawing in Escher!
+         logger.info(f"Writing '{outdir}/{focus}.json' to ease your drawing workflow...")
+         cobra.io.save_json_model(universe_focus, f'{outdir}/{focus}.json')
+     else:
+         logger.info(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind. Thank you ♥")
+
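The new escherutils module (added here and again below as its non-checkpoint copy) compares the universe model against the latest Escher map and, for a given --focus group, writes a reduced JSON model to draw from. A rough usage sketch, assuming lastmap follows the Escher map layout (a two-element list whose second element holds the drawn reactions) and that the universe defines a group with the chosen name; the import path mirrors the one used in parsedb.py, and file names and the group id are made up:

    import json
    import logging

    import cobra

    from gsrap.commons import count_undrawn_rids_focus  # import path assumed from parsedb.py

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("gsrap")

    universe = cobra.io.load_json_model("universe.json")      # produced by the parsedb step
    with open("universal_map.json") as handle:                 # hypothetical map file
        escher_json = json.load(handle)
    lastmap = {"filename": "universal_map.json", "json": escher_json}

    # logs the KEGG reaction IDs still missing from the map and writes './glycolysis.json'
    # containing only the reactions of that group, ready to be loaded in Escher
    count_undrawn_rids_focus(logger, universe, lastmap, focus="glycolysis", outdir=".")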
@@ -275,6 +275,67 @@ def check_taxon(logger, taxon, idcollection_dict):
 if name not in avail_taxa_at_level:
 logger.error(f"Provided taxon name is not acceptable: '{name}' (see --taxon). Acceptable taxon names for level '{level}' are {avail_taxa_at_level}.")
 return 1
-
+
+
+ """
+ sorted(list(df.query("kingdom == 'Bacteria'")['phylum'].unique()))
+ ['Acidobacteriota',
+ 'Actinomycetota',
+ 'Alphaproteobacteria',
+ 'Aquificota',
+ 'Armatimonadota',
+ 'Atribacterota',
+ 'Bacilli',
+ 'Bacteria incertae sedis',
+ 'Bacteroidota',
+ 'Balneolota',
+ 'Bdellovibrionota',
+ 'Betaproteobacteria',
+ 'Caldisericota',
+ 'Calditrichota',
+ 'Campylobacterota',
+ 'Chlamydiota',
+ 'Chlorobiota',
+ 'Chloroflexota',
+ 'Chrysiogenota',
+ 'Cloacimonadota',
+ 'Clostridia',
+ 'Coprothermobacterota',
+ 'Cyanobacteriota',
+ 'Deferribacterota',
+ 'Deinococcota',
+ 'Deltaproteobacteria',
+ 'Dictyoglomota',
+ 'Elusimicrobiota',
+ 'Enterobacteria',
+ 'Fibrobacterota',
+ 'Fidelibacterota',
+ 'Fusobacteriota',
+ 'Gemmatimonadota',
+ 'Ignavibacteriota',
+ 'Kiritimatiellota',
+ 'Lentisphaerota',
+ 'Melainabacteria',
+ 'Mycoplasmatota',
+ 'Myxococcota',
+ 'Nitrospinota',
+ 'Nitrospirota',
+ 'Omnitrophota',
+ 'Planctomycetota',
+ 'Rhodothermota',
+ 'Spirochaetota',
+ 'Synergistota',
+ 'Thermodesulfobacteriota',
+ 'Thermodesulfobiota',
+ 'Thermomicrobiota',
+ 'Thermosulfidibacterota',
+ 'Thermotogota',
+ 'Verrucomicrobiota',
+ 'Vulcanimicrobiota',
+ 'other Bacillota',
+ 'other Gammaproteobacteria',
+ 'other Pseudomonadota',
+ 'unclassified Bacteria']
+ """
 
 return 0
@@ -0,0 +1,108 @@
+ import warnings
+ import logging
+
+
+ import cobra
+
+
+
+ def print_json_tree(data, level=0, max_level=2):
+     # explore contents of a json object
+
+     if level > max_level:
+         return
+     indent = ' ' * level
+     if isinstance(data, dict):
+         for key, value in data.items():
+             print(f"{indent}{key}")
+             print_tree(value, level + 1, max_level)
+     elif isinstance(data, list):
+         for i, item in enumerate(data):
+             print(f"{indent}[{i}]")
+             print_tree(item, level + 1, max_level)
+
+
+
+ def count_undrawn_rids(logger, universe, lastmap, focus):
+
+
+     rids = set([r.id for r in universe.reactions])
+
+     drawn_rids = set()
+     for key, value in lastmap['json'][1]['reactions'].items():
+         drawn_rids.add(value['bigg_id'])
+
+
+     remainings = rids - drawn_rids
+     filename = lastmap['filename']
+     logger.debug(f"Last universal map version detected: '{filename}'.")
+     if len(remainings) > 0:
+         logger.warning(f"Our universal map is {len(remainings)} reactions behind. Please draw!")
+         if focus == '-':
+             logger.warning(f"Drawing is eased when using '--focus'...")
+     else:
+         logger.info(f"Our universal map is {len(remainings)} reactions behind. Thank you ♥")
+
+
+
+ def count_undrawn_rids_focus(logger, universe, lastmap, focus, outdir):
+
+
+     # get modeled reads for this --focus:
+     rids = set()
+     try: gr = universe.groups.get_by_id(focus)
+     except:
+         logger.warning(f"Group '{focus}' not found!")
+         return
+     for r in gr.members:
+         rids.add(r.id)
+
+
+     # get rids on Escher:
+     drawn_rids = set()
+     for key, value in lastmap['json'][1]['reactions'].items():
+         drawn_rids.add(value['bigg_id'])
+
+
+     # get remaining rids for this map:
+     remainings = rids - drawn_rids
+     remainings_krs = set()
+     for rid in remainings:
+         r = universe.reactions.get_by_id(rid)
+         krs = r.annotation['kegg.reaction']
+         for kr in krs:
+             remainings_krs.add(kr)
+
+
+     if len(remainings) > 0:
+         if focus != 'transport':
+             logger.warning(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind: {' '.join(list(remainings_krs))}.")
+         else:
+             logger.warning(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind.") # usually no kegg codes for tranport reactions
+
+
+         # subset the universe to ease the drawing:
+         universe_focus = universe.copy()
+         to_remove = [r for r in universe_focus.reactions if r.id not in rids]
+
+
+         # trick to avoid the WARNING "cobra/core/group.py:147: UserWarning: need to pass in a list"
+         # triggered when trying to remove reactions that are included in groups.
+         with warnings.catch_warnings(): # temporarily suppress warnings for this block
+             warnings.simplefilter("ignore") # ignore all warnings
+             cobra_logger = logging.getLogger("cobra.util.solver")
+             old_level = cobra_logger.level
+             cobra_logger.setLevel(logging.ERROR)
+
+             universe_focus.remove_reactions(to_remove,remove_orphans=True)
+
+             # restore original behaviour:
+             cobra_logger.setLevel(old_level)
+
+
+         # save the subset for drawing in Escher!
+         logger.info(f"Writing '{outdir}/{focus}.json' to ease your drawing workflow...")
+         cobra.io.save_json_model(universe_focus, f'{outdir}/{focus}.json')
+     else:
+         logger.info(f"Focusing on '{focus}', our universal map is {len(remainings)} reactions behind. Thank you ♥")
+
@@ -138,6 +138,15 @@ def set_up_groups(logger, model, idcollection_dict):
 
 # insert custom groups:
 custom_groups = get_custom_groups()
+ #
+ # create a group for transporters on-the-fly
+ custom_groups['transport'] = []
+ for r in model.reactions:
+     if len(r.metabolites) == 1: # exchanges / sinks/ demands
+         custom_groups['transport'].append(r.id)
+     if len(set([m.id.rsplit('_', 1)[-1] for m in r.metabolites])) > 1: # transport reactions
+         custom_groups['transport'].append(r.id)
+ #
 for group_id in custom_groups.keys():
 actual_group = cobra.core.Group(
 group_id,
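The hunk above builds a 'transport' group on the fly: reactions with a single metabolite (exchanges, sinks, demands) and reactions whose metabolites span more than one compartment suffix. A small, self-contained illustration of that suffix heuristic, assuming BiGG-style metabolite IDs ending in '_<compartment>':

    def looks_like_transport(metabolite_ids):
        # one compartment suffix per metabolite id, e.g. 'glc__D_e' -> 'e'
        compartments = {mid.rsplit('_', 1)[-1] for mid in metabolite_ids}
        return len(compartments) > 1

    print(looks_like_transport(['glc__D_e', 'glc__D_c']))  # True: crosses e and c
    print(looks_like_transport(['g6p_c', 'f6p_c']))         # False: stays in the cytosol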
@@ -6,6 +6,9 @@ import os
 import pandas as pnd
 
 
+ from .manual import get_krs_to_exclude
+
+
 
 def parse_eggnog(model, eggnog, idcollection_dict):
 
@@ -112,7 +115,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 if 'kegg.reaction' in r.annotation.keys():
 for kr_id in r.annotation['kegg.reaction']:
 kr_ids_modeled.add(kr_id)
- kr_uni_missing = kr_uni - kr_ids_modeled
+ kr_uni_missing = (kr_uni - kr_ids_modeled) - get_krs_to_exclude()
 kr_uni_coverage = len(kr_ids_modeled.intersection(kr_uni)) / len(kr_uni) * 100
 logger.info(f"Coverage for {kr_uni_label}: {round(kr_uni_coverage, 0)}% ({len(kr_uni_missing)} missing).")
 
@@ -141,8 +144,12 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 
 # check if 'focus' exist
 if focus != '-' and focus not in map_ids and focus not in md_ids:
- logger.error(f"The ID provided with --focus does not exist: {focus}.")
- return 1
+ if focus == 'transport':
+     df_coverage = None
+     return df_coverage # just the jeneration of 'transport.json' for Escher drawing is needed here
+ else:
+     logger.error(f"The ID provided with --focus does not exist: {focus}.")
+     return 1
 if focus.startswith('map'):
 logger.debug(f"With --focus {focus}, --module will switch to False.")
 module = False
@@ -175,7 +182,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 
 # check if this map was (at least partially) covered:
 map_krs = set([kr for kr in i['kr_ids'] if kr in kr_uni])
- missing = map_krs - kr_ids_modeled
+ missing = (map_krs - kr_ids_modeled) - get_krs_to_exclude()
 present = kr_ids_modeled.intersection(map_krs)
 if focus == map_id:
 missing_logger = (map_id, missing)
@@ -287,7 +294,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 
 # check if this module was (at least partially) covered:
 md_krs = set([kr for kr in z['kr_ids_md'] if kr in kr_uni])
- missing = md_krs - kr_ids_modeled
+ missing = (md_krs - kr_ids_modeled) - get_krs_to_exclude()
 present = kr_ids_modeled.intersection(md_krs)
 if focus == md_id:
 missing_logger = (md_id, missing)
@@ -336,7 +343,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 if module and focus=='-':
 logger.info(f"{spacer}Modules of {right_item['map_id']}: completed {len(mds_completed)} - partial {len(mds_partial)} - missing {len(mds_missing)} - noreac {len(mds_noreac)}")
 if focus != '-':
- logger.info(f"Missing reactions focusing on {missing_logger[0]}: {' '.join(list(missing_logger[1]))}.")
+ logger.info(f"Missing reactions focusing on '{missing_logger[0]}': {' '.join(list(missing_logger[1]))}.")
 if progress:
 logger.info(f"Maps: finished {len(maps_finished)} - partial {len(maps_partial)} - missing {len(maps_missing)} - noreac {len(maps_noreac)}")
 
@@ -5,11 +5,21 @@ def get_deprecated_kos():
 deprecated_kos = [
 'K11189', # should be K02784
 'K07011', # linked to lp_1215(cps3A) and lp_1216(cps3B) during 2018 and not replaced
+ #'K24301', # to be introduced in GPRs
 ]
 return deprecated_kos
 
 
 
+ def get_krs_to_exclude():
+     return set([
+         'R12328', 'R05190', # general forms of fatty acid biosynthesis
+         'R01347', 'R04121', # general forms of fatty acid degradation
+     ])
+
+
+
+
 def get_rids_with_mancheck_gpr():
 rids_mancheck_gpr = [ # reactions with manually checked GPRs
 'SUCD1', 'ALKP', 'PFK_3', 'TCMPTS', 'PPA', 'APSR',
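The new get_krs_to_exclude() (a nearly identical copy, with one extra ID, appears further down) lists generic KEGG reactions that should not count as gaps, and the completeness hunks subtract it wherever missing reactions are computed. A toy example of the effect, with made-up sets of IDs:

    def get_krs_to_exclude():
        # generic fatty-acid reactions, as in the diff above
        return {'R12328', 'R05190', 'R01347', 'R04121'}

    # made-up coverage bookkeeping mirroring check_completeness():
    kr_uni = {'R00200', 'R00658', 'R05190', 'R04121'}   # KEGG reactions expected in the universe
    kr_ids_modeled = {'R00200'}                          # reactions already present in the model

    kr_uni_missing = (kr_uni - kr_ids_modeled) - get_krs_to_exclude()
    print(kr_uni_missing)  # {'R00658'}: the generic reactions no longer show up as missing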
@@ -16,6 +16,8 @@ from ..commons import write_excel_model
 from ..commons import show_contributions
 from ..commons import adjust_biomass_precursors
 from ..commons import count_undrawn_rids
+ from ..commons import count_undrawn_rids_focus
+
 from ..commons import format_expansion
 from ..commons import check_taxon
 from ..commons import download_keggorg
@@ -184,7 +186,8 @@ def main(args, logger):
 
 ###### POLISHING 1
 # remove disconnected metabolites
- universe = remove_disconnected(logger, universe)
+ if args.keepdisconn == False:
+     universe = remove_disconnected(logger, universe) # can be commented when using booster.py
 
 
 
@@ -193,9 +196,9 @@ def main(args, logger):
 verify_egc_all(logger, universe, args.outdir)
 
 
+
 if not args.justparse:
 
-
 ###### CHECKS 3
 # check growth on minmal media
 df_G = grow_on_media(logger, universe, dbexp, args.media, '-', True)
@@ -228,10 +231,15 @@ def main(args, logger):
 
 
 
- # output the universe
- logger.info("Writing universal model...")
- cobra.io.save_json_model(universe, f'{args.outdir}/universe.json')
- logger.info(f"'{args.outdir}/universe.json' created!")
+ # output the universe (even when --justparse)
+ logger.info("Writing universal model...")
+ cobra.io.save_json_model(universe, f'{args.outdir}/universe.json')
+ logger.info(f"'{args.outdir}/universe.json' created!")
+
+
+ if not args.justparse:
+
+     # outptu in the remaining formats:
 cobra.io.write_sbml_model(universe, f'{args.outdir}/universe.xml') # groups are saved only to SBML
 logger.info(f"'{args.outdir}/universe.xml' created!")
 force_id_on_sbml(f'{args.outdir}/universe.xml', 'universe') # force introduction of the 'id=""' field
@@ -242,7 +250,9 @@ def main(args, logger):
 
 ###### CHECKS 4
 # check if universal escher map is updated:
- count_undrawn_rids(logger, universe, lastmap)
+ count_undrawn_rids(logger, universe, lastmap, args.focus)
+ if args.focus != '-':
+     count_undrawn_rids_focus(logger, universe, lastmap, args.focus, args.outdir)
 
 
 return 0
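Taken together, the parsedb hunks change the main flow: disconnected metabolites are pruned only without -d/--keepdisconn, universe.json is now written even with --justparse (SBML and the remaining exports still are not), and the focused Escher helper runs when --focus is given. Hypothetical command lines; option spellings other than those shown in the hunks, and the entry-point name, are assumptions:

    # write universe.json quickly, skipping exports, growth checks and pruning
    #   gsrap parsedb --justparse --keepdisconn --outdir out/
    #
    # generate out/transport.json to ease drawing transport reactions in Escher
    #   gsrap parsedb --focus transport --outdir out/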
@@ -45,7 +45,7 @@ def check_gpr(logger, rid, row, kr_ids, idcollection_dict, addtype='R'):
 pass
 elif ko_id not in idcollection_dict['ko'] and ko_id != 'spontaneous' and ko_id != 'orphan':
 logger.error(f"{itemtype} '{rid}' has an invalid KEGG Ortholog: '{ko_id}'.")
- return 1
+ return 1 # can be commented when migrating to new kegg release
 
 
 # check if these ko_ids are really assigned to this reaction:
@@ -61,7 +61,7 @@ def check_gpr(logger, rid, row, kr_ids, idcollection_dict, addtype='R'):
 missing_ko_ids = ko_for_rid - (set(ko_ids_parsed) - set(['spontaneous', 'orphan']))
 if len(missing_ko_ids) > 0:
 logger.error(f"Orthologs {missing_ko_ids} are missing from reaction '{rid}' ({kr_ids}).")
- return 1
+ return 1 # can be commented when migrating to new kegg release
 
 
 return 0
@@ -138,6 +138,15 @@ def set_up_groups(logger, model, idcollection_dict):
 
 # insert custom groups:
 custom_groups = get_custom_groups()
+ #
+ # create a group for transporters on-the-fly
+ custom_groups['transport'] = []
+ for r in model.reactions:
+     if len(r.metabolites) == 1: # exchanges / sinks/ demands
+         custom_groups['transport'].append(r.id)
+     if len(set([m.id.rsplit('_', 1)[-1] for m in r.metabolites])) > 1: # transport reactions
+         custom_groups['transport'].append(r.id)
+ #
 for group_id in custom_groups.keys():
 actual_group = cobra.core.Group(
 group_id,
@@ -6,6 +6,9 @@ import os
 import pandas as pnd
 
 
+ from .manual import get_krs_to_exclude
+
+
 
 def parse_eggnog(model, eggnog, idcollection_dict):
 
@@ -112,7 +115,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 if 'kegg.reaction' in r.annotation.keys():
 for kr_id in r.annotation['kegg.reaction']:
 kr_ids_modeled.add(kr_id)
- kr_uni_missing = kr_uni - kr_ids_modeled
+ kr_uni_missing = (kr_uni - kr_ids_modeled) - get_krs_to_exclude()
 kr_uni_coverage = len(kr_ids_modeled.intersection(kr_uni)) / len(kr_uni) * 100
 logger.info(f"Coverage for {kr_uni_label}: {round(kr_uni_coverage, 0)}% ({len(kr_uni_missing)} missing).")
 
@@ -141,8 +144,12 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 
 # check if 'focus' exist
 if focus != '-' and focus not in map_ids and focus not in md_ids:
- logger.error(f"The ID provided with --focus does not exist: {focus}.")
- return 1
+ if focus == 'transport':
+     df_coverage = None
+     return df_coverage # just the jeneration of 'transport.json' for Escher drawing is needed here
+ else:
+     logger.error(f"The ID provided with --focus does not exist: {focus}.")
+     return 1
 if focus.startswith('map'):
 logger.debug(f"With --focus {focus}, --module will switch to False.")
 module = False
@@ -175,7 +182,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 
 # check if this map was (at least partially) covered:
 map_krs = set([kr for kr in i['kr_ids'] if kr in kr_uni])
- missing = map_krs - kr_ids_modeled
+ missing = (map_krs - kr_ids_modeled) - get_krs_to_exclude()
 present = kr_ids_modeled.intersection(map_krs)
 if focus == map_id:
 missing_logger = (map_id, missing)
@@ -287,7 +294,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 
 # check if this module was (at least partially) covered:
 md_krs = set([kr for kr in z['kr_ids_md'] if kr in kr_uni])
- missing = md_krs - kr_ids_modeled
+ missing = (md_krs - kr_ids_modeled) - get_krs_to_exclude()
 present = kr_ids_modeled.intersection(md_krs)
 if focus == md_id:
 missing_logger = (md_id, missing)
@@ -336,7 +343,7 @@ def check_completeness(logger, model, progress, module, focus, taxon, eggnog, ke
 if module and focus=='-':
 logger.info(f"{spacer}Modules of {right_item['map_id']}: completed {len(mds_completed)} - partial {len(mds_partial)} - missing {len(mds_missing)} - noreac {len(mds_noreac)}")
 if focus != '-':
- logger.info(f"Missing reactions focusing on {missing_logger[0]}: {' '.join(list(missing_logger[1]))}.")
+ logger.info(f"Missing reactions focusing on '{missing_logger[0]}': {' '.join(list(missing_logger[1]))}.")
 if progress:
 logger.info(f"Maps: finished {len(maps_finished)} - partial {len(maps_partial)} - missing {len(maps_missing)} - noreac {len(maps_noreac)}")
 
@@ -5,11 +5,21 @@ def get_deprecated_kos():
 deprecated_kos = [
 'K11189', # should be K02784
 'K07011', # linked to lp_1215(cps3A) and lp_1216(cps3B) during 2018 and not replaced
+ #'K24301', # to be introduced in GPRs
 ]
 return deprecated_kos
 
 
 
+ def get_krs_to_exclude():
+     return set([
+         'R12328', 'R05190', # general forms of fatty acid biosynthesis
+         'R01347', 'R01348', 'R04121', # general forms of fatty acid degradation
+     ])
+
+
+
+
 def get_rids_with_mancheck_gpr():
 rids_mancheck_gpr = [ # reactions with manually checked GPRs
 'SUCD1', 'ALKP', 'PFK_3', 'TCMPTS', 'PPA', 'APSR',
@@ -16,6 +16,8 @@ from ..commons import write_excel_model
 from ..commons import show_contributions
 from ..commons import adjust_biomass_precursors
 from ..commons import count_undrawn_rids
+ from ..commons import count_undrawn_rids_focus
+
 from ..commons import format_expansion
 from ..commons import check_taxon
 from ..commons import download_keggorg
@@ -184,7 +186,8 @@ def main(args, logger):
 
 ###### POLISHING 1
 # remove disconnected metabolites
- universe = remove_disconnected(logger, universe)
+ if args.keepdisconn == False:
+     universe = remove_disconnected(logger, universe) # can be commented when using booster.py
 
 
 
@@ -193,9 +196,9 @@ def main(args, logger):
 verify_egc_all(logger, universe, args.outdir)
 
 
+
 if not args.justparse:
 
-
 ###### CHECKS 3
 # check growth on minmal media
 df_G = grow_on_media(logger, universe, dbexp, args.media, '-', True)
@@ -228,10 +231,15 @@ def main(args, logger):
 
 
 
- # output the universe
- logger.info("Writing universal model...")
- cobra.io.save_json_model(universe, f'{args.outdir}/universe.json')
- logger.info(f"'{args.outdir}/universe.json' created!")
+ # output the universe (even when --justparse)
+ logger.info("Writing universal model...")
+ cobra.io.save_json_model(universe, f'{args.outdir}/universe.json')
+ logger.info(f"'{args.outdir}/universe.json' created!")
+
+
+ if not args.justparse:
+
+     # outptu in the remaining formats:
 cobra.io.write_sbml_model(universe, f'{args.outdir}/universe.xml') # groups are saved only to SBML
 logger.info(f"'{args.outdir}/universe.xml' created!")
 force_id_on_sbml(f'{args.outdir}/universe.xml', 'universe') # force introduction of the 'id=""' field
@@ -242,7 +250,9 @@ def main(args, logger):
 
 ###### CHECKS 4
 # check if universal escher map is updated:
- count_undrawn_rids(logger, universe, lastmap)
+ count_undrawn_rids(logger, universe, lastmap, args.focus)
+ if args.focus != '-':
+     count_undrawn_rids_focus(logger, universe, lastmap, args.focus, args.outdir)
 
 
 return 0
@@ -45,7 +45,7 @@ def check_gpr(logger, rid, row, kr_ids, idcollection_dict, addtype='R'):
 pass
 elif ko_id not in idcollection_dict['ko'] and ko_id != 'spontaneous' and ko_id != 'orphan':
 logger.error(f"{itemtype} '{rid}' has an invalid KEGG Ortholog: '{ko_id}'.")
- return 1
+ return 1 # can be commented when migrating to new kegg release
 
 
 # check if these ko_ids are really assigned to this reaction:
@@ -61,7 +61,7 @@ def check_gpr(logger, rid, row, kr_ids, idcollection_dict, addtype='R'):
 missing_ko_ids = ko_for_rid - (set(ko_ids_parsed) - set(['spontaneous', 'orphan']))
 if len(missing_ko_ids) > 0:
 logger.error(f"Orthologs {missing_ko_ids} are missing from reaction '{rid}' ({kr_ids}).")
- return 1
+ return 1 # can be commented when migrating to new kegg release
 
 
 return 0
@@ -1,37 +0,0 @@
-
-
- def print_json_tree(data, level=0, max_level=2):
-     # explore contents of a json object
-
-     if level > max_level:
-         return
-     indent = ' ' * level
-     if isinstance(data, dict):
-         for key, value in data.items():
-             print(f"{indent}{key}")
-             print_tree(value, level + 1, max_level)
-     elif isinstance(data, list):
-         for i, item in enumerate(data):
-             print(f"{indent}[{i}]")
-             print_tree(item, level + 1, max_level)
-
-
-
- def count_undrawn_rids(logger, universe, lastmap):
-
-
-     rids = set([r.id for r in universe.reactions])
-
-     drawn_rids = set()
-     for key, value in lastmap['json'][1]['reactions'].items():
-         drawn_rids.add(value['bigg_id'])
-
-
-     remainings = rids - drawn_rids
-     filename = lastmap['filename']
-     logger.debug(f"Last universal map version detected: '{filename}'.")
-     if len(remainings) > 0:
-         logger.warning(f"Our universal map is {len(remainings)} reactions behind. Please draw!")
-     else:
-         logger.info(f"Our universal map is {len(remainings)} reactions behind. Thank you ♥")
-
@@ -1,37 +0,0 @@
-
-
- def print_json_tree(data, level=0, max_level=2):
-     # explore contents of a json object
-
-     if level > max_level:
-         return
-     indent = ' ' * level
-     if isinstance(data, dict):
-         for key, value in data.items():
-             print(f"{indent}{key}")
-             print_tree(value, level + 1, max_level)
-     elif isinstance(data, list):
-         for i, item in enumerate(data):
-             print(f"{indent}[{i}]")
-             print_tree(item, level + 1, max_level)
-
-
-
- def count_undrawn_rids(logger, universe, lastmap):
-
-
-     rids = set([r.id for r in universe.reactions])
-
-     drawn_rids = set()
-     for key, value in lastmap['json'][1]['reactions'].items():
-         drawn_rids.add(value['bigg_id'])
-
-
-     remainings = rids - drawn_rids
-     filename = lastmap['filename']
-     logger.debug(f"Last universal map version detected: '{filename}'.")
-     if len(remainings) > 0:
-         logger.warning(f"Our universal map is {len(remainings)} reactions behind. Please draw!")
-     else:
-         logger.info(f"Our universal map is {len(remainings)} reactions behind. Thank you ♥")
-