gsrap 0.7.0__tar.gz → 0.7.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. {gsrap-0.7.0 → gsrap-0.7.1}/PKG-INFO +1 -1
  2. {gsrap-0.7.0 → gsrap-0.7.1}/pyproject.toml +1 -1
  3. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/.ipynb_checkpoints/__init__-checkpoint.py +31 -4
  4. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/__init__.py +31 -4
  5. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/biomass-checkpoint.py +4 -0
  6. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/coeffs-checkpoint.py +1 -1
  7. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/fluxbal-checkpoint.py +1 -1
  8. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/biomass.py +4 -0
  9. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/coeffs.py +1 -1
  10. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/fluxbal.py +1 -1
  11. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/gapfillutils-checkpoint.py +3 -0
  12. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py +10 -3
  13. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/gapfillutils.py +3 -0
  14. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/mkmodel.py +10 -3
  15. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/annotation-checkpoint.py +3 -0
  16. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py +7 -4
  17. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/introduce-checkpoint.py +16 -1
  18. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py +1 -1
  19. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/repeating-checkpoint.py +7 -0
  20. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/annotation.py +3 -0
  21. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/completeness.py +7 -4
  22. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/introduce.py +16 -1
  23. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/parsedb.py +1 -1
  24. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/repeating.py +7 -0
  25. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py +6 -7
  26. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/simplegrowth.py +6 -7
  27. {gsrap-0.7.0 → gsrap-0.7.1}/LICENSE.txt +0 -0
  28. {gsrap-0.7.0 → gsrap-0.7.1}/README.md +0 -0
  29. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/.ipynb_checkpoints/PM1-checkpoint.csv +0 -0
  30. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/.ipynb_checkpoints/PM2A-checkpoint.csv +0 -0
  31. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/.ipynb_checkpoints/PM3B-checkpoint.csv +0 -0
  32. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/.ipynb_checkpoints/PM4A-checkpoint.csv +0 -0
  33. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/PM1.csv +0 -0
  34. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/PM2A.csv +0 -0
  35. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/PM3B.csv +0 -0
  36. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/PM4A.csv +0 -0
  37. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/__init__.py +0 -0
  38. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/kegg_compound_to_others.pickle +0 -0
  39. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/assets/kegg_reaction_to_others.pickle +0 -0
  40. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  41. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/downloads-checkpoint.py +0 -0
  42. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/escherutils-checkpoint.py +0 -0
  43. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/excelhub-checkpoint.py +0 -0
  44. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/logutils-checkpoint.py +0 -0
  45. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/medium-checkpoint.py +0 -0
  46. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/metrics-checkpoint.py +0 -0
  47. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/sbmlutils-checkpoint.py +0 -0
  48. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/__init__.py +0 -0
  49. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/downloads.py +0 -0
  50. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/escherutils.py +0 -0
  51. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/excelhub.py +0 -0
  52. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/logutils.py +0 -0
  53. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/medium.py +0 -0
  54. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/metrics.py +0 -0
  55. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/sbmlutils.py +0 -0
  56. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/getmaps/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  57. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/getmaps/.ipynb_checkpoints/getmaps-checkpoint.py +0 -0
  58. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/getmaps/.ipynb_checkpoints/kdown-checkpoint.py +0 -0
  59. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/getmaps/__init__.py +0 -0
  60. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/getmaps/getmaps.py +0 -0
  61. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/getmaps/kdown.py +0 -0
  62. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  63. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/biologcuration-checkpoint.py +0 -0
  64. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/gapfill-checkpoint.py +0 -0
  65. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/polishing-checkpoint.py +0 -0
  66. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/pruner-checkpoint.py +0 -0
  67. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/__init__.py +0 -0
  68. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/biologcuration.py +0 -0
  69. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/gapfill.py +0 -0
  70. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/polishing.py +0 -0
  71. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/pruner.py +0 -0
  72. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  73. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/manual-checkpoint.py +0 -0
  74. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/__init__.py +0 -0
  75. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/manual.py +0 -0
  76. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
  77. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/biosynth-checkpoint.py +0 -0
  78. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/cnps-checkpoint.py +0 -0
  79. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/essentialgenes-checkpoint.py +0 -0
  80. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/growthfactors-checkpoint.py +0 -0
  81. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/precursors-checkpoint.py +0 -0
  82. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/runsims-checkpoint.py +0 -0
  83. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/singleomission-checkpoint.py +0 -0
  84. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/__init__.py +0 -0
  85. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/biosynth.py +0 -0
  86. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/cnps.py +0 -0
  87. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/essentialgenes.py +0 -0
  88. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/growthfactors.py +0 -0
  89. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/precursors.py +0 -0
  90. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/runsims.py +0 -0
  91. {gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/singleomission.py +0 -0

{gsrap-0.7.0 → gsrap-0.7.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: gsrap
- Version: 0.7.0
+ Version: 0.7.1
  Summary:
  License: GNU General Public License v3.0
  Author: Gioele Lazzari

{gsrap-0.7.0 → gsrap-0.7.1}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "gsrap"
- version = "0.7.0"
+ version = "0.7.1"
  description = ""
  authors = ["Gioele Lazzari"]
  license = "GNU General Public License v3.0"

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/.ipynb_checkpoints/__init__-checkpoint.py
@@ -1,8 +1,10 @@
  import argparse
  import sys
  import traceback
+ import requests
  import importlib.metadata
  from datetime import datetime
+ from packaging import version


  import cobra
@@ -29,8 +31,9 @@ solver_name = solver_name.replace("_interface", '')
  def main():


- # define the header of main- and sub-commands.
- header = f'gsrap v{importlib.metadata.metadata("gsrap")["Version"]},\ndeveloped by Gioele Lazzari (gioele.lazzari@univr.it).'
+ # define the header of main- and sub-commands.
+ current_version = importlib.metadata.metadata("gsrap")["Version"]
+ header = f'gsrap v{current_version},\ndeveloped by Gioele Lazzari (gioele.lazzari@univr.it).'


  # create the command line arguments:
@@ -117,12 +120,36 @@ def main():
  # set up the logger:
  logger = get_logger('gsrap', args.verbose)

-
+

  # show a welcome message:
  set_header_trailer_formatter(logger.handlers[0])
  logger.info(header + '\n')
- command_line = '' # print the full command line:
+
+
+
+ # check if newer version is available
+ try:
+ response = requests.get(f"https://pypi.org/pypi/gsrap/json", timeout=3) # sends an HTTP GET request to the given URL
+ response.raise_for_status() # check the HTTP status code (e.g. 200, 404, 500): if not in the 2xx success range, raise requests.exceptions.HTTPError
+ data = response.json()
+ newest_version = data["info"]["version"]
+ except Exception as error: # eg requests.exceptions.Timeout, requests.exceptions.HTTPError
+ logger.info(f'Can\'t retrieve the number of the newest version. Please contact the developer reporting the following error: "{error}".')
+ logger.info('') # still no formatting here
+ # do not exit, continue with the program
+ if version.parse(current_version) < version.parse(newest_version):
+ warning_message = f"███ Last version is v{newest_version} and you have v{current_version}: please update gsrap! ███"
+ border = ''.join(['█' for i in range(len(warning_message))])
+ logger.info(border)
+ logger.info(warning_message)
+ logger.info(border)
+ logger.info('') # still no formatting here
+
+
+
+ # print the full command line:
+ command_line = ''
  for arg, value in vars(args).items():
  if arg == 'subcommand': command_line = command_line + f"gsrap {value} "
  else: command_line = command_line + f"--{arg} {value} "

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/__init__.py
@@ -1,8 +1,10 @@
  import argparse
  import sys
  import traceback
+ import requests
  import importlib.metadata
  from datetime import datetime
+ from packaging import version


  import cobra
@@ -29,8 +31,9 @@ solver_name = solver_name.replace("_interface", '')
  def main():


- # define the header of main- and sub-commands.
- header = f'gsrap v{importlib.metadata.metadata("gsrap")["Version"]},\ndeveloped by Gioele Lazzari (gioele.lazzari@univr.it).'
+ # define the header of main- and sub-commands.
+ current_version = importlib.metadata.metadata("gsrap")["Version"]
+ header = f'gsrap v{current_version},\ndeveloped by Gioele Lazzari (gioele.lazzari@univr.it).'


  # create the command line arguments:
@@ -117,12 +120,36 @@ def main():
  # set up the logger:
  logger = get_logger('gsrap', args.verbose)

-
+

  # show a welcome message:
  set_header_trailer_formatter(logger.handlers[0])
  logger.info(header + '\n')
- command_line = '' # print the full command line:
+
+
+
+ # check if newer version is available
+ try:
+ response = requests.get(f"https://pypi.org/pypi/gsrap/json", timeout=3) # sends an HTTP GET request to the given URL
+ response.raise_for_status() # check the HTTP status code (e.g. 200, 404, 500): if not in the 2xx success range, raise requests.exceptions.HTTPError
+ data = response.json()
+ newest_version = data["info"]["version"]
+ except Exception as error: # eg requests.exceptions.Timeout, requests.exceptions.HTTPError
+ logger.info(f'Can\'t retrieve the number of the newest version. Please contact the developer reporting the following error: "{error}".')
+ logger.info('') # still no formatting here
+ # do not exit, continue with the program
+ if version.parse(current_version) < version.parse(newest_version):
+ warning_message = f"███ Last version is v{newest_version} and you have v{current_version}: please update gsrap! ███"
+ border = ''.join(['█' for i in range(len(warning_message))])
+ logger.info(border)
+ logger.info(warning_message)
+ logger.info(border)
+ logger.info('') # still no formatting here
+
+
+
+ # print the full command line:
+ command_line = ''
  for arg, value in vars(args).items():
  if arg == 'subcommand': command_line = command_line + f"gsrap {value} "
  else: command_line = command_line + f"--{arg} {value} "
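
The main change to __init__.py (and its checkpoint copy) is a startup update check: the CLI now queries PyPI's JSON API for the latest published gsrap release and prints a warning block when the installed version is older. Below is a minimal standalone sketch of the same idea; the function name and return convention are illustrative, not part of gsrap, and the comparison is kept inside the try block so a failed request simply skips the check.

import requests
from packaging import version

def check_for_newer_version(package: str, current: str, timeout: float = 3.0):
    """Return the newest PyPI version string if it is newer than `current`, else None."""
    try:
        # PyPI serves package metadata at https://pypi.org/pypi/<name>/json
        response = requests.get(f"https://pypi.org/pypi/{package}/json", timeout=timeout)
        response.raise_for_status()  # non-2xx responses raise requests.exceptions.HTTPError
        newest = response.json()["info"]["version"]
        if version.parse(current) < version.parse(newest):
            return newest
    except Exception:
        pass  # network problems should never block the CLI
    return None

# Example: check_for_newer_version("gsrap", "0.7.0") would return "0.7.1" or a later release.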

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/biomass-checkpoint.py
@@ -190,6 +190,10 @@ def introduce_universal_biomass(logger, dbexp, universe):
  r.build_reaction_from_string(rstring)


+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000629'] # biomass reaction
+
+
  # set as objective:
  universe.objective = 'Biomass'


{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/coeffs-checkpoint.py
@@ -267,7 +267,7 @@ def compute_exp_LIPIDS_coeffs(logger, model, MWF, LIPIDS_PL, LIPIDS_FA):
  r.bounds = (0, 1000)
  r.gene_reaction_rule = 'spontaneous'
  r.update_genes_from_gpr()
-
+

  # determine 'L' formula and charge (charge should be -1 like every fatty acid)
  L_dict = dict() # for 1 mol

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/.ipynb_checkpoints/fluxbal-checkpoint.py
@@ -44,7 +44,7 @@ def verify_growth(model, boolean=True):
  if status =='infeasible':
  return 'infeasible'
  elif obj_value < get_optthr():
- return 0
+ return 0.0
  else:
  return round(obj_value, 3)


{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/biomass.py
@@ -190,6 +190,10 @@ def introduce_universal_biomass(logger, dbexp, universe):
  r.build_reaction_from_string(rstring)


+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000629'] # biomass reaction
+
+
  # set as objective:
  universe.objective = 'Biomass'


{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/coeffs.py
@@ -267,7 +267,7 @@ def compute_exp_LIPIDS_coeffs(logger, model, MWF, LIPIDS_PL, LIPIDS_FA):
  r.bounds = (0, 1000)
  r.gene_reaction_rule = 'spontaneous'
  r.update_genes_from_gpr()
-
+

  # determine 'L' formula and charge (charge should be -1 like every fatty acid)
  L_dict = dict() # for 1 mol

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/commons/fluxbal.py
@@ -44,7 +44,7 @@ def verify_growth(model, boolean=True):
  if status =='infeasible':
  return 'infeasible'
  elif obj_value < get_optthr():
- return 0
+ return 0.0
  else:
  return round(obj_value, 3)


{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/gapfillutils-checkpoint.py
@@ -33,6 +33,9 @@ def import_from_universe(model, universe, rid, bounds=None, gpr=None):
  else:
  r.gene_reaction_rule = ''
  r.update_genes_from_gpr()
+
+ # set annotations
+ r.annotation = ru.annotation




{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py
@@ -64,6 +64,7 @@ def create_model_incore(params):
  # remove universal orphans
  model = remove_universal_orphans(logger, model)

+

  ###### PRUNING
  logger.info("Reading provided eggnog-mapper annotation...")
@@ -77,6 +78,7 @@ def create_model_incore(params):
  translate_remaining_kos(logger, model, eggnog_ko_to_gids)
  restore_gene_annotations(logger, model, universe, eggonog_gid_to_kos)

+

  ###### GAPFILLING
  # force inclusion of reactions:
@@ -103,30 +105,35 @@ def create_model_incore(params):
  if type(df_P)==int: return 1


- ###### POLISHING 2
- # remove disconnected metabolites
- model = remove_disconnected(logger, model)

+ ###### POLISHING 2
  # remove unsed sinks and demands
  model = remove_sinks_demands(logger, model)
+
+ # remove disconnected metabolites
+ model = remove_disconnected(logger, model)

+

  # # # # # DERIVATION ENDS HERE # # # # #
  log_metrics(logger, model)
  log_unbalances(logger, model)


+
  ###### CHECKS
  # check blocked metabolites / dead-ends
  df_S = biosynthesis_on_media(logger, model, dbexp, args.gap_fill, args.biosynth)
  if type(df_S)==int: return 1


+
  ###### POLISHING 3
  # reset growth environment befor saving the model
  gempipe.reset_growth_env(model)


+
  # output the model:
  logger.info("Writing strain-specific model...")
  cobra.io.save_json_model(model, f'{args.outdir}/{model.id}.json') # JSON

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/gapfillutils.py
@@ -33,6 +33,9 @@ def import_from_universe(model, universe, rid, bounds=None, gpr=None):
  else:
  r.gene_reaction_rule = ''
  r.update_genes_from_gpr()
+
+ # set annotations
+ r.annotation = ru.annotation




{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/mkmodel/mkmodel.py
@@ -64,6 +64,7 @@ def create_model_incore(params):
  # remove universal orphans
  model = remove_universal_orphans(logger, model)

+

  ###### PRUNING
  logger.info("Reading provided eggnog-mapper annotation...")
@@ -77,6 +78,7 @@ def create_model_incore(params):
  translate_remaining_kos(logger, model, eggnog_ko_to_gids)
  restore_gene_annotations(logger, model, universe, eggonog_gid_to_kos)

+

  ###### GAPFILLING
  # force inclusion of reactions:
@@ -103,30 +105,35 @@ def create_model_incore(params):
  if type(df_P)==int: return 1


- ###### POLISHING 2
- # remove disconnected metabolites
- model = remove_disconnected(logger, model)

+ ###### POLISHING 2
  # remove unsed sinks and demands
  model = remove_sinks_demands(logger, model)
+
+ # remove disconnected metabolites
+ model = remove_disconnected(logger, model)

+

  # # # # # DERIVATION ENDS HERE # # # # #
  log_metrics(logger, model)
  log_unbalances(logger, model)


+
  ###### CHECKS
  # check blocked metabolites / dead-ends
  df_S = biosynthesis_on_media(logger, model, dbexp, args.gap_fill, args.biosynth)
  if type(df_S)==int: return 1


+
  ###### POLISHING 3
  # reset growth environment befor saving the model
  gempipe.reset_growth_env(model)


+
  # output the model:
  logger.info("Writing strain-specific model...")
  cobra.io.save_json_model(model, f'{args.outdir}/{model.id}.json') # JSON
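
The reordering in POLISHING 2 above moves remove_sinks_demands ahead of remove_disconnected: deleting unused sink and demand reactions can strand metabolites that no longer take part in any reaction, and running the disconnected-metabolite pass afterwards picks those up. A hedged sketch of such a pass using plain COBRApy follows; the helper name is illustrative, not gsrap's implementation.

import cobra

def remove_disconnected_metabolites(model: cobra.Model) -> int:
    """Drop metabolites that no longer participate in any reaction; return how many were removed."""
    orphans = [m for m in model.metabolites if len(m.reactions) == 0]
    model.remove_metabolites(orphans)
    return len(orphans)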

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/annotation-checkpoint.py
@@ -66,6 +66,9 @@ def translate_annotate_genes(logger, model, idcollection_dict):
  g.annotation['ec'] = list(ko_to_ecs[ko])
  g.annotation['cog'] = list(ko_to_cogs[ko])
  g.annotation['go'] = list(ko_to_gos[ko])
+
+ # add SBO annotation
+ g.annotation['sbo'] = ['SBO:0000243'] # demand reaction




{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py
@@ -43,10 +43,10 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
  for eggfile in eggnog:
  eggset = parse_eggnog(model, eggfile, idcollection_dict)
  kr_uni = kr_uni.union(eggset)
- kr_uni_label = f"'{len(eggnog)} eggnog annotations'"
+ kr_uni_label = f"{len(eggnog)} eggnog annotations"
  else:
  kr_uni = idcollection_dict['kr']
- kr_uni_label = "'whole KEGG'"
+ kr_uni_label = "whole KEGG"


  # get all the 'kr' annotations in the model
@@ -55,7 +55,10 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
  if 'kegg.reaction' in r.annotation.keys():
  for kr_id in r.annotation['kegg.reaction']:
  kr_ids_modeled.add(kr_id)
- logger.info(f"Universe coverage for {kr_uni_label}: {round(len(kr_ids_modeled.intersection(kr_uni))/len(kr_uni)*100, 0)}%!")
+ kr_uni_missing = len(kr_uni - kr_ids_modeled.intersection(kr_uni))
+ kr_uni_coverage = len(kr_ids_modeled.intersection(kr_uni)) / len(kr_uni) * 100
+ logger.info(f"Coverage for '{kr_uni_label}': {round(kr_uni_coverage, 0)}% ({kr_uni_missing} missing).")
+


  # get all the map / md codes:
@@ -147,7 +150,7 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
  'missing': missing,
  'md_ids': [j['md_id'] for j in i['mds']],
  })
-
+

  # order list by '%' of completness and print:
  list_coverage = sorted(list_coverage, key=lambda x: x['perc_completeness'], reverse=True)

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/introduce-checkpoint.py
@@ -141,6 +141,10 @@ def introduce_metabolites(logger, db, model, idcollection_dict, kegg_compound_to
  # save as list:
  for ankey in ankeys:
  m.annotation[ankey] = list(m.annotation[ankey])
+
+
+ # add SBO annotation
+ m.annotation['sbo'] = ['SBO:0000247'] # generic metabolite



@@ -264,7 +268,8 @@ def introduce_transporters(logger, db, model, idcollection_dict, kegg_reaction_t
  m_e.name = m_c.name
  m_e.formula = m_c.formula
  m_e.charge = m_c.charge
- m_e.annotation = m_c.annotation
+
+ m_e.annotation = m_c.annotation # transfer all annotations, including SBO!


  def add_exchange_reaction(model, mid_e):
@@ -283,6 +288,10 @@ def introduce_transporters(logger, db, model, idcollection_dict, kegg_reaction_t
  r.bounds = (-1000, 1000)
  else:
  r.bounds = (0, 1000)
+
+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000627'] # exchange reaction
+



@@ -418,6 +427,9 @@ def introduce_sinks_demands(logger, model):
  r.name = f"Sink for {model.metabolites.get_by_id(f'{puremid}_c').name}"
  r.build_reaction_from_string(f'{puremid}_c <=> ')
  r.bounds = (-1000, 1000)
+
+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000632'] # sink reaction


  for puremid in demands:
@@ -427,6 +439,9 @@ def introduce_sinks_demands(logger, model):
  r.name = f"Demand for {model.metabolites.get_by_id(f'{puremid}_c').name}"
  r.build_reaction_from_string(f'{puremid}_c --> ')
  r.bounds = (0, 1000)
+
+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000628'] # demand reaction


  return model

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py
@@ -113,7 +113,7 @@ def main(args, logger):

  ###### RECONSTRUCTION
  # create the model
- universe = cobra.Model('newuni')
+ universe = cobra.Model('universe')
  logger.info("Parsing gsrap database...")

  # introduce M / R / T

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/.ipynb_checkpoints/repeating-checkpoint.py
@@ -125,6 +125,13 @@ def add_reaction(logger, model, rid, row, kr_ids, kegg_reaction_to_others, addty
  r.annotation[ankey] = list(r.annotation[ankey])


+ # add SBO annotation
+ if addtype=='R':
+ r.annotation['sbo'] = ['SBO:0000176'] # metabolic reaction
+ else:
+ r.annotation['sbo'] = ['SBO:0000185'] # transport reaction
+
+
  # check if unbalanced
  if r.check_mass_balance() != {}:
  logger.error(f"{itemtype} '{r.id}' is unbalanced: {r.check_mass_balance()}.")

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/annotation.py
@@ -66,6 +66,9 @@ def translate_annotate_genes(logger, model, idcollection_dict):
  g.annotation['ec'] = list(ko_to_ecs[ko])
  g.annotation['cog'] = list(ko_to_cogs[ko])
  g.annotation['go'] = list(ko_to_gos[ko])
+
+ # add SBO annotation
+ g.annotation['sbo'] = ['SBO:0000243'] # demand reaction




{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/completeness.py
@@ -43,10 +43,10 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
  for eggfile in eggnog:
  eggset = parse_eggnog(model, eggfile, idcollection_dict)
  kr_uni = kr_uni.union(eggset)
- kr_uni_label = f"'{len(eggnog)} eggnog annotations'"
+ kr_uni_label = f"{len(eggnog)} eggnog annotations"
  else:
  kr_uni = idcollection_dict['kr']
- kr_uni_label = "'whole KEGG'"
+ kr_uni_label = "whole KEGG"


  # get all the 'kr' annotations in the model
@@ -55,7 +55,10 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
  if 'kegg.reaction' in r.annotation.keys():
  for kr_id in r.annotation['kegg.reaction']:
  kr_ids_modeled.add(kr_id)
- logger.info(f"Universe coverage for {kr_uni_label}: {round(len(kr_ids_modeled.intersection(kr_uni))/len(kr_uni)*100, 0)}%!")
+ kr_uni_missing = len(kr_uni - kr_ids_modeled.intersection(kr_uni))
+ kr_uni_coverage = len(kr_ids_modeled.intersection(kr_uni)) / len(kr_uni) * 100
+ logger.info(f"Coverage for '{kr_uni_label}': {round(kr_uni_coverage, 0)}% ({kr_uni_missing} missing).")
+


  # get all the map / md codes:
@@ -147,7 +150,7 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
  'missing': missing,
  'md_ids': [j['md_id'] for j in i['mds']],
  })
-
+

  # order list by '%' of completness and print:
  list_coverage = sorted(list_coverage, key=lambda x: x['perc_completeness'], reverse=True)
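
The new coverage message in check_completeness reports both the percentage of the reference KEGG reaction set already present in the model and the absolute number of reactions still missing. A standalone sketch of that computation is shown below; the function name and example IDs are illustrative.

def kegg_coverage(kr_uni: set, kr_ids_modeled: set) -> tuple:
    """Percentage of the reference set covered by the model's 'kegg.reaction' annotations, plus the count still missing."""
    covered = kr_ids_modeled & kr_uni
    missing = len(kr_uni - covered)
    coverage = len(covered) / len(kr_uni) * 100 if kr_uni else 0.0
    return round(coverage, 0), missing

# Example: kegg_coverage({'R00001', 'R00002'}, {'R00002', 'R09999'}) -> (50.0, 1)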

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/introduce.py
@@ -141,6 +141,10 @@ def introduce_metabolites(logger, db, model, idcollection_dict, kegg_compound_to
  # save as list:
  for ankey in ankeys:
  m.annotation[ankey] = list(m.annotation[ankey])
+
+
+ # add SBO annotation
+ m.annotation['sbo'] = ['SBO:0000247'] # generic metabolite



@@ -264,7 +268,8 @@ def introduce_transporters(logger, db, model, idcollection_dict, kegg_reaction_t
  m_e.name = m_c.name
  m_e.formula = m_c.formula
  m_e.charge = m_c.charge
- m_e.annotation = m_c.annotation
+
+ m_e.annotation = m_c.annotation # transfer all annotations, including SBO!


  def add_exchange_reaction(model, mid_e):
@@ -283,6 +288,10 @@ def introduce_transporters(logger, db, model, idcollection_dict, kegg_reaction_t
  r.bounds = (-1000, 1000)
  else:
  r.bounds = (0, 1000)
+
+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000627'] # exchange reaction
+



@@ -418,6 +427,9 @@ def introduce_sinks_demands(logger, model):
  r.name = f"Sink for {model.metabolites.get_by_id(f'{puremid}_c').name}"
  r.build_reaction_from_string(f'{puremid}_c <=> ')
  r.bounds = (-1000, 1000)
+
+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000632'] # sink reaction


  for puremid in demands:
@@ -427,6 +439,9 @@ def introduce_sinks_demands(logger, model):
  r.name = f"Demand for {model.metabolites.get_by_id(f'{puremid}_c').name}"
  r.build_reaction_from_string(f'{puremid}_c --> ')
  r.bounds = (0, 1000)
+
+ # add SBO annotation
+ r.annotation['sbo'] = ['SBO:0000628'] # demand reaction


  return model
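
Both r.annotation = ru.annotation in import_from_universe (gapfillutils.py) and m_e.annotation = m_c.annotation in introduce_transporters above copy annotations by assigning the source object's annotation dict directly, so the two COBRApy objects end up referencing one shared dict. A hedged sketch of the same idea with an explicit deep copy, which some pipelines prefer so that later edits to one object cannot leak into the other; the model and reaction IDs below are placeholders.

import copy
import cobra

universe = cobra.Model("universe")
ru = cobra.Reaction("R00200")
ru.annotation = {'kegg.reaction': ['R00200'], 'sbo': ['SBO:0000176']}
universe.add_reactions([ru])

model = cobra.Model("strain")
r = cobra.Reaction(ru.id)
model.add_reactions([r])

# deep-copying keeps the strain model's annotations independent of the universe
r.annotation = copy.deepcopy(ru.annotation)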

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/parsedb.py
@@ -113,7 +113,7 @@ def main(args, logger):

  ###### RECONSTRUCTION
  # create the model
- universe = cobra.Model('newuni')
+ universe = cobra.Model('universe')
  logger.info("Parsing gsrap database...")

  # introduce M / R / T

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/parsedb/repeating.py
@@ -125,6 +125,13 @@ def add_reaction(logger, model, rid, row, kr_ids, kegg_reaction_to_others, addty
  r.annotation[ankey] = list(r.annotation[ankey])


+ # add SBO annotation
+ if addtype=='R':
+ r.annotation['sbo'] = ['SBO:0000176'] # metabolic reaction
+ else:
+ r.annotation['sbo'] = ['SBO:0000185'] # transport reaction
+
+
  # check if unbalanced
  if r.check_mass_balance() != {}:
  logger.error(f"{itemtype} '{r.id}' is unbalanced: {r.check_mass_balance()}.")

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py
@@ -12,9 +12,7 @@ from ..commons import verify_growth
  def grow_on_media(logger, model, dbexp, media, fva, universe_in_parsedb=False):


- # if working with the universe:
- if universe_in_parsedb:
- log_for_parsedb = []
+


  # check if requested
@@ -58,7 +56,10 @@ def grow_on_media(logger, model, dbexp, media, fva, universe_in_parsedb=False):
  res_fba = verify_growth(model, boolean=False)
  df_G.loc[obj_id, f'{medium}'] = res_fba
  if universe_in_parsedb:
- log_for_parsedb.append(f"'{medium}': {res_fba}")
+ if res_fba == 'infeasible' or res_fba == 0.0:
+ logger.warning(f"Growth on '{medium}': {res_fba}.")
+ else:
+ logger.info(f"Growth on '{medium}': {res_fba}.")


  # perform FVA if requested:
@@ -68,7 +69,5 @@ def grow_on_media(logger, model, dbexp, media, fva, universe_in_parsedb=False):
  for rid, row in df_fva.iterrows():
  df_G.loc[rid, f'{medium}'] = f"({round(row['minimum'], 3)}, {round(row['maximum'], 3)})"

-
- if universe_in_parsedb:
- logger.info(f"Results: {'; '.join(log_for_parsedb)}.")
+
  return df_G

{gsrap-0.7.0 → gsrap-0.7.1}/src/gsrap/runsims/simplegrowth.py
@@ -12,9 +12,7 @@ from ..commons import verify_growth
  def grow_on_media(logger, model, dbexp, media, fva, universe_in_parsedb=False):


- # if working with the universe:
- if universe_in_parsedb:
- log_for_parsedb = []
+


  # check if requested
@@ -58,7 +56,10 @@ def grow_on_media(logger, model, dbexp, media, fva, universe_in_parsedb=False):
  res_fba = verify_growth(model, boolean=False)
  df_G.loc[obj_id, f'{medium}'] = res_fba
  if universe_in_parsedb:
- log_for_parsedb.append(f"'{medium}': {res_fba}")
+ if res_fba == 'infeasible' or res_fba == 0.0:
+ logger.warning(f"Growth on '{medium}': {res_fba}.")
+ else:
+ logger.info(f"Growth on '{medium}': {res_fba}.")


  # perform FVA if requested:
@@ -68,7 +69,5 @@ def grow_on_media(logger, model, dbexp, media, fva, universe_in_parsedb=False):
  for rid, row in df_fva.iterrows():
  df_G.loc[rid, f'{medium}'] = f"({round(row['minimum'], 3)}, {round(row['maximum'], 3)})"

-
- if universe_in_parsedb:
- logger.info(f"Results: {'; '.join(log_for_parsedb)}.")
+
  return df_G
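
The grow_on_media change above replaces the single aggregated "Results: ..." line with one log line per medium, warning whenever a medium yields no growth; the companion fluxbal.py change makes verify_growth return 0.0 rather than 0 below the optimality threshold, keeping the reported value a float like the other FBA results. A hedged sketch of the per-medium reporting pattern, with illustrative names:

import logging

logger = logging.getLogger("gsrap")

def report_growth(medium: str, res_fba) -> None:
    """Log one line per medium; warn when FBA reports no growth."""
    if res_fba == 'infeasible' or res_fba == 0.0:
        logger.warning(f"Growth on '{medium}': {res_fba}.")
    else:
        logger.info(f"Growth on '{medium}': {res_fba}.")

# Example: report_growth("M9", 0.0) emits a warning; report_growth("LB", 0.842) emits an info line.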

The remaining 65 files (items 27–91 in the list above) are unchanged between 0.7.0 and 0.7.1.