seed2lp 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- seed2lp/__init__.py +12 -0
- seed2lp/__main__.py +837 -0
- seed2lp/_version.py +2 -0
- seed2lp/argument.py +717 -0
- seed2lp/asp/atom_for_transfers.lp +7 -0
- seed2lp/asp/community_heuristic.lp +3 -0
- seed2lp/asp/community_search.lp +14 -0
- seed2lp/asp/constraints_targets.lp +15 -0
- seed2lp/asp/definition_atoms.lp +87 -0
- seed2lp/asp/enum-cc.lp +50 -0
- seed2lp/asp/flux.lp +70 -0
- seed2lp/asp/limit_transfers.lp +9 -0
- seed2lp/asp/maximize_flux.lp +2 -0
- seed2lp/asp/maximize_produced_target.lp +7 -0
- seed2lp/asp/minimize.lp +8 -0
- seed2lp/asp/seed-solving.lp +116 -0
- seed2lp/asp/seed_external.lp +1 -0
- seed2lp/asp/show_seeds.lp +2 -0
- seed2lp/asp/show_tranfers.lp +1 -0
- seed2lp/asp/test.lp +61 -0
- seed2lp/clingo_lpx.py +236 -0
- seed2lp/color.py +34 -0
- seed2lp/config.yaml +56 -0
- seed2lp/description.py +424 -0
- seed2lp/file.py +151 -0
- seed2lp/flux.py +365 -0
- seed2lp/linear.py +431 -0
- seed2lp/log_conf.yaml +25 -0
- seed2lp/logger.py +112 -0
- seed2lp/metabolite.py +46 -0
- seed2lp/network.py +1921 -0
- seed2lp/reaction.py +207 -0
- seed2lp/reasoning.py +459 -0
- seed2lp/reasoningcom.py +753 -0
- seed2lp/reasoninghybrid.py +791 -0
- seed2lp/resmod.py +74 -0
- seed2lp/sbml.py +307 -0
- seed2lp/scope.py +124 -0
- seed2lp/solver.py +333 -0
- seed2lp/temp_flux_com.py +74 -0
- seed2lp/utils.py +237 -0
- seed2lp-2.0.0.dist-info/METADATA +404 -0
- seed2lp-2.0.0.dist-info/RECORD +53 -0
- seed2lp-2.0.0.dist-info/WHEEL +5 -0
- seed2lp-2.0.0.dist-info/entry_points.txt +2 -0
- seed2lp-2.0.0.dist-info/licenses/LICENCE.txt +145 -0
- seed2lp-2.0.0.dist-info/top_level.txt +2 -0
- tests/__init__.py +0 -0
- tests/fba.py +147 -0
- tests/full_network.py +166 -0
- tests/normalization.py +188 -0
- tests/target.py +286 -0
- tests/utils.py +181 -0
seed2lp/network.py
ADDED
|
@@ -0,0 +1,1921 @@
|
|
|
1
|
+
# Object Network constitued of
|
|
2
|
+
# - file (str): Path of input network file (sbml)
|
|
3
|
+
# - run_mode (str): Running command used (full or target)
|
|
4
|
+
# - name (str): Species name/ID from file name
|
|
5
|
+
# - targets_as_seeds (bool): Targets can't be seeds and are noted as forbidden
|
|
6
|
+
# - use_topological_injections (bool): Metabolite of import reaction are seeds
|
|
7
|
+
# - keep_import_reactions (bool): Import reactions are removed
|
|
8
|
+
# - reactions (list): List of reactions (object Reaction)
|
|
9
|
+
# - targets (list): List of target (object Metabolite)
|
|
10
|
+
# - seeds (list): List of seed given by the user (object Metabolite)
|
|
11
|
+
# - possible_seeds (list): List of possible seeds given by the user (object Metabolite)
|
|
12
|
+
# - forbiddend_seed (list): List of forbidden seeds (object Metabolite)
|
|
13
|
+
# - facts (str): Conversion sbml into asp facts
|
|
14
|
+
# - fluxes (list): List of flux check on all set of seeds
|
|
15
|
+
|
|
16
|
+
import os
|
|
17
|
+
import pandas as pd
|
|
18
|
+
from .reaction import Reaction
|
|
19
|
+
import seed2lp.sbml as SBML
|
|
20
|
+
from .utils import quoted, prefix_id_network
|
|
21
|
+
from . import flux
|
|
22
|
+
from .resmod import Resmod
|
|
23
|
+
from time import time
|
|
24
|
+
from . import color
|
|
25
|
+
from . import logger
|
|
26
|
+
from .file import existant_path
|
|
27
|
+
import xml.etree.ElementTree as ET
|
|
28
|
+
import copy
|
|
29
|
+
from dataclasses import dataclass
|
|
30
|
+
from tqdm import tqdm
|
|
31
|
+
from concurrent.futures import as_completed
|
|
32
|
+
|
|
33
|
+
from concurrent.futures import ProcessPoolExecutor, as_completed
|
|
34
|
+
|
|
35
|
+
# Help text printed next to flux results: explains the "Cobra (seeds)" and
# "cobra (demands)" columns of the flux report.
# NOTE(review): the literal's internal alignment was lost in extraction —
# the leading whitespace of the continuation lines below is approximate.
FLUX_MESSAGE=\
f""" {color.bold} "Cobra (seeds)" {color.reset} indicates the maximum flux
obtained in FBA from the seeds after shutting
off all other exchange reactions. If the maximum
flux is null, a test is performed opening demand
reactions for the objective reaction's products,
in order to test the effect of their accumulation
({color.bold}"cobra (demands)"{color.reset} ). If this test is not performed,
"NA" value is indicated."""

# Warning shown when Cobra/LP fluxes are compared without -max/--maximize.
WARNING_MESSAGE_LP_COBRA=\
f"""Cobra flux and LP flux might be
different because the option -max/--maximize
is not used"""
|
|
49
|
+
|
|
50
|
+
@dataclass
class NET_TITLE:
    """Lookup tables mapping human-readable result titles to internal keys.

    NOTE: the two dicts below carry no type annotations, so @dataclass does
    NOT treat them as fields — they are plain class attributes shared by all
    instances (mutating one affects every instance).
    """

    # Solver/approach display title -> internal solve-mode key.
    CONVERT_TITLE_SOLVE={"REASONING":'reasoning',
                         "REASONING FILTER":'filter',
                         "REASONING GUESS-CHECK":'guess_check',
                         "REASONING GUESS-CHECK DIVERSITY":'guess_check_div',
                         "HYBRID":'hybrid',
                         "ALL":'all'}

    # Run-mode display title -> internal run-mode key.
    CONVERT_TITLE_MODE={"Target":'target',
                        "Full network":'full',
                        "FBA":'fba',
                        "Community Global":'global',
                        "Community Bisteps":'bisteps',
                        "Community delete superset":'delsupset'}
|
|
65
|
+
|
|
66
|
+
###################################################################
|
|
67
|
+
########################## Class NetBase ##########################
|
|
68
|
+
###################################################################
|
|
69
|
+
class NetBase:
|
|
70
|
+
    def __init__(self, targets_as_seeds:bool=False, use_topological_injections:bool=False,
                 keep_import_reactions:bool=True, accumulation:bool=False, equality_flux:bool=False):
        """Initialize Object NetBase

        Args:
            targets_as_seeds (bool): Targets can't be seeds and are noted as forbidden
            use_topological_injections (bool): Metabolite of import reaction are seeds
            keep_import_reactions (bool): Import reactions are not removed
            accumulation (bool, optional): Is accumulation authorized. Defaults to False.
            equality_flux (bool, optional): Defaults to False.
                NOTE(review): exact semantics not visible from this file — confirm
                against the solver module before documenting further.
        """
        self.targets_as_seeds = targets_as_seeds
        self.use_topological_injections = use_topological_injections
        self.keep_import_reactions = keep_import_reactions
        # Reaction objects composing the network (see add_reaction)
        self.reactions = list()
        # parsed SBML model per species name
        self.model=dict()
        # list of reaction having reactants and products switched
        self.switched_meta_reactions = dict()
        # list of reaction having the reversibility changed
        self.reversible_modified_reactions = dict()
        # list of reaction deleted because boundaries are [0,0]
        self.deleted_reactions = dict()
        # list of exchange reaction
        self.exchanged_reactions = dict()

        # fbc (SBML flux-balance-constraints) namespace per species
        self.fbc=dict()
        # SBML parameter id -> numeric value, per species (see get_boundaries)
        self.parameters = dict()
        # [species, reaction_id] pairs of objective reactions
        self.objectives = list()
        self.objectives_reaction_name = list()
        # set when no usable objective reaction could be found
        self.is_objective_error = False
        # raw metabolite id -> list of (possibly community-prefixed) ids
        self.targets=dict()
        self.seeds = list()
        self.possible_seeds = list()
        # True when the user restricted the search to "Possible seeds"
        self.is_subseed = False
        self.forbidden_seeds = list()
        # ASP facts converted from the SBML network
        self.facts = ""
        self.meta_exchange_list = list()
        self.meta_transport_list = list()
        self.meta_other_list = list()
        # metabolite of import reaction having multiple metabolite such as None -> A+B
        self.meta_multiple_import_list = list()
        self.accumulation = accumulation

        self.instance_file=str()

        self.result_seeds=list()
        self.fluxes = pd.DataFrame()

        self.is_community=False
        self.species=list()

        # dictionnary of used metabolites containing list of reactions where
        # they are used
        self.used_meta = dict()

        self.equality_flux=equality_flux
|
|
125
|
+
|
|
126
|
+
######################## GETTER ########################
|
|
127
|
+
def _get_reactions(self):
|
|
128
|
+
return self.reactions
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def _get_result_seeds(self):
|
|
132
|
+
return self.result_seeds
|
|
133
|
+
########################################################
|
|
134
|
+
|
|
135
|
+
######################## SETTER ########################
|
|
136
|
+
def _set_file_extension(self, file:str):
|
|
137
|
+
ext = os.path.splitext(file)[1]
|
|
138
|
+
self.file_extension = ext
|
|
139
|
+
|
|
140
|
+
def _set_name(self):
|
|
141
|
+
n = f'{os.path.splitext(os.path.basename(self.file))[0]}'
|
|
142
|
+
self.name = n
|
|
143
|
+
print(f"Network name: {n}")
|
|
144
|
+
|
|
145
|
+
def _set_reactions(self, reactions:list):
|
|
146
|
+
self.reactions = reactions
|
|
147
|
+
|
|
148
|
+
def _set_result_seeds(self, result_seeds:list):
|
|
149
|
+
self.result_seeds = result_seeds
|
|
150
|
+
########################################################
|
|
151
|
+
|
|
152
|
+
######################## METHODS ########################
|
|
153
|
+
|
|
154
|
+
def get_objective_reactant(self, ojective_name:str, species:str):
|
|
155
|
+
"""Get the objective reactants from SBML file
|
|
156
|
+
"""
|
|
157
|
+
logger.log.info("Finding list of reactants from opbjective reaction...")
|
|
158
|
+
reactants = SBML.get_listOfReactants_from_name(self.model[species], ojective_name)
|
|
159
|
+
for reactant in reactants:
|
|
160
|
+
react_name = prefixed_name = reactant.attrib.get('species')
|
|
161
|
+
prefixed_name = prefix_id_network(self.is_community, react_name, species, "metabolite")
|
|
162
|
+
|
|
163
|
+
if react_name not in self.targets:
|
|
164
|
+
self.targets[react_name] = [prefixed_name]
|
|
165
|
+
else:
|
|
166
|
+
self.targets[react_name].append(prefixed_name)
|
|
167
|
+
logger.log.info("... DONE")
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
    def get_boundaries(self, reaction, species:str):
        """Get Boundaries of a reaction

        Args:
            reaction (etree line): Reaction from etree package
            species (str): Network name whose fbc namespace and parameter
                table are used for the lookup

        Returns:
            lbound (float), ubound (float): lower and uppper boundaries value
        """
        # The fbc attributes 'lowerFluxBound'/'upperFluxBound' hold the *id*
        # of an SBML parameter; the numeric value comes from
        # self.parameters[species][<id>].
        # NOTE(review): the else-branch wraps the attribute in literal quotes
        # and then feeds it to float(), which would raise ValueError; since
        # attrib.get() returns str or None (never float), that branch looks
        # unreachable — confirm before relying on it.
        lower_bound = self.parameters[species][reaction.attrib.get('{'+self.fbc[species]+'}lowerFluxBound')] \
            if type(reaction.attrib.get('{'+self.fbc[species]+'}lowerFluxBound')) is not float \
            else '"'+reaction.attrib.get('{'+self.fbc[species]+'}lowerFluxBound')+'"'
        # Round to 10 decimals to strip float noise from the SBML source.
        lbound = round(float(lower_bound),10)
        upper_bound = self.parameters[species][reaction.attrib.get('{'+self.fbc[species]+'}upperFluxBound')] \
            if type(reaction.attrib.get('{'+self.fbc[species]+'}upperFluxBound')) is not float \
            else '"'+reaction.attrib.get('{'+self.fbc[species]+'}upperFluxBound')+'"'
        ubound = round(float(upper_bound),10)
        return lbound, ubound
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
    def find_objectives(self, input_dict:dict, species:str=""):
        """Find the objective reaction from SBML file
        If mode Target and no target set : put reactant of objective
        as targets

        Args:
            input_dict (dict): Constructed dictionnary of inputs
            species (str, optional): Network name (from filename); empty for
                a single (non-community) network. Defaults to "".

        Returns:
            bool: True when the objective's reactants were registered as
                targets (see get_objective_reactant).

        Raises:
            ValueError: Multiple objective reaction with coefficient 1 found
            ValueError: No objective reaction found or none has coefficient 1
            ValueError: objective boundaries are [0,0] (reaction can never carry flux)
        """
        objectives = SBML.get_listOfFluxObjectives(self.model[species], self.fbc[species])

        obj_found_name = None
        is_reactant_found = False
        for obj in objectives:
            # obj is (reaction_name, coefficient, etree_reaction)
            coef=float(obj[1])
            if obj_found_name is None:
                #For now works with only one objective
                if coef == 1:
                    obj_found_name = obj[0]
                    reaction = obj[2]
                    # NOTE: lbound/ubound are only bound here; the [0,0] check
                    # below is reached only when obj_found_name is set, so
                    # they are always defined there.
                    lbound, ubound = self.get_boundaries(reaction, species)
            # multiple objectives found with coefficient 1
            else:
                if coef == 1:
                    objectives = None
                    obj_found_name = None
                    self.is_objective_error = True
                    raise ValueError(f"Multiple objective reaction with coefficient 1 found\n")

        if not obj_found_name:
            self.is_objective_error = True
            raise ValueError(f"No objective reaction found or none has coefficient 1\n")
        else:
            logger.print_log(f'Objective found for {species}: {color.bold}{obj_found_name}{color.reset}', "info")
            # An objective fixed to zero flux is unusable.
            # (NOTE(review): "objetive" typo below is in the runtime message,
            # left untouched here.)
            if lbound == 0 and ubound == 0:
                self.is_objective_error = True
                raise ValueError(f"Lower and upper boundaries are [0,0] \nfor objetive reaction {obj_found_name}\n")
            obj_found_id = prefix_id_network(self.is_community, obj_found_name, species, "reaction")
            self.objectives.append([species, obj_found_id])
            self.objectives_reaction_name.append(obj_found_id)
            # When the run needs targets but the user supplied none, fall back
            # to the objective's reactants.
            if (self.run_mode == "target" or self.run_mode == "fba" or self.run_mode == "community")\
                and ('Targets' not in input_dict or not input_dict["Targets"]):
                if not self.is_community:
                    species = self.name
                self.get_objective_reactant(obj_found_name, species)
                is_reactant_found = True
        return is_reactant_found
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
def check_objectives(self, input_dict:dict):
|
|
243
|
+
"""Check if objectives reaction is given by user
|
|
244
|
+
|
|
245
|
+
Args:
|
|
246
|
+
input_dict (dict): The input dictionnary
|
|
247
|
+
|
|
248
|
+
Returns:
|
|
249
|
+
bool: Boolean determining if objectives is given by user
|
|
250
|
+
"""
|
|
251
|
+
is_user_objective = False
|
|
252
|
+
if input_dict and "Objective" in input_dict and input_dict["Objective"]:
|
|
253
|
+
self.objectives = input_dict["Objective"]
|
|
254
|
+
is_user_objective = True
|
|
255
|
+
return is_user_objective
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
    def init_with_inputs(self, input_dict:dict, is_reactant_found:bool, objective_error:bool,
                         is_user_objective:bool):
        """Initiate Networks with inputs given by user and arguments. Find objectives reaction
        if not given by user. Show messages on terminal about the Network.

        Args:
            input_dict (dict): The input dictionnary
            is_reactant_found (bool): Define if the reactant are found in sbml or given by user as targets (all modes except Full Network mode)
            objective_error (bool): Define if the objective reaction was in error (not found) to write the corresponding warning message
            is_user_objective (bool): True when the objective came from the
                user (target file / command line) rather than the SBML file
        """
        tgt_message = ""

        # NOTE(review): "target" also appears in the second pattern below, but
        # match/case takes the first matching arm, so that alternative is dead
        # — the second arm effectively fires for "fba" only. Confirm intent.
        match self.run_mode:
            case "target" | "community":
                tgt_message = "Targets set:\n"
            case "target" | "fba":
                tgt_message = "Targets detected for option \"target as seeds\":\n"
            case _:
                tgt_message = "Targets set:\nAll metabolites as target\n"
        obj_message = f"Objective set:\n"
        kir_mess = "Import reaction: "
        ti_inject = "Product of import reaction set as seed: "
        tas_mess = "Targets can be seeds: "
        accu_mess = "Accumulation: "

        # init network with input data
        if input_dict:
            if "Targets" in input_dict and input_dict["Targets"]:
                self.targets = input_dict["Targets"]
                tgt_message += " Metabolites from target file\n"
            if "Seeds" in input_dict:
                self.seeds = input_dict["Seeds"]
            if "Possible seeds" in input_dict:
                self.possible_seeds = input_dict["Possible seeds"]
                self.is_subseed = True
            if "Forbidden seeds" in input_dict:
                self.forbidden_seeds = input_dict["Forbidden seeds"]
            # Objective given in target file (mode target)
            # or command line (mode full)
            if is_user_objective:
                if self.run_mode == "target":
                    obj_message += "- Objective reaction from target file\n"
                else:
                    obj_message += "- Objective reaction from command line\n"
                suff_plurial=""
                if len(self.objectives) > 1:
                    suff_plurial = "s"
                # Reactant of objective set as target on target mode
                # No needed on full mode (all metabolite are set as target)
                if self.run_mode != "full":
                    for obj in self.objectives:
                        # obj is [species, reaction_id]
                        if self.is_community:
                            species=obj[0]
                        else:
                            species = self.name
                        self.get_objective_reactant(ojective_name=obj[1], species=species)
                        # Rewrite the id in place with the community prefix.
                        obj[1]=prefix_id_network(self.is_community, obj[1], obj[0], "reaction")
                        self.objectives_reaction_name.append(obj[1])
                    obj_string = " | ".join([str(item[1]) for item in self.objectives])
                    obj_message += f' Objective{suff_plurial} : {obj_string}'
                    tgt_message += " Reactant of objective reaction\n from target file\n"
                else:
                    obj_string = " | ".join([str(item[1]) for item in self.objectives])
                    obj_message += f'\n Objective{suff_plurial} : {obj_string}'

        # Find objective into sbml file if not given by user
        if not is_user_objective:
            if not objective_error :
                obj_message += "- Objective reaction from SBML file"
                suff_plurial=""
                if len(self.objectives) > 1:
                    suff_plurial = "s"
                obj_string = " | ".join([str(item[1]) for item in self.objectives])
                obj_message += f'\n Objective{suff_plurial} : {obj_string}'
                if self.run_mode != "full" \
                    and is_reactant_found:
                    tgt_message += " Reactant of objective reaction\n from SBML file\n"
            else:
                if self.run_mode == "target" \
                    and (self.targets is None or not self.targets):
                    tgt_message += " No target found"
                    obj_message += " No objective reaction found"
                else:
                    # Community case: some species still lack an objective —
                    # try to find one per missing species in its SBML model.
                    if len(self.objectives)<len(self.species):
                        obj_message += "\n\n- Objective reaction from SBML file"
                        suff_plurial = "s"
                        obj_found_string = ""
                        added_species = set()
                        for species in self.species:
                            if species not in [obj[0] for obj in self.objectives]:
                                added_species.add(species)
                                is_reactant_found = self.find_objectives(input_dict, species)
                        for item in self.objectives:
                            if item[0] in added_species:
                                obj_found_string = obj_found_string + item[1] + " | "
                                self.objectives_reaction_name.append(item[1])
                        obj_found_string = obj_found_string.removesuffix(" | ")
                        obj_message += f'\n Objective{suff_plurial} : {obj_found_string}'

        if self.keep_import_reactions:
            kir_mess += " Kept"
        else:
            kir_mess += " Removed"

        if self.use_topological_injections:
            ti_inject += " Yes"
        else:
            ti_inject += " No"

        if self.run_mode != "full" and self.targets_as_seeds:
            tas_mess += " Yes"
        elif self.run_mode == "full":
            # Not meaningful in full mode: every metabolite is a target.
            tas_mess = None
        else:
            tas_mess += " No"

        if self.accumulation:
            accu_mess += " Allowed"
        else:
            accu_mess += " Forbidden"

        self.write_cases_messages(tgt_message, obj_message, [kir_mess, ti_inject, tas_mess, accu_mess])
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
def add_reaction(self, reaction:Reaction):
|
|
383
|
+
"""Add a reaction into the Network list of reaction
|
|
384
|
+
|
|
385
|
+
Args:
|
|
386
|
+
reaction (Reaction): Object reaction
|
|
387
|
+
"""
|
|
388
|
+
reactions_list = self._get_reactions()
|
|
389
|
+
reactions_list.append(reaction)
|
|
390
|
+
self._set_reactions(reactions_list)
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
# def prefix_id_network(self, name:str, species:str="", type_element:str=""):
|
|
394
|
+
# """Prefix Reaction or Metbolite by the network name (filename) if the tool is used for community.
|
|
395
|
+
# For single network, nothing is prefixed.
|
|
396
|
+
|
|
397
|
+
# Args:
|
|
398
|
+
# name (str): ID of the element
|
|
399
|
+
# species (str, optional): Network name (from filename). Defaults to "".
|
|
400
|
+
# type_element: (str, optional): "reaction" or "metabolite" or no type. Defaults to "".
|
|
401
|
+
|
|
402
|
+
# Returns:
|
|
403
|
+
# str: The name prfixed by the network if needed
|
|
404
|
+
# """
|
|
405
|
+
# match self.is_community, type_element:
|
|
406
|
+
# case True,"reaction":
|
|
407
|
+
# return sub("^R_", f"R_{species}_",name)
|
|
408
|
+
# case True,"metabolite":
|
|
409
|
+
# return sub("^M_", f"M_{species}_",name)
|
|
410
|
+
# case True,"metaid":
|
|
411
|
+
# return sub("^meta_R_", f"meta_R_{species}_",name)
|
|
412
|
+
# case True,_:
|
|
413
|
+
# return f"{species}_{name}"
|
|
414
|
+
# case _,_:
|
|
415
|
+
# return name
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
    def get_network(self, species:str, to_print:bool=True,
                    write_sbml:bool=False):
        """Get the description of the Network from SBML file
        Construct list of reactants and products
        Correct the reversibility based on boundaries
        For import or export reaction, if reversibility is corrected
        correct also reactants and products by exchanging them
        When writing SBML, delete Import reaction

        Args:
            species (str): Network name (from filename)
            to_print (bool, optional): Print messages on terminal. Defaults to True.
            write_sbml (bool, optional): Is a writing SBML file mode or not. Defaults to False.

        NOTE(review): reconstructed from a whitespace-mangled diff view —
        block nesting below inferred from semantics; verify against the
        released wheel before further edits.
        """
        reactions_list = SBML.get_listOfReactions(self.model[species])
        # In community mode prefix the warning block with the species name.
        if self.is_community:
            warning_message = f"{species}"
        else:
            warning_message = ""

        info_message = ""

        for r in reactions_list:
            reaction_id= r.attrib.get("id")
            if self.is_community:
                reaction_id = prefix_id_network(self.is_community, reaction_id, species, "reaction")
            reaction = Reaction(reaction_id, species=species)
            reaction.is_exchange=False
            source_reversible = False if r.attrib.get('reversible') == 'false' else True
            # Treating reverserbility separately, lower bound can stay to 0
            reaction.lbound, reaction.ubound = self.get_boundaries(r,species)

            # delete prefix species on name for community mode
            if self.is_community:
                reaction_origin_name = reaction.name.replace(f"_{species}", "")
            else:
                reaction_origin_name = reaction.name

            # If the reaction can never have flux, meaning lower_bound = 0 and upper_bound = 0
            # The reaction is deleted from the network
            if reaction.lbound == 0 and reaction.ubound == 0:
                self.deleted_reactions[reaction.name] = len(self.reactions)
                warning_message += f"\n - {reaction_origin_name}: Deleted.\n Boundaries was: [{reaction.lbound} ; {reaction.ubound}]"
                # Not added not reaction list of the network
                continue

            reactants, list_react_names = SBML.get_listOfReactants(r,species,self.is_community)
            products, list_product_names = SBML.get_listOfProducts(r,species,self.is_community)
            # uses the definition of boundaries as cobra
            # a reaction is in boundaries (so exchange reaction)
            # when a reaction has only one metabolite and
            # does not have reactants or products
            if not (reactants and products):
                if (reactants and len(reactants) == 1) \
                    or (products and len(products) == 1):
                    # Cobra definition of exchange reaction
                    reaction.is_exchange=True
                    self.exchanged_reactions[reaction.name] = len(self.reactions)
                elif not reactants and not products:
                    # Reaction with boundaries [0,0] is deleted on the network for reasoning part.
                    # But it is not deleted on sbml file when rewritten (command network option rewrite file).
                    warning_message += f"\n - {reaction_origin_name} deleted. No reactants and no products."
                    continue
                else:
                    self.exchanged_reactions[reaction.name] = len(self.reactions)
                    # A reaction is multiple exchange
                    # when None -> A + B || A + B -> None || None <-> A + B || A + B <-> None
                    warning_message += f" - {reaction_origin_name} is multiple (more than 1) metabolites import/export reaction. "
                    if not self.keep_import_reactions:
                        warning_message += "\n\tDeleted as it is an import reaction."
                    else:
                        warning_message += "\n"

            # Check if transport reactions
            # The same metabolites are involved in both reactants and products to be defined as Transport
            # The list contains only the name of metabolite without the compartiment
            # both list must be the same
            if set(list_react_names) == set(list_product_names):
                reaction.is_transport=True

            # import reactions to remove

            # For each reaction check if the lower and upper bounds
            # have the same sign (not reversible)
            # Cases : [-1000,-10] , [-1000,0], [0, 1000], [10,1000]
            if reaction.lbound*reaction.ubound >= 0:
                reaction.reversible=False
                # Reaction written backwards
                # M -> None, with boundaries [-1000, -10] is import reaction case
                # R -> P, with boundaries [-1000, -10] is P -> R
                # None -> M, with boundaries [-1000, -10] is export reaction case
                # import reactions removed, needs no reactant
                # exchange reactants and products
                if reaction.ubound <= 0:
                    if "- R_" not in warning_message:
                        warning_message +="\n"
                    warning_message += f"\n - {reaction_origin_name}: Reactants and products switched.\n Boundaries was: [{reaction.lbound} ; {reaction.ubound}]"
                    meta = products
                    products = reactants
                    reactants = meta
                    reaction.is_meta_modified = True
                    # Index of reaction needed for network rewriting
                    self.switched_meta_reactions[reaction.name] = len(self.reactions)

                    # Change the bounds: negate and swap so the rewritten
                    # forward reaction keeps an equivalent flux interval.
                    if reaction.ubound != 0:
                        bound = - reaction.ubound
                    else:
                        bound = reaction.ubound
                    if reaction.lbound != 0:
                        reaction.ubound = - reaction.lbound
                    else:
                        reaction.ubound = reaction.lbound
                    reaction.lbound = bound
            # The upper and lower bound does not have the same sign
            # The reaction is reversible
            # Cases: [-1000,10], [-10,1000], ...
            else:
                reaction.reversible = True
            self.meta_exchange_list, self.meta_transport_list, self.used_meta = reaction.add_metabolites_from_list(reactants,"reactant",
                                            self.meta_exchange_list, self.meta_transport_list, self.used_meta)

            self.meta_exchange_list, self.meta_transport_list, self.used_meta = reaction.add_metabolites_from_list(products,"product",
                                            self.meta_exchange_list, self.meta_transport_list, self.used_meta)

            if reaction.is_exchange and not self.keep_import_reactions:
                # We already review the order of reaction and product regarding boudaries
                # there is some cases to take into account :
                # None <-> P [-1000, 1000] : reactant and product has to be exchanged to then put lbound to 0 and transform it into export only
                # R <-> None [-1000, 1000] : lbound set to 0 and it will become an export only
                # None -> P [0,1000] : Is an import only reaction and has to be deleted
                # R -> None [0,1000] : OK no modifications
                if products and not reactants:
                    if reaction.lbound >= 0:
                        # Index of reaction needed for network rewriting
                        self.deleted_reactions[reaction.name] = len(self.reactions)
                        warning_message += f"\n - {reaction_origin_name}: Deleted.\n Not reversible import reaction."
                    else:
                        reaction.is_meta_modified = True
                        meta = products
                        products = reactants
                        reactants = meta
                        # Index of reaction needed for network rewriting
                        self.switched_meta_reactions[reaction.name] = len(self.reactions)
                        warning_message += f"\n - {reaction_origin_name}: Reactants and products switched.\n Exchanged reaction became export only."
                        # The reaction is no more reversible
                        reaction.reversible = False
                        reaction.has_rm_prefix = True

            reaction.is_reversible_modified = source_reversible != reaction.reversible

            if reaction.is_reversible_modified:
                # Index of reaction needed for network rewriting
                self.reversible_modified_reactions[reaction.name] = len(self.reactions)
                info_message += f"\n - {reaction.name}: Reversibility modified."
            self.add_reaction(reaction)

        # Because of the order of reaction, the metabolites can be found as exchanged
        # after another reaction, it is needed to correct that
        for reaction in self.reactions:
            self.review_tag_metabolite(reaction.is_transport , reaction.products, reaction.reactants)
            self.review_tag_metabolite(reaction.is_transport , reaction.reactants, reaction.products)

            if write_sbml and reaction.is_exchange and not self.keep_import_reactions:
                self.update_network_sbml(reaction)

        if (to_print):
            if warning_message :
                if not self.is_community or ( self.is_community and warning_message != species):
                    warning_message += "\n"
                    logger.log.warning(warning_message)
                    if info_message:
                        logger.log.info(info_message)
                else:
                    logger.log.info(warning_message)
                    logger.log.info(info_message)
            print("____________________________________________\n")
|
|
595
|
+
|
|
596
|
+
|
|
597
|
+
def review_tag_metabolite(self, is_transport:bool , meta_list:list, meta_list_opposite:list):
    """Re-check the type tag of each metabolite on one side of a reaction.

    Because reactions are read in file order, a metabolite may be discovered
    as exchanged/transported only after earlier reactions already tagged it;
    this pass re-aligns the tags with the network-wide lists. A metabolite
    involved in a single transport reaction (no other parent reaction) is
    downgraded from "transport" to "other" so ASP may still use it as a seed.

    Args:
        is_transport (bool): True when the owning reaction is a transport reaction
        meta_list (list): metabolites of one side (reactants or products)
        meta_list_opposite (list): metabolites of the opposite side
    """
    for meta in meta_list:
        # Align with the network-wide exchange/transport lists.
        if meta.id_meta in self.meta_exchange_list and meta.type != "exchange":
            meta.type = "exchange"
        if meta.id_meta in self.meta_transport_list and meta.type != "transport":
            meta.type = "transport"
        # A metabolite used by exactly one reaction (this transport reaction)
        # has no exchange reaction feeding it. Example:
        # R1: C_m <-> C_c | R2: C_g <-> C_c | R3: C_e <-> C_g | R4: C_m + A <-> B
        # C_e only appears in transport R3, so it is tagged "other" while the
        # others stay "exchange"/"transport". self.used_meta maps each
        # metabolite id to the list of reactions it is involved in.
        if is_transport and meta.type == "transport" \
                and len(self.used_meta[meta.id_meta]) == 1:
            # A transport reaction holds exactly one metabolite on each side
            # (see get_network()), so checking index 0 of the opposite side
            # is enough; only untag this one if the counterpart stays transport.
            if meta_list_opposite[0].type == "transport":
                meta.type = "other"
                self.meta_transport_list.remove(meta.id_meta)
|
631
|
+
|
|
632
|
+
def convert_to_facts(self):
    """Convert the corrected network into a single string of ASP facts.

    The string is stored in ``self.facts``; nothing is returned. One fact
    group is emitted per reaction (delegated to Reaction.convert_to_facts),
    followed by objective / seed / target / forbidden / possible-seed facts.
    """
    logger.log.info("Converting Network into ASP facts ...")
    # Collect fragments in a list and join once: linear time instead of
    # the quadratic behaviour of repeated string concatenation.
    parts = []
    for reaction in self.reactions:
        parts.append(reaction.convert_to_facts(self.keep_import_reactions,
                                               self.use_topological_injections))
    for objective in self.objectives_reaction_name:
        parts.append('\nobjective("'+objective+'").')
    for seed in self.seeds:
        parts.append(f'\nseed_user({quoted(seed)}).')
    for target in self.targets:
        for metabolite in self.targets[target]:
            parts.append(f'\ntarget({quoted(target)},{quoted(metabolite)}).')
    for forbidden in self.forbidden_seeds:
        parts.append(f'\nforbidden({quoted(forbidden)}).')
    for possible in self.possible_seeds:
        parts.append(f'\np_seed({quoted(possible)}).')
    self.facts = "".join(parts)
    logger.log.info("... DONE")
|
|
658
|
+
|
|
659
|
+
|
|
660
|
+
def simplify(self):
    """Lighten the Network object: release everything but the ASP facts.

    The raw SBML tree, reactions and seed lists are no longer needed once
    the facts have been generated, so they are dropped to free memory.
    """
    for attr in ("sbml", "reactions", "seeds", "forbidden_seeds"):
        setattr(self, attr, None)
|
|
667
|
+
|
|
668
|
+
|
|
669
|
+
def add_result_seeds(self, solver_type:str, search_info:str, model_name:str,
                     size:int, seeds:list, flux_lp:dict=None, flux_cobra:dict=None,
                     transferred_list:list=None):
    """Register one formatted result (set of seeds) in the result list.

    The search_info label -- either a solver tag ("minimize-union", ...) or
    a title read back from a result file ("MINIMIZE UNION FILTER", ...) --
    is translated into a (search_mode, search_type) pair before building
    the Resmod object.

    Args:
        solver_type (str): type of solver (Reasoning / FBA / Hybrid)
        search_info (str): search label (minimize/subset-minimal combined
            with optimum/enumeration/union/intersection, optionally with a
            FILTER / GUESS-CHECK / GUESS-CHECK-DIVERSITY suffix)
        model_name (str): model name
        size (int): length of the set of seeds
        seeds (list): list of seeds
        flux_lp (dict, optional): LP flux per reaction. Defaults to None.
        flux_cobra (dict, optional): cobra flux (Filter / Guess-Check modes).
            Defaults to None.
        transferred_list (list, optional): transfers (community mode).
    """
    result_seeds_list = self._get_result_seeds()

    # Labels produced directly by the solver, plus the suffix-free
    # file label that has no check-mode variants.
    label_map = {
        "minimize-one-model": ("Minimize", "Optimum"),
        "minimize-intersection": ("Minimize", "Intersection"),
        "minimize-union": ("Minimize", "Union"),
        "minimize-enumeration": ("Minimize", "Enumeration"),
        "submin-enumeration": ("Subset Minimal", "Enumeration"),
        "submin-intersection": ("Subset Minimal", "Intersection"),
        "MINIMIZE OPTIMUM": ("Minimize", "Optimum"),
    }
    # Labels read back from a result file: each exists bare and with one
    # of the three check-mode suffixes.
    file_labels = {
        "MINIMIZE INTERSECTION": ("Minimize", "Intersection"),
        "MINIMIZE UNION": ("Minimize", "Union"),
        "MINIMIZE ENUMERATION": ("Minimize", "Enumeration"),
        "SUBSET MINIMAL ENUMERATION": ("Subset Minimal", "Enumeration"),
        "SUBSET MINIMAL INTERSECTION": ("Subset Minimal", "Intersection"),
    }
    for base, modes in file_labels.items():
        for suffix in ("", " FILTER", " GUESS-CHECK", " GUESS-CHECK-DIVERSITY"):
            label_map[base + suffix] = modes

    # Unknown labels fall back to a generic enumeration, as before.
    search_mode, search_type = label_map.get(search_info, ("Other", "Enumeration"))

    result = Resmod(model_name, self.objectives_reaction_name, solver_type, search_mode, search_type,
                    size, seeds, flux_lp, flux_cobra, self.run_mode, self.accumulation,
                    self.is_community, transferred_list)
    result_seeds_list.append(result)
    self._set_result_seeds(result_seeds_list)
|
|
747
|
+
|
|
748
|
+
|
|
749
|
+
def format_flux_result(self, result:Resmod, fluxes_init:dict, fluxes_no_import:dict=None):
    """Format a Resmod result with network data and flux-check values.

    In community mode the whole objective list and flux dicts are passed
    through; in single-network mode the per-objective values are extracted.

    Args:
        result (Resmod): result object to format (mutated in place)
        fluxes_init (dict): initial flux per objective
        fluxes_no_import (dict, optional): flux per objective after shutting
            down import reactions; None when import reactions are kept.

    Returns:
        tuple: (objective, result, flux_no_import, flux_init)
    """
    if self.keep_import_reactions:
        flux_no_import = None
    else:
        flux_no_import = fluxes_no_import

    if self.is_community:
        objective = self.objectives_reaction_name
        flux_init = fluxes_init
    else:
        objective = result.tested_objective
        flux_init = fluxes_init[objective]
        # BUGFIX: fluxes_no_import is None when import reactions are kept
        # (default argument); indexing it unconditionally raised TypeError.
        if fluxes_no_import is not None:
            flux_no_import = fluxes_no_import[objective]
        result.objective_flux_seeds = result.objective_flux_seeds[objective]
        if result.objective_flux_demands:
            result.objective_flux_demands = result.objective_flux_demands[objective]
        else:
            result.objective_flux_demands = None
    return objective, result, flux_no_import, flux_init
|
|
778
|
+
|
|
779
|
+
|
|
780
|
+
|
|
781
|
+
def check_fluxes(self, maximize:bool, max_workers:int=-1):
    """Compute cobra fluxes for every result set of seeds and collect them,
    together with the LP fluxes, into the ``self.fluxes`` DataFrame.

    Fixes vs previous version:
      - the parallel branch iterated ``as_completed(futures)`` twice in
        nested loops, processing every future multiple times and
        duplicating DataFrame rows; a single loop is used now
      - the completed-future handler compared against the stale submit-loop
        variable ``result.search_mode`` instead of the ``search_mode``
        value returned by the worker
      - the "Processing in parallel" notice is only logged when the
        parallel branch is actually taken

    Args:
        maximize (bool): True when the Maximize option is used
        max_workers (int): -1 = sequential; 0 = parallel with the default
            worker count; >0 = parallel with that many workers
    """
    # Community results carry one cobra value per member, kept as strings;
    # single-network results are plain floats.
    cobra_dtype = 'str' if self.is_community else 'float'
    dtypes = {'species':'str',
              'biomass_reaction':'str',
              'solver_type':'str',
              'search_mode':'str',
              'search_type':'str',
              'accumulation':'str',
              'model':'str',
              'size':'int',
              'lp_flux':'float',
              'cobra_flux_init':cobra_dtype,
              'cobra_flux_no_import':cobra_dtype,
              'cobra_flux_seeds':cobra_dtype,
              'cobra_flux_demands':cobra_dtype,
              'has_flux':'str',
              'has_flux_seeds':'str',
              'has_flux_demands':'str',
              'timer':'float'}

    fluxes_no_import = None
    columns = ['species','biomass_reaction', 'solver_type', 'search_mode', 'search_type',
               'accumulation', 'model', 'size', 'lp_flux', 'cobra_flux_init', 'cobra_flux_no_import',
               'cobra_flux_seeds', 'cobra_flux_demands', 'has_flux','has_flux_seeds',
               'has_flux_demands', 'timer']
    fluxes = pd.DataFrame(columns=columns).astype(dtypes)

    if not self.objectives_reaction_name:
        print(color.red_bright+"No objective found, can't run cobra optimization"+color.reset)
    elif not self.result_seeds:
        print(color.red_bright+"No solution found"+color.reset)
    else:
        logger.log.info("Check fluxes Starting")
        model = flux.get_model(self.file)
        fluxes_init = flux.get_init(model, self.objectives_reaction_name)
        if not self.keep_import_reactions:
            fluxes_no_import = flux.stop_flux(model, self.objectives_reaction_name)
        self.model[self.name] = model
        print(color.purple+"\n____________________________________________")
        print("____________________________________________\n"+color.reset)
        print("RESULTS".center(44))
        print(color.purple+"____________________________________________")
        print("____________________________________________\n"+color.reset)

        if max_workers != -1:
            fluxes = self._check_fluxes_parallel(fluxes, fluxes_init, fluxes_no_import,
                                                 maximize, dtypes, max_workers)
        else:
            fluxes = self._check_fluxes_sequential(fluxes, fluxes_init, fluxes_no_import,
                                                   maximize, dtypes, columns)
    self.fluxes = fluxes


def _check_fluxes_parallel(self, fluxes, fluxes_init, fluxes_no_import,
                           maximize, dtypes, max_workers):
    """Run process_result for every result set in a process pool and
    append each returned row to the DataFrame; returns the filled frame."""
    logger.log.warning("Processing in parallel. " \
                       "\nNo outputs will be shown. " \
                       "\nPlease wait ...\n")
    # max_workers == 0 means "let the executor pick the default".
    if max_workers == 0:
        max_workers = None
    has_warning = False
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(process_result,
                                   result,
                                   self.model[self.name],
                                   self.name,
                                   self.equality_flux,
                                   fluxes_init,
                                   fluxes_no_import,
                                   maximize,
                                   dtypes,
                                   self.accumulation,
                                   self.is_community,
                                   self.keep_import_reactions,
                                   objectives=self.objectives_reaction_name)
                   for result in self.result_seeds]

        prev_solver_type = None
        prev_search_mode = None
        # Single pass over completed futures with a progress bar (the
        # previous nested loop consumed as_completed() twice).
        for future in tqdm(as_completed(futures), total=len(futures), desc="Processing results"):
            result_flux, solver_type, search_mode, warn = future.result()
            # Flush the pending warning banner whenever the search mode changes.
            if prev_search_mode is None or search_mode != prev_search_mode:
                if has_warning:
                    logger.log.warning(WARNING_MESSAGE_LP_COBRA)
                prev_search_mode = search_mode
                prev_solver_type = None
            if prev_solver_type != solver_type:
                prev_solver_type = solver_type
            if warn:
                has_warning = True
            fluxes = pd.concat([fluxes, result_flux], ignore_index=True)

    if has_warning:
        logger.log.warning(WARNING_MESSAGE_LP_COBRA)
    return fluxes


def _check_fluxes_sequential(self, fluxes, fluxes_init, fluxes_no_import,
                             maximize, dtypes, columns):
    """Check every result set in-process, printing a result table as it
    goes; returns the filled DataFrame."""
    prev_solver_type = None
    prev_search_mode = None
    has_warning = False
    for result in self.result_seeds:
        # New search-mode section: flush pending warning, print header.
        if prev_search_mode is None or result.search_mode != prev_search_mode:
            if has_warning:
                print("\n")
                logger.log.warning(WARNING_MESSAGE_LP_COBRA)
            print(color.yellow+"\n____________________________________________")
            print("____________________________________________\n"+color.reset)
            print(result.search_mode.center(44))
            prev_search_mode = result.search_mode
            prev_solver_type = None
        # New solver-type subsection: print the table header.
        if prev_solver_type is None or result.solver_type != prev_solver_type:
            print(color.yellow+"--------------------------------------------"+color.reset)
            print(result.solver_type.center(44))
            print(color.yellow+". . . . . . . . . . ".center(44)+color.reset)
            prev_solver_type = result.solver_type
            type_column = "name | cobra (seeds) | cobra (demands)"
            separate_line = "-----|---------------|-----------------"
            has_warning = False
            if result.solver_type=="HYBRID" or result.solver_type=="FBA":
                type_column += " | LP"
                separate_line += "|----"
            print(type_column)
            print(separate_line)

        flux_time = time()
        result.check_flux(self.model[self.name], equality_flux=self.equality_flux)
        objective, result, flux_no_import, flux_init = self.format_flux_result(result, fluxes_init, fluxes_no_import)
        warn = print_flux(result, maximize, self.is_community)
        if warn:
            has_warning = True
        flux_time = round(time() - flux_time, 3)

        result_flux = pd.DataFrame([[self.name, objective, result.solver_type, result.search_mode,
                                     result.search_type, str(self.accumulation), result.name, result.size,
                                     result.chosen_lp, flux_init, flux_no_import,
                                     result.objective_flux_seeds, result.objective_flux_demands,
                                     str(result.OK), str(result.OK_seeds), str(result.OK_demands),
                                     flux_time]],
                                   columns=columns).astype(dtypes)
        fluxes = pd.concat([fluxes, result_flux], ignore_index=True)

    if has_warning:
        print("\n")
        logger.log.warning(WARNING_MESSAGE_LP_COBRA)
    print(color.yellow+"\n____________________________________________\n"+color.reset)
    return fluxes
|
|
965
|
+
|
|
966
|
+
|
|
967
|
+
def convert_data_to_resmod(self, data):
|
|
968
|
+
"""Convert json data into Resmod object in order to add the list to Netork object.
|
|
969
|
+
|
|
970
|
+
Args:
|
|
971
|
+
data (dict): Json data from previous seed2lp result file
|
|
972
|
+
"""
|
|
973
|
+
logger.log.info("Converting data from result file ...")
|
|
974
|
+
reaction_option = data["OPTIONS"]["REACTION"]
|
|
975
|
+
match reaction_option:
|
|
976
|
+
case "Remove Import Reaction":
|
|
977
|
+
self.keep_import_reactions = False
|
|
978
|
+
self.use_topological_injections = False
|
|
979
|
+
case "Topological Injection":
|
|
980
|
+
self.keep_import_reactions = True
|
|
981
|
+
self.use_topological_injections = True
|
|
982
|
+
case "No Topological Injection":
|
|
983
|
+
self.keep_import_reactions = True
|
|
984
|
+
self.use_topological_injections = False
|
|
985
|
+
|
|
986
|
+
if data["OPTIONS"]["ACCUMULATION"] == "Allowed":
|
|
987
|
+
self.accumulation = True
|
|
988
|
+
else:
|
|
989
|
+
self.accumulation = False
|
|
990
|
+
|
|
991
|
+
self.objectives_reaction_name = data["NETWORK"]["OBJECTIVE"]
|
|
992
|
+
|
|
993
|
+
if data["NETWORK"]["SEARCH_MODE"] in NET_TITLE.CONVERT_TITLE_MODE:
|
|
994
|
+
self.run_mode = NET_TITLE.CONVERT_TITLE_MODE[data["NETWORK"]["SEARCH_MODE"]]
|
|
995
|
+
else:
|
|
996
|
+
self.run_mode = data["NETWORK"]["SEARCH_MODE"]
|
|
997
|
+
|
|
998
|
+
|
|
999
|
+
if data["OPTIONS"]["FLUX"] == "Maximization":
|
|
1000
|
+
maximize = True
|
|
1001
|
+
else:
|
|
1002
|
+
maximize = False
|
|
1003
|
+
|
|
1004
|
+
if data["NETWORK"]["SOLVE"] in NET_TITLE.CONVERT_TITLE_SOLVE:
|
|
1005
|
+
solve = NET_TITLE.CONVERT_TITLE_SOLVE[data["NETWORK"]["SOLVE"]]
|
|
1006
|
+
else:
|
|
1007
|
+
solve = 'all'
|
|
1008
|
+
|
|
1009
|
+
for solver_type in data["RESULTS"]:
|
|
1010
|
+
for search_info in data["RESULTS"][solver_type]:
|
|
1011
|
+
solver_type_transmetted = solver_type
|
|
1012
|
+
if data["NETWORK"]["SOLVE"] !="ALL":
|
|
1013
|
+
if "REASONING" in solver_type:
|
|
1014
|
+
solver_type_transmetted = data["NETWORK"]["SOLVE"]
|
|
1015
|
+
elif solver_type == "REASONING":
|
|
1016
|
+
if "DIVERSITY" in search_info:
|
|
1017
|
+
solver_type_transmetted = "REASONING GUESS-CHECK DIVERSITY"
|
|
1018
|
+
elif 'GUESS-CHECK' in search_info:
|
|
1019
|
+
solver_type_transmetted = "REASONING GUESS-CHECK"
|
|
1020
|
+
elif 'FILTER' in search_info:
|
|
1021
|
+
solver_type_transmetted = "REASONING FILTER"
|
|
1022
|
+
|
|
1023
|
+
if "solutions" in data["RESULTS"][solver_type][search_info]:
|
|
1024
|
+
for solution in data["RESULTS"][solver_type][search_info]["solutions"]:
|
|
1025
|
+
name = solution
|
|
1026
|
+
size = data["RESULTS"][solver_type][search_info]["solutions"][solution][1]
|
|
1027
|
+
seeds_list = data["RESULTS"][solver_type][search_info]["solutions"][solution][3]
|
|
1028
|
+
obj_flux_lp = dict()
|
|
1029
|
+
if solver_type == "FBA" or solver_type == "HYBRID":
|
|
1030
|
+
for flux in data["RESULTS"][solver_type][search_info]["solutions"][solution][5]:
|
|
1031
|
+
reaction = flux[0]
|
|
1032
|
+
if reaction in self.objectives_reaction_name:
|
|
1033
|
+
obj_flux_lp[reaction] = flux[1]
|
|
1034
|
+
if self.is_community:
|
|
1035
|
+
transferred_list = data["RESULTS"][solver_type][search_info]["solutions"][solution][5]
|
|
1036
|
+
else:
|
|
1037
|
+
transferred_list = None
|
|
1038
|
+
self.add_result_seeds(solver_type_transmetted, search_info, name, size, seeds_list,
|
|
1039
|
+
obj_flux_lp, transferred_list=transferred_list)
|
|
1040
|
+
logger.log.info("... DONE")
|
|
1041
|
+
return maximize, solve
|
|
1042
|
+
|
|
1043
|
+
|
|
1044
|
+
def write_cases_messages(self, tgt_message:str, obj_message:str,
                         net_mess:list):
    """Print the configuration banners for targets, objective and network.

    Messages come from the target file (target mode) or from the command
    line (full mode).

    Args:
        tgt_message (str): message shown in the targets banner
        obj_message (str): message shown in the objective banner
        net_mess (list): network messages; index 1 is shown only when
            import reactions are kept, index 2 unless run mode is "full",
            index 3 unless run mode is "fba"
    """
    separator = "\n____________________________________________\n"

    # Targets banner (target mode and FBA).
    print(separator)
    print("TARGETS".center(44))
    print("FOR TARGET MODE AND FBA".center(44))
    print("____________________________________________\n")
    logger.print_log(tgt_message, "info")

    # Objective banner (Hybrid). NOTE(review): the header typo "OBJECTVE"
    # is user-visible output and is preserved on purpose by this rewrite.
    print(separator)
    print("OBJECTVE".center(44))
    print("FOR HYBRID".center(44))
    print("____________________________________________\n")
    logger.print_log(obj_message, "info")
    print("\n")

    # Network banner.
    print(separator)
    print("NETWORK".center(44))
    print("____________________________________________\n")
    logger.print_log(net_mess[0], "info")
    if self.keep_import_reactions:
        logger.print_log(net_mess[1], "info")
    if self.run_mode != "full":
        logger.print_log(net_mess[2], "info")
    if self.run_mode != "fba":
        logger.print_log(net_mess[3], "info")
    print("\n")
|
|
1080
|
+
|
|
1081
|
+
|
|
1082
|
+
|
|
1083
|
+
def check_seeds(self, seeds:list, transferred:list=None):
    """Check whether a given set of seeds yields flux in the objective.

    Args:
        seeds (list): set of seeds to test
        transferred (list, optional): transferred metabolites (community mode)

    Returns:
        tuple: (True when the objective reaction has flux,
                flux value obtained through the seeds)
    """
    cobra_model = flux.get_model(self.file)
    flux.get_init(cobra_model, self.objectives_reaction_name, False)
    flux.stop_flux(cobra_model, self.objectives_reaction_name, False)

    candidate = Resmod(None, self.objectives_reaction_name,
                       None, None, None, len(seeds), seeds, None, None,
                       is_community=self.is_community, transferred_list=transferred)

    # This mode must validate the seeds themselves, so the "on demands"
    # flux fallback is disabled (second argument False).
    candidate.check_flux(cobra_model, False, self.equality_flux)
    return candidate.OK, candidate.objective_flux_seeds
|
|
1104
|
+
|
|
1105
|
+
|
|
1106
|
+
def update_network_sbml(self, reaction:Reaction):
    """Clamp the bounds of an exchange reaction before writing SBML.

    When one side of the reaction is empty, the corresponding flux
    direction is closed (bound set to 0) while keeping lower <= upper.
    Needed when rewriting the SBML for community mode, whose Hybrid-Cobra
    modes (Filter, Guess-Check, Guess-Check Diversity) read the file back.

    Args:
        reaction (Reaction): reaction whose bounds are adjusted in place
    """
    if not reaction.reactants:
        # No reactants: forbid the forward direction.
        reaction.ubound = 0
        # The lower bound may not exceed the (now zero) upper bound.
        if reaction.lbound > 0:
            reaction.lbound = 0
    if not reaction.products:
        # No products: forbid the backward direction.
        reaction.lbound = 0
        # The upper bound may not go below the (now zero) lower bound.
        if reaction.ubound < 0:
            reaction.ubound = 0
|
|
1126
|
+
|
|
1127
|
+
|
|
1128
|
+
def print_flux(self, result:Resmod, maximize:bool):
    """Print one row of the flux results table.

    Args:
        result (Resmod): result to display
        maximize (bool): True when the Maximize option is used

    Returns:
        bool: True when a warning banner must be raised (Hybrid-lpx/FBA
        row whose LP and cobra fluxes disagree without maximization)
    """
    warning = False
    if result.name != "model_one_solution":
        is_lp_solver = result.solver_type == "HYBRID" or result.solver_type == "FBA"
        if result.OK_seeds:
            # Green when LP and cobra agree (|diff| < 0.1), cyan otherwise.
            if is_lp_solver and abs(result.chosen_lp - result.objective_flux_seeds) < 0.1:
                color_seeds = color_lp = color.green_light
            else:
                color_seeds = color_lp = color.cyan_light
            if is_lp_solver and not maximize \
                    and abs(result.chosen_lp - result.objective_flux_seeds) > 0.1:
                warning = True
        else:
            # NOTE(review): color_lp stays unset on this path; a HYBRID/FBA
            # row without OK_seeds would hit a NameError below -- confirm.
            color_seeds = color.red_bright

        row = f"{result.name} | "
        if not result.infeasible_seeds:
            if self.is_community:
                row = add_print_seed_community(result, color_seeds, row, result.infeasible_seeds)
            else:
                row += color_seeds + f"{result.objective_flux_seeds}" + color.reset + " | "
                warning, row = add_print_demands(result, maximize, warning, row)
        else:
            row += f"Infeasible" + " | "
            if self.is_community:
                row = add_print_seed_community(result, color_seeds, row, result.infeasible_seeds)
            else:
                warning, row = add_print_demands(result, maximize, warning, row)

        if is_lp_solver:
            lp_flux_rounded = round(result.chosen_lp, 4)
            row += " | " + color_lp + f"{lp_flux_rounded}" + color.reset
        print(row)
    return warning
|
|
1175
|
+
|
|
1176
|
+
|
|
1177
|
+
|
|
1178
|
+
|
|
1179
|
+
|
|
1180
|
+
def sbml_remove_reaction(self, reaction:ET.Element, species:str):
    """Delete a reaction node from the model and drop it from the
    objective-reaction list so the written SBML stays Cobra-valid.

    Args:
        reaction (ET.Element): reaction node to delete
        species (str): network the deletion applies to
    """
    species_model = self.model[species]
    SBML.remove_reaction(species_model, reaction)
    SBML.check_remove_objective(species_model, reaction, self.fbc[species])
|
|
1191
|
+
|
|
1192
|
+
|
|
1193
|
+
|
|
1194
|
+
def sbml_review_reversibilty(self, reaction_name:str, reaction:ET.Element):
    """Sync the SBML ``reversible`` attribute of a reaction node with the
    corrected reaction when normalisation marked it as modified.

    Args:
        reaction_name (str): reaction ID to look up
        reaction (ET.Element): reaction node to patch

    Returns:
        bool: True when the node was modified
    """
    if reaction_name not in self.reversible_modified_reactions:
        return False
    idx = self.reversible_modified_reactions[reaction_name]
    # SBML booleans are lowercase ("true"/"false").
    reaction.attrib["reversible"] = str(self.reactions[idx].reversible).lower()
    return True
|
|
1210
|
+
|
|
1211
|
+
|
|
1212
|
+
def sbml_switch_meta(self, reaction_name:str, reaction:ET.Element, species:str):
    """Swap reactants and products of a reaction node that the network
    normalisation marked as switched (a reaction written backward is
    rewritten forward), then rewire its flux-bound attributes.

    Args:
        reaction_name (str): reaction ID to modify
        reaction (ET.Element): reaction node to modify
        species (str): network the switch applies to

    Returns:
        bool: True when the node was modified
    """
    if reaction_name not in self.switched_meta_reactions:
        return False
    index = self.switched_meta_reactions[reaction_name]

    # First pass: copy both metabolite lists, then empty them in the node.
    has_reactant = False
    has_product = False
    reactants = list()
    products = list()
    for element in reaction:
        tag = SBML.get_sbml_tag(element)
        if tag == "listOfReactants":
            reactants = copy.copy(element)
            has_reactant = True
            SBML.remove_sub_elements(element)
        elif tag == "listOfProducts":
            products = copy.copy(element)
            has_product = True
            SBML.remove_sub_elements(element)

    # Second pass: refill each list with the opposite side's metabolites,
    # creating the missing list node when only one side existed.
    recreate_other_node = True
    for element in reaction:
        tag = SBML.get_sbml_tag(element)
        if tag == "listOfReactants":
            if has_product:
                SBML.add_metabolites(element, products)
            elif recreate_other_node:
                # No listOfProducts node exists: create it and move the
                # former reactants out of this node.
                SBML.create_sub_element(reaction, "listOfProducts")
                SBML.add_metabolites(element, reactants)
                recreate_other_node = False
                SBML.remove_sub_elements(element)
        # NOTE(review): the extra "and has_reactant" here makes the
        # elif-recreate branch below unreachable; kept as-is to preserve
        # behaviour -- confirm whether it should mirror the branch above.
        elif tag == "listOfProducts" and has_reactant:
            if has_reactant:
                SBML.add_metabolites(element, reactants)
            elif recreate_other_node:
                SBML.create_sub_element(reaction, "listOfReactants")
                SBML.add_metabolites(element, products)
                recreate_other_node = False
                SBML.remove_sub_elements(element)

    # Point the node at the corrected bounds; when a parameter table
    # exists for this network, register named bound parameters instead
    # of inlining the values.
    if self.parameters[species]:
        self.parameters[species][f'{reaction_name}_lower_bound'] = self.reactions[index].lbound
        self.parameters[species][f'{reaction_name}_upper_bound'] = self.reactions[index].ubound
        reaction.attrib['{'+self.fbc[species]+'}lowerFluxBound'] = f'{reaction_name}_lower_bound'
        reaction.attrib['{'+self.fbc[species]+'}upperFluxBound'] = f'{reaction_name}_upper_bound'
    else:
        reaction.attrib['{'+self.fbc[species]+'}lowerFluxBound'] = self.reactions[index].lbound
        reaction.attrib['{'+self.fbc[species]+'}upperFluxBound'] = self.reactions[index].ubound

    return True
|
|
1280
|
+
|
|
1281
|
+
|
|
1282
|
+
def sbml_remove_import(self, reaction_name:str, reaction:ET.Element, species:str):
    """Disable the import direction of an exchange reaction by rewriting its flux bounds.

    New network-level parameters named ``<reaction>_lower_bound`` /
    ``<reaction>_upper_bound`` are registered with the reaction's current
    bounds, and the SBML node is pointed at them.

    Args:
        reaction_name (str): Reaction ID that needs to be modified
        reaction (ET.Element): Etree element (node) to modify
        species (str): Network to apply the import reaction deletion

    Returns:
        bool: True when the reaction was an exchanged reaction and was modified
    """
    # Only reactions registered as exchanged need their import side removed
    if reaction_name not in self.exchanged_reactions:
        return False

    idx = self.exchanged_reactions[reaction_name]
    bounds_source = self.reactions[idx]
    lower_id = f'{reaction_name}_lower_bound'
    upper_id = f'{reaction_name}_upper_bound'

    # Register dedicated bound parameters for this reaction
    self.parameters[species][lower_id] = bounds_source.lbound
    self.parameters[species][upper_id] = bounds_source.ubound

    # Point the SBML reaction node at the new per-reaction parameters
    fbc_ns = '{' + self.fbc[species] + '}'
    reaction.attrib[fbc_ns + 'lowerFluxBound'] = lower_id
    reaction.attrib[fbc_ns + 'upperFluxBound'] = upper_id
    return True
|
|
1303
|
+
|
|
1304
|
+
|
|
1305
|
+
def sbml_review_parameters(self, species:str):
    """While modifying boundaries for import reactions, new parameters have been
    created and need to be added into the sbml file.

    Existing ``listOfParameters`` entries that appear in ``self.parameters[species]``
    get their value updated in place; any key left over in the dictionary is a
    brand-new parameter and is appended as a new node (cloned from the first
    existing parameter node so namespaces/attributes are preserved).

    Args:
        species (str): Network to add the created parameters
    """
    # Replace list of parameters because we added new specific parameters for the exchange reactions.
    # Shallow copy: we only pop keys from the copy, the values stay shared.
    parameters_copy = copy.copy(self.parameters[species])

    for el in self.model[species]:
        tag = SBML.get_sbml_tag(el)
        if tag == "listOfParameters":
            # Template node used to create new parameter entries.
            # NOTE(review): assumes listOfParameters has at least one child;
            # an empty list would raise IndexError — confirm upstream guarantees.
            node = copy.deepcopy(el[0])
            for param in el:
                # `id` shadows the builtin; kept as-is (code unchanged here)
                id = param.attrib.get('id')
                # Corrects the already existent parameters
                if id in parameters_copy:
                    param.attrib['value'] = str(parameters_copy[id])
                    # delete the existent parameter from the list of parameters to keep
                    # only the new parameters
                    parameters_copy.pop(id)
            # create new parameter nodes for every key that had no existing entry
            for key, value in parameters_copy.items():
                new_node = copy.deepcopy(node)
                new_node.attrib['id'] = key
                new_node.attrib['value'] = str(value)
                el.append(new_node)
|
|
1333
|
+
|
|
1334
|
+
###################################################################
|
|
1335
|
+
|
|
1336
|
+
|
|
1337
|
+
|
|
1338
|
+
###################################################################
|
|
1339
|
+
################## Class Network : inherits NetBase ###############
|
|
1340
|
+
###################################################################
|
|
1341
|
+
|
|
1342
|
+
class Network(NetBase):
    """Single-species metabolic network, loaded and normalised from one SBML file."""

    def __init__(self, file:str, run_mode:str=None, targets_as_seeds:bool=False, use_topological_injections:bool=False,
                 keep_import_reactions:bool=True, input_dict:dict=None, accumulation:bool=False, to_print:bool=True,
                 write_sbml:bool=False):
        """Initialize Object Network

        Args:
            file (str): SBML source file
            run_mode (str, optional): Running command used (full or target or FBA). Defaults to None.
            targets_as_seeds (bool, optional): Targets can't be seeds and are noted as forbidden. Defaults to False.
            use_topological_injections (bool, optional): Metabolite of import reaction are seeds. Defaults to False.
            keep_import_reactions (bool, optional): Import reactions are not removed. Defaults to True.
            input_dict (dict, optional): The input dictionary. Defaults to None.
            accumulation (bool, optional): Is accumulation authorized. Defaults to False.
            to_print (bool, optional): Write messages into console if True. Defaults to True.
            write_sbml (bool, optional): Is a writing SBML file mode or not. Defaults to False.
        """

        super().__init__(targets_as_seeds, use_topological_injections,
                         keep_import_reactions, accumulation)
        self.file = file
        self.run_mode = run_mode
        self.file_extension = ""
        self._set_file_extension(file)
        self._set_name()
        # A single-species network: the only "species" is the network itself
        self.species=[self.name]
        # Per-species SBML data, keyed by network name (mirrors the Netcom layout)
        self.sbml=dict()
        self.sbml[self.name], self.sbml_first_line, self.default_namespace = SBML.get_root(self.file)
        self.model[self.name] = SBML.get_model(self.sbml[self.name])
        self.fbc = {self.name: SBML.get_fbc(self.sbml[self.name])}
        self.parameters = {self.name: SBML.get_listOfParameters(self.model[self.name])}

        # Instantiate objectives from target file if given by user
        is_user_objective = self.check_objectives(input_dict)
        # Find objectives in the sbml file if not given by the user
        is_reactant_found=False
        is_objective_error=False

        logger.print_log("\nFinding objective ...", "info")
        if self.objectives is None or not self.objectives:
            try:
                is_reactant_found = self.find_objectives(input_dict, self.name)
                # for obj in self.objectives:
                #     self.objectives_reaction_name.append(obj[1])
            except ValueError as e:
                is_objective_error = True
                logger.log.error(str(e))
        # Init networks with data given by user and objective reaction
        # write messages
        if self.run_mode is not None:
            # write console messages
            self.init_with_inputs(input_dict, is_reactant_found, is_objective_error, is_user_objective)

        logger.print_log("Network normalisation in progress...", "info")
        logger.print_log("Can take several minutes", "info")
        normalisation_time = time()
        self.get_network(self.name, to_print, write_sbml)
        normalisation_time = time() - normalisation_time
        logger.print_log(f"Normalisation total time: {round(normalisation_time, 3)}s", "info")
|
|
1401
|
+
|
|
1402
|
+
|
|
1403
|
+
|
|
1404
|
+
|
|
1405
|
+
|
|
1406
|
+
###################################################################
|
|
1407
|
+
################## Class NetCom : inherits NetBase ################
|
|
1408
|
+
###################################################################
|
|
1409
|
+
class Netcom(NetBase):
    """Community network: loads several single-species SBML files, prefixes every
    id with its species name, and merges them into one temporary SBML model."""

    def __init__(self, comfile:str, sbmldir:str, temp_dir:str, run_mode:str=None, run_solve:str=None, community_mode:str=None,
                 targets_as_seeds:bool=False, use_topological_injections:bool=False, keep_import_reactions:bool=True,
                 input_dict:dict=None, accumulation:bool=False, to_print:bool=True,
                 write_sbml:bool=False, equality_flux:bool=False):
        """Initialise Object Netcom

        Args:
            comfile (str): Text file path containing the list of files describing the community
            sbmldir (str): The directory path containing all the sbml files
            temp_dir (str): Temporary directory path for saving the merged sbml file
            run_mode (str, optional): Running command used (full or target or FBA). Defaults to None.
            targets_as_seeds (bool, optional): Targets can't be seeds and are noted as forbidden. Defaults to False.
            use_topological_injections (bool, optional): Metabolite of import reaction are seeds. Defaults to False.
            keep_import_reactions (bool, optional): Import reactions are not removed. Defaults to True.
            input_dict (dict, optional): The input dictionary. Defaults to None.
            accumulation (bool, optional): Is accumulation authorized. Defaults to False.
            to_print (bool, optional): Write messages into console if True. Defaults to True.
            write_sbml (bool, optional): Is a writing SBML file mode or not. Defaults to False.
        """
        super().__init__(targets_as_seeds, use_topological_injections,
                         keep_import_reactions, accumulation, equality_flux)
        self.name=""
        self.comfile = comfile
        self.sbml_dir = sbmldir
        self.sbml=dict()
        self.files=list()
        self.run_mode = run_mode
        self.run_solve = run_solve
        self.community_mode = community_mode
        self.extension=str()
        self.is_community=True
        self._set_file_extension()
        self._set_name()
        self.temp_dir = temp_dir
        # Short option tag used in the temp file name: tas = targets-as-seeds,
        # taf = targets-as-forbidden
        if targets_as_seeds:
            short_target_option="tas"
        else:
            short_target_option="taf"
        self.file = os.path.join(self.temp_dir, f"tmp_{self.name}_{self.community_mode}_{self.run_solve}_{short_target_option}.xml")

        # get list of species from text file (one species file name per line)
        # NOTE(review): a trailing newline in comfile yields an empty species
        # name in the list — confirm the file format forbids it
        com_list_file = open(self.comfile, "r")
        data = com_list_file.read()
        self.species = data.split("\n")
        com_list_file.close()

        self.get_sbml_data()
        is_user_objective = self.check_objectives(input_dict)
        # Find objectives in the sbml files if not given by the user
        is_reactant_found=False
        is_objective_error=False
        logger.print_log("\n Finding objectives of community ...", "info")
        if self.objectives is None or not self.objectives:
            for species in self.species:
                try:
                    is_reactant_found = self.find_objectives(input_dict, species)
                except ValueError as e:
                    is_objective_error = True
                    logger.log.error(str(e))

        # Init networks with data given by user and objective reaction
        # write messages
        if self.run_mode is not None:
            # write console messages
            self.init_with_inputs(input_dict, is_reactant_found, is_objective_error, is_user_objective)

        logger.print_log("Network normalisation in progress ...", "info")
        normalisation_time = time()
        for species in self.species:
            self.get_network(species, to_print, write_sbml)

        self.write_merge_sbml_file()
        normalisation_time = time() - normalisation_time
        logger.print_log(f"Normalisation total time: {round(normalisation_time, 3)}s", "info")


    ########################################################


    ######################## SETTER ########################
    def _set_name(self):
        # The community name is the comfile base name without extension
        n = f'{os.path.splitext(os.path.basename(self.comfile))[0]}'
        self.name = n
        print(f"Community network name: {n}")
    ########################################################


    ####################### METHODS ########################
    def _set_file_extension(self):
        # Get extension of first file into directory.
        # We assume that all files are constructed in the same way
        # and thus all extensions into a directory are the same.
        first_file =os.listdir(self.sbml_dir)[0]
        self.extension = os.path.splitext(first_file)[1]


    def get_sbml_data(self):
        """Get all elements needed from sbml file for each network.
        These elements are the following nodes from each source file:
        - sbml
        - fbc
        - model
        - parameters

        Exits the process (exit code 1) when a listed species file is missing.
        """
        for species in self.species:
            sbml_file = os.path.join(self.sbml_dir, f"{species}{self.extension}")
            try:
                existant_path(sbml_file)
                self.files.append(sbml_file)
            except FileNotFoundError as e :
                logger.log.error(str(e))
                exit(1)
            # NOTE(review): sbml_first_line / default_namespace are overwritten
            # on every iteration — the last species wins; confirm all files share them
            self.sbml[species], self.sbml_first_line, self.default_namespace = SBML.get_root(sbml_file)
            self.fbc[species] = SBML.get_fbc(self.sbml[species])
            self.model[species] = SBML.get_model(self.sbml[species])
            self.parameters[species] = SBML.get_listOfParameters(self.model[species])


    def sbml_prefix_id(self, element:ET.Element, species:str):
        """Change the id of nodes by prefixing it with the network filename.

        Handles four node kinds: reaction (id, metaid, bounds, reactant/product
        species refs), species (metabolite id), parameter (id) and
        listOfFluxObjectives (referenced reaction ids).

        Args:
            element (ET.Element): Etree element (node) to modify
            species (str): Network to be added as prefix
        """
        tag=SBML.get_sbml_tag(element)
        # `id` shadows the builtin; kept as-is (code unchanged here)
        id = element.attrib.get("id")

        match tag:
            case 'reaction':
                element.attrib['id'] = prefix_id_network(self.is_community, id, species,"reaction")
                metaid = element.attrib.get("metaid")
                if metaid:
                    element.attrib['metaid'] = prefix_id_network(self.is_community, metaid, species,"metaid")
                # Bound parameter ids get the same prefix that prefix_parameter_dict
                # applies to the parameters dictionary, so they keep matching
                l_bound=element.attrib.get('{'+self.fbc[species]+'}lowerFluxBound')
                u_bound=element.attrib.get('{'+self.fbc[species]+'}upperFluxBound')
                element.attrib['{'+self.fbc[species]+'}lowerFluxBound']= prefix_id_network(self.is_community, l_bound,species)
                element.attrib['{'+self.fbc[species]+'}upperFluxBound']= prefix_id_network(self.is_community, u_bound,species)

                # Clean sbml by deleting notes and gene product association
                # Otherwise etree creates a non-findable xmlns attribute on the sbml node
                # which causes an error in Cobra
                # We do it first because while doing all together there are two problems:
                # 1: When a node is deleted the next node is not the source's next node but the one after
                # 2: when keeping nodes in a list to delete afterwards by looping on the list, "notes" nodes are not deleted
                # NOTE(review): removing children while iterating an Element skips
                # siblings (problem 1 above) — relies on notes/geneProductAssociation
                # not being adjacent; confirm against real inputs
                for el in element:
                    subtag = SBML.get_sbml_tag(el)
                    if subtag == "notes" or subtag == "geneProductAssociation":
                        element.remove(el)

                for el in element:
                    subtag = SBML.get_sbml_tag(el)
                    list_remove_meta = list()
                    list_add_meta = list()
                    # copy and remove list of reactant and products from source
                    if subtag == "listOfReactants" or subtag == "listOfProducts":
                        for meta in el:
                            new_meta = copy.deepcopy(meta)
                            list_remove_meta.append(meta)
                            metabolite_id = new_meta.attrib.get("species")
                            pref_metabolite_id = prefix_id_network(self.is_community, metabolite_id, species,"metabolite")
                            new_meta.attrib['species'] = pref_metabolite_id
                            list_add_meta.append(new_meta)

                        # Need to treat after looping to not mess with ids
                        for rm_meta in list_remove_meta:
                            el.remove(rm_meta)
                        for add_meta in list_add_meta:
                            el.append(add_meta)

            case 'species':
                element.attrib['id'] = prefix_id_network(self.is_community, id, species,"metabolite")
                # Drop notes for the same Cobra-compatibility reason as above
                for el in element:
                    subtag = SBML.get_sbml_tag(el)
                    if subtag == "notes":
                        element.remove(el)

            case 'parameter':
                element.attrib['id'] = prefix_id_network(self.is_community, id, species)

            case 'listOfFluxObjectives':
                for o in element:
                    name = o.attrib.get('{'+self.fbc[species]+'}reaction')
                    o.attrib['{'+self.fbc[species]+'}reaction'] = prefix_id_network(self.is_community, name, species,"reaction")


    def append_model(self, merged_model:ET.Element, species:str):
        """When a merged model has been created by copying the first network, all nodes are appended to this
        merged model for the other networks.

        Args:
            merged_model (ET.Element): Etree element (node) to modify
            species (str): Network to append to the merged model
        """
        # Target nodes inside the merged model
        meta_node = merged_model.find("listOfSpecies")
        param_node = merged_model.find("listOfParameters")
        reaction_node = merged_model.find("listOfReactions")

        list_obj_node = merged_model.find("{"+self.fbc[species]+"}listOfObjectives")
        obj_node = list_obj_node.find("{"+self.fbc[species]+"}objective")
        flux_obj_node = obj_node.find("{"+self.fbc[species]+"}listOfFluxObjectives")

        for meta in self.model[species].find("listOfSpecies"):
            meta_node.append(meta)

        for param in self.model[species].find("listOfParameters"):
            param_node.append(param)

        for react in self.model[species].find("listOfReactions"):
            reaction_node.append(react)

        # Merge the species' flux objectives into the single merged objective
        list_obj_to_append = self.model[species].find("{"+self.fbc[species]+"}listOfObjectives")
        obj_to_append = list_obj_to_append.find("{"+self.fbc[species]+"}objective")
        for obj in obj_to_append.find("{"+self.fbc[species]+"}listOfFluxObjectives"):
            flux_obj_node.append(obj)


    def prefix_parameter_dict(self, species:str):
        """While writing the sbml file, all parameters saved into the dictionary need to be prefixed
        because boundaries are modified and prefixed for each reaction.

        Args:
            species (str): Network to be added as prefix
        """
        new_dict=dict()
        for key, val in self.parameters[species].items():
            new_key=prefix_id_network(self.is_community, key,species)
            new_dict[new_key] = val
        self.parameters[species]=new_dict


    def write_merge_sbml_file(self):
        """Compute all modifications needed (reaction normalization, prefixing ids, boundaries,
        parameter and objective reaction list) then merge all networks into one model and
        write it into the temporary directory (self.file).
        """
        #TODO parallelize ?
        is_first=True

        for species in self.model.keys():
            list_metabolites = SBML.get_listOfSpecies(self.model[species])
            list_parameters = SBML.get_parameters(self.model[species])
            list_objectives = SBML.get_objectives(self.model[species])
            list_reactions = SBML.get_listOfReactions(self.model[species])

            self.prefix_parameter_dict(species)

            # Prefix all metabolites with network id
            for metabolite in list_metabolites:
                self.sbml_prefix_id(metabolite, species)

            # Prefix all parameters with network id
            for parameter in list_parameters:
                self.sbml_prefix_id(parameter, species)

            # Prefix all objectives reaction with network id
            for objective in list_objectives[0]:
                if objective:
                    self.sbml_prefix_id(objective, species)

            # Delete the reactions from the listOfReactions node
            # (deleted_reactions keys are suffixed with the species name)
            for reaction in self.deleted_reactions.keys():
                if species in reaction:
                    id_reaction = reaction.replace(f"_{species}","")
                    node = list_reactions.find(f"reaction[@id='{id_reaction}']")
                    list_reactions.remove(node)

            # Corrects the SBML Model
            for reaction in list_reactions:
                reaction_name = reaction.attrib.get("id")
                reaction_name = prefix_id_network(self.is_community, reaction_name, species,"reaction")

                # Prefix all reactions, reactants, products and bounds with
                # network id
                self.sbml_prefix_id(reaction, species)

                # Change the reversibility
                self.sbml_review_reversibilty(reaction_name, reaction)
                # switch reactants and products
                self.sbml_switch_meta(reaction_name, reaction, species)
                # remove import reactions
                if not self.keep_import_reactions:
                    self.sbml_remove_import(reaction_name, reaction, species)

            self.sbml_review_parameters(species)

            # Merge all networks in one file: the first species' tree becomes
            # the merged document, the others are appended into it
            if is_first:
                new_sbml = self.sbml[species]
                merged_model = copy.deepcopy(self.model[species])
                merged_model.attrib["id"]=self.name
                new_sbml.remove(self.model[species])
                new_sbml.append(merged_model)
                # Re-attach the default xmlns attribute that etree drops
                def_ns=self.default_namespace.split("=")
                new_sbml.set(def_ns[0], def_ns[1].replace('"',''))
                is_first=False
            else:
                # merged_model / new_sbml were bound by the first iteration
                self.append_model(merged_model, species)


        # Save file in temp dir
        str_model = self.sbml_first_line+SBML.etree_to_string(new_sbml)
        with open(self.file, 'w') as f:
            f.write(str_model)
            # redundant: the with-statement already closes the file (kept as-is)
            f.close()
|
|
1716
|
+
|
|
1717
|
+
########################################################
|
|
1718
|
+
|
|
1719
|
+
|
|
1720
|
+
def process_result(result, model, name, equality_flux, fluxes_init, fluxes_no_import, maximize, dtypes, accumulation,
                   is_community, keep_import_reactions, objectives):
    """Check the fluxes of one solver result, print its table row, and build a
    one-row DataFrame summarising it.

    Returns:
        tuple: (typed one-row DataFrame, solver_type, search_mode, warning flag)
    """
    # Work on a private deep copy so the caller's result object is never mutated
    local_result = copy.deepcopy(result)

    started_at = time()
    local_result.check_flux(model, equality_flux=equality_flux)

    objective, checked, flux_no_import, flux_init = format_flux_result(
        local_result,
        fluxes_init,
        fluxes_no_import,
        is_community,
        keep_import_reactions,
        objectives,
    )

    # Build the console row without printing it (parallel mode)
    warn = print_flux(checked, maximize, is_community, is_parallele=True)

    elapsed = round(time() - started_at, 3)

    columns = ['species', 'biomass_reaction', 'solver_type', 'search_mode',
               'search_type', 'accumulation', 'model', 'size', 'lp_flux',
               'cobra_flux_init', 'cobra_flux_no_import', 'cobra_flux_seeds',
               'cobra_flux_demands', 'has_flux', 'has_flux_seeds',
               'has_flux_demands', 'timer']
    row = [name, objective, checked.solver_type, checked.search_mode,
           checked.search_type, str(accumulation), checked.name, checked.size,
           checked.chosen_lp, flux_init, flux_no_import,
           checked.objective_flux_seeds, checked.objective_flux_demands,
           str(checked.OK), str(checked.OK_seeds),
           str(checked.OK_demands), elapsed]
    df = pd.DataFrame([row], columns=columns)
    return df.astype(dtypes), checked.solver_type, checked.search_mode, warn
|
|
1758
|
+
|
|
1759
|
+
|
|
1760
|
+
def format_flux_result(result, fluxes_init, fluxes_no_import, is_community, keep_import_reactions, objectives):
    """
    Format Resmod result data with flux information.

    Args:
        result (Resmod): The result object (mutated in place for the
            non-community case: per-objective dicts are collapsed to scalars).
        fluxes_init (dict): Initial fluxes for each objective.
        fluxes_no_import (dict): Fluxes after disabling import reactions.
        is_community (bool): Whether this is a community model.
        keep_import_reactions (bool): Whether import reactions were retained.
        objectives (str or dict): Objectives used.

    Returns:
        tuple: (objective, updated result, flux_no_import, flux_init)
    """
    if keep_import_reactions:
        # Import reactions kept: there is no "no import" flux to report
        flux_no_import = None
    else:
        flux_no_import = fluxes_no_import

    if is_community:
        # Community mode keeps the per-objective dictionaries as-is
        objective = objectives
        flux_init = fluxes_init
    else:
        objective = result.tested_objective
        flux_init = fluxes_init[objective]
        # BUGFIX: flux_no_import is None when import reactions are kept;
        # subscripting it unconditionally raised TypeError here.
        if flux_no_import is not None:
            flux_no_import = flux_no_import[objective]
        result.objective_flux_seeds = result.objective_flux_seeds[objective]
        result.objective_flux_demands = (
            result.objective_flux_demands.get(objective)
            if result.objective_flux_demands
            else None
        )

    return objective, result, flux_no_import, flux_init
|
|
1795
|
+
|
|
1796
|
+
|
|
1797
|
+
|
|
1798
|
+
def print_flux(result, maximize, is_community, is_parallele=False):
    """
    Standalone function to print flux results as a table row.

    Args:
        result (Resmod): The current result to print.
        maximize (bool): Whether we're in maximize mode.
        is_community (bool): If the model is a community type.
        is_parallele (bool, optional): When True, build the row but do not
            print it (parallel workers print elsewhere). Defaults to False.

    Returns:
        bool: warning flag (e.g., for hybrid LP divergence).
    """
    warning = False
    # "model_one_solution" rows are not printed at all
    if result.name != "model_one_solution":
        if result.OK_seeds:
            # Green when the LP objective and the cobra seed flux agree
            # (|difference| < 0.1), cyan when they diverge
            if result.solver_type in {"HYBRID", "FBA"} and abs(result.chosen_lp - result.objective_flux_seeds) < 0.1:
                color_seeds = color_lp = color.green_light
            else:
                color_seeds = color_lp = color.cyan_light
            # Without maximisation a divergence between LP and cobra is suspicious
            if result.solver_type in {"HYBRID", "FBA"} and not maximize and abs(result.chosen_lp - result.objective_flux_seeds) > 0.1:
                warning = True
        else:
            color_seeds = color.red_bright
            # NOTE(review): color_lp is never assigned on this path; the
            # HYBRID/FBA branch below would then raise NameError — confirm
            # OK_seeds cannot be False for those solver types.

        concat_result = f"{result.name} | "

        if not result.infeasible_seeds:
            if is_community:
                # Community: one sub-row per objective, demands included per objective
                concat_result = add_print_seed_community(result, color_seeds, concat_result, result.infeasible_seeds)
            else:
                concat_result += f"{color_seeds}{result.objective_flux_seeds}{color.reset} | "
                warning, concat_result = add_print_demands(result, maximize, warning, concat_result)
        else:
            concat_result += "Infeasible | "
            if is_community:
                concat_result = add_print_seed_community(result, color_seeds, concat_result, result.infeasible_seeds)
            else:
                warning, concat_result = add_print_demands(result, maximize, warning, concat_result)

        # LP-backed solvers also display the rounded LP objective value
        if result.solver_type in {"HYBRID", "FBA"}:
            lp_flux_rounded = round(result.chosen_lp, 4)
            concat_result += f" | {color_lp}{lp_flux_rounded}{color.reset}"

        if not is_parallele:
            print(concat_result)

    return warning
|
|
1846
|
+
|
|
1847
|
+
|
|
1848
|
+
def add_print_seed_community(result:Resmod, color_seeds:str, concat_result:str, is_infeasible:bool=False):
    """Used for Community mode to add objectives line by line with their respective seed
    flux values without rewriting the model name, and the corresponding demands value
    for each objective.

    Args:
        result (Resmod): Resmod object containing results data value and fluxes
        color_seeds (str): Text coloration for terminal
        concat_result (str): String to print, built by concatenating table cells
        is_infeasible (bool, optional): Is the solution infeasible with cobra. Defaults to False.

    Returns:
        str: String to print, built by concatenating table cells
    """
    is_first = True
    for objective, value in result.objective_flux_seeds.items():
        if is_first:
            # First objective continues the current row (model name already printed)
            if not is_infeasible:
                concat_result += color_seeds + objective + ": " + str(value) + color.reset + " | "
            # In community mode, no warning from Hybrid-lpx to print
            _, concat_result = add_print_demands(result, False, False, concat_result, objective)
            is_first=False
        else:
            # Subsequent objectives start a continuation line with empty
            # model-name / seed cells as appropriate
            # NOTE(review): cell-alignment whitespace in these literals may have
            # been collapsed by extraction — verify against the released source
            if is_infeasible:
                next_line = f"\n | | "
            else:
                next_line = f"\n | " + color_seeds + objective + ": " + str(value) + color.reset + " | "
            _, next_line = add_print_demands(result, False, False, next_line, objective)
            concat_result += next_line
    return concat_result
|
|
1878
|
+
|
|
1879
|
+
|
|
1880
|
+
def add_print_demands(result:Resmod, maximize:bool, warning:bool, concat_result:str, objective:str=None):
    """Print Demands flux respectively to each objective (called at the right time)

    Args:
        result (Resmod): Resmod object containing results data value and fluxes
        maximize (bool): Define if a warning must be printed depending on whether the
            maximisation argument was used or not (Hybrid-lpx)
        warning (bool): Warning that has to be raised or not. Only used for Hybrid-lpx mode
        concat_result (str): String to print, built by concatenating table cells
        objective (str, optional): The objective reaction to add in results (for community mode). Defaults to None.

    Returns:
        bool, str: warning, concat_result
    """
    # Pick the cell color: green = LP and cobra seed flux agree, cyan = diverge,
    # red = seeds failed, default = no demand check performed
    if result.OK_demands:
        # NOTE(review): in community mode objective_flux_seeds is a dict, so this
        # subtraction would raise for HYBRID/FBA — presumably those solver types
        # never reach here in community mode; confirm
        if (result.solver_type=="HYBRID" or result.solver_type=="FBA") \
            and abs(result.chosen_lp - result.objective_flux_seeds) < 0.1:
            color_demands=color.green_light
        else:
            color_demands=color.cyan_light
            # Divergence without maximisation is worth a warning (Hybrid-lpx)
            if (result.solver_type=="HYBRID" or result.solver_type=="FBA") \
                and not maximize and abs(result.chosen_lp - result.objective_flux_seeds) > 0.1:
                warning = True
    elif not result.OK_seeds:
        color_demands=color.red_bright
    else:
        color_demands=color.reset

    if not result.infeasible_demands:
        flux_demand = result.objective_flux_demands
        supp = ""
        if flux_demand == None:
            # No demand flux recorded for this result
            flux_demand = "NA"
        else:
            if objective:
                # Community mode: objective_flux_demands is keyed per objective
                flux_demand = result.objective_flux_demands[objective]
                supp = objective + ": "

        concat_result += color_demands + supp + f"{flux_demand}" + color.reset
    else:
        concat_result += f"Infeasible"

    return warning, concat_result
|