seed2lp 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. seed2lp/__init__.py +12 -0
  2. seed2lp/__main__.py +837 -0
  3. seed2lp/_version.py +2 -0
  4. seed2lp/argument.py +717 -0
  5. seed2lp/asp/atom_for_transfers.lp +7 -0
  6. seed2lp/asp/community_heuristic.lp +3 -0
  7. seed2lp/asp/community_search.lp +14 -0
  8. seed2lp/asp/constraints_targets.lp +15 -0
  9. seed2lp/asp/definition_atoms.lp +87 -0
  10. seed2lp/asp/enum-cc.lp +50 -0
  11. seed2lp/asp/flux.lp +70 -0
  12. seed2lp/asp/limit_transfers.lp +9 -0
  13. seed2lp/asp/maximize_flux.lp +2 -0
  14. seed2lp/asp/maximize_produced_target.lp +7 -0
  15. seed2lp/asp/minimize.lp +8 -0
  16. seed2lp/asp/seed-solving.lp +116 -0
  17. seed2lp/asp/seed_external.lp +1 -0
  18. seed2lp/asp/show_seeds.lp +2 -0
  19. seed2lp/asp/show_tranfers.lp +1 -0
  20. seed2lp/asp/test.lp +61 -0
  21. seed2lp/clingo_lpx.py +236 -0
  22. seed2lp/color.py +34 -0
  23. seed2lp/config.yaml +56 -0
  24. seed2lp/description.py +424 -0
  25. seed2lp/file.py +151 -0
  26. seed2lp/flux.py +365 -0
  27. seed2lp/linear.py +431 -0
  28. seed2lp/log_conf.yaml +25 -0
  29. seed2lp/logger.py +112 -0
  30. seed2lp/metabolite.py +46 -0
  31. seed2lp/network.py +1921 -0
  32. seed2lp/reaction.py +207 -0
  33. seed2lp/reasoning.py +459 -0
  34. seed2lp/reasoningcom.py +753 -0
  35. seed2lp/reasoninghybrid.py +791 -0
  36. seed2lp/resmod.py +74 -0
  37. seed2lp/sbml.py +307 -0
  38. seed2lp/scope.py +124 -0
  39. seed2lp/solver.py +333 -0
  40. seed2lp/temp_flux_com.py +74 -0
  41. seed2lp/utils.py +237 -0
  42. seed2lp-2.0.0.dist-info/METADATA +404 -0
  43. seed2lp-2.0.0.dist-info/RECORD +53 -0
  44. seed2lp-2.0.0.dist-info/WHEEL +5 -0
  45. seed2lp-2.0.0.dist-info/entry_points.txt +2 -0
  46. seed2lp-2.0.0.dist-info/licenses/LICENCE.txt +145 -0
  47. seed2lp-2.0.0.dist-info/top_level.txt +2 -0
  48. tests/__init__.py +0 -0
  49. tests/fba.py +147 -0
  50. tests/full_network.py +166 -0
  51. tests/normalization.py +188 -0
  52. tests/target.py +286 -0
  53. tests/utils.py +181 -0
seed2lp/__main__.py ADDED
@@ -0,0 +1,837 @@
1
+ """entry point for predator.
2
+
3
+ """
4
+
5
+
6
+ import argparse
7
+
8
+ from time import time
9
+ from sys import exit
10
+ from os import path
11
+ from shutil import copyfile
12
+ from .file import is_valid_dir
13
+
14
+ from . import utils, argument, file
15
+ from .sbml import read_SBML_species
16
+ from .network import Network, Netcom, NET_TITLE
17
+ from .reasoning import Reasoning
18
+ from .reasoningcom import ComReasoning
19
+ from .linear import Hybrid, FBA
20
+ from .description import Description
21
+ from .file import load_json
22
+ from pathlib import Path
23
+ from .scope import Scope
24
+ from . import logger
25
+
26
+ #Global variable needed
27
+ PROJECT_DIR = path.dirname(path.abspath(__file__))
28
+ CLEAN_TEMP=False
29
+
30
+
31
+ ####################### FUNCTIONS ##########################
32
+ def get_reaction_options(keep_import_reactions:bool, topological_injection:bool,
33
+ targets_as_seeds:bool, maximization:bool, mode:str, accumulation:bool,
34
+ solve:str="", is_partial_delsupset:bool=False, is_extended_transfers:bool=False):
35
+ """Get full options informations into a dictionnary
36
+
37
+ Args:
38
+ keep_import_reactions (bool): Import reactions are not removed if True
39
+ topological_injection (bool): Topological injection is used if True
40
+ targets_as_seeds (bool): Target are forbidden seed if True
41
+ maximization (bool): Execute an objective flux maximization if True (Hybrid or FBA)
42
+ mode (str): Witch mode is used (indiv : target, full or fba / community: global, bisteps or delsupset)
43
+ accumulation(bool): Allow accumulations in ASP
44
+ solve(str): which solve mode is used (reasoning, filter, guess_check or guess_check_div)
45
+ is_partial_delsupset(bool): Partial delete superset is used (no post python check of sets)
46
+
47
+ Returns:
48
+ dict: list of option used (short value is for filename differentiation)
49
+ """
50
+ options = dict()
51
+
52
+ if keep_import_reactions:
53
+ if topological_injection:
54
+ reaction_option = "Topological Injection"
55
+ short_option = "import_rxn_ti"
56
+ else:
57
+ reaction_option = "No Topological Injection"
58
+ short_option = "import_rxn_nti"
59
+ else:
60
+ reaction_option = "Remove Import Reaction"
61
+ short_option = "rm_rxn"
62
+
63
+ target_option=""
64
+ match mode:
65
+ case "target":
66
+ network_option = "Target"
67
+ short_option += "_tgt"
68
+ if targets_as_seeds:
69
+ target_option = "Targets are allowed seeds"
70
+ else:
71
+ target_option = "Targets are forbidden seeds"
72
+ short_option += "_taf"
73
+ case "full":
74
+ network_option = "Full network"
75
+ short_option += "_fn"
76
+ case "fba":
77
+ network_option = "FBA"
78
+ short_option += "_fba"
79
+ if targets_as_seeds:
80
+ target_option = "Targets are allowed seeds"
81
+ else:
82
+ target_option = "Targets are forbidden seeds"
83
+ short_option += "_taf"
84
+ case "global":
85
+ network_option = "Community Global"
86
+ short_option += "_com_global"
87
+ if targets_as_seeds:
88
+ target_option = "Targets are allowed seeds"
89
+ else:
90
+ target_option = "Targets are forbidden seeds"
91
+ short_option += "_taf"
92
+ case "bisteps":
93
+ network_option = "Community Bisteps"
94
+ short_option += "_com_bisteps"
95
+ if targets_as_seeds:
96
+ target_option = "Targets are allowed seeds"
97
+ else:
98
+ target_option = "Targets are forbidden seeds"
99
+ short_option += "_taf"
100
+ if is_extended_transfers:
101
+ short_option += "_extend_transf"
102
+ case "delsupset":
103
+ network_option = "Community delete superset"
104
+ short_option += "_com_delsupset"
105
+ if targets_as_seeds:
106
+ target_option = "Targets are allowed seeds"
107
+ else:
108
+ target_option = "Targets are forbidden seeds"
109
+ short_option += "_taf"
110
+ if is_partial_delsupset:
111
+ short_option += "_com_partial_delsupset"
112
+ if is_extended_transfers:
113
+ short_option += "_extend_transf"
114
+ case _:
115
+ network_option = mode
116
+ short_option += f"_{mode.lower()}"
117
+
118
+ match solve:
119
+ case 'reasoning':
120
+ solve_option = "REASONING"
121
+ short_option += "_reas"
122
+ case 'hybrid':
123
+ solve_option = "HYBRID"
124
+ short_option += "_hyb"
125
+ case 'guess_check':
126
+ solve_option = "REASONING GUESS-CHECK"
127
+ short_option += "_gc"
128
+ case 'guess_check_div':
129
+ solve_option = "REASONING GUESS-CHECK DIVERSITY"
130
+ short_option += "_gcd"
131
+ case 'filter':
132
+ solve_option = "REASONING FILTER"
133
+ short_option += "_fil"
134
+ case 'all':
135
+ if mode != "fba":
136
+ solve_option = "ALL"
137
+ short_option += "_all"
138
+ else:
139
+ solve_option = ""
140
+ short_option += ""
141
+
142
+ if maximization:
143
+ flux_option = "Maximization"
144
+ short_option += "_max"
145
+ else:
146
+ flux_option = "With flux"
147
+
148
+ if accumulation:
149
+ accu_option = "Allowed"
150
+ short_option += "_accu"
151
+ else:
152
+ accu_option = "Forbidden"
153
+ short_option += "_no_accu"
154
+
155
+ options["short"] = short_option
156
+ options["reaction"] = reaction_option
157
+ options["network"] = network_option
158
+ options["target"] = target_option
159
+ options["flux"] = flux_option
160
+ options["accumulation"] = accu_option
161
+ options["solve"] = solve_option
162
+
163
+ return options
164
+
165
+
166
def chek_inputs(sbml_file:str, input_dict:dict):
    """Verify that every user-provided element exists in the SBML file.

    Args:
        sbml_file (str): Network SBML file
        input_dict (dict): Input data ordered in a dictionary

    Raises:
        ValueError: A reaction does not exist in the network file
        ValueError: A metabolite does not exist in the network file
    """
    model_dict = read_SBML_species(sbml_file)
    known_reactions = model_dict["Reactions"]
    known_metabolites = model_dict["Metabolites"]
    for key, elements in input_dict.items():
        if key == "Objective":
            # NOTE(review): entry[1] suggests each objective entry is a sequence
            # whose second item is the reaction id — confirm against the callers.
            for entry in elements:
                if f'{entry[1]}' not in known_reactions:
                    raise ValueError(f"Reaction {entry} does not exist in network file {sbml_file}\n")
        else:
            for species_id in elements:
                if species_id not in known_metabolites:
                    raise ValueError(f"Metabolite {species_id} does not exist in network file {sbml_file}\n")
187
+
188
+
189
def get_input_datas(seeds_file:str=None,
                    forbidden_seeds:str=None, possible_seeds:str=None,
                    forbidden_transfers_file:str=None):
    """Collect the user-provided input files into a single dictionary.

    Empty files are skipped with a warning instead of failing.

    Args:
        seeds_file (str, optional): File containing mandatory seeds. Defaults to None.
        forbidden_seeds (str, optional): File containing forbidden seeds. Defaults to None.
        possible_seeds (str, optional): File containing possible seeds. Defaults to None.
        forbidden_transfers_file (str, optional): File containing forbidden transfers
            (parsing not implemented yet). Defaults to None.

    Returns:
        dict: all input data given by the user
    """
    input_dict = dict()

    def _warn_empty(filepath):
        # Same message for every skipped empty file.
        logger.log.warning(f"\n{filepath} is empty.\nPlease check your file and launch again\n")

    if seeds_file:
        if file.file_is_empty(seeds_file):
            _warn_empty(seeds_file)
        else:
            input_dict["Seeds"] = utils.get_ids_from_file(seeds_file, 'seed_user')

    if forbidden_seeds:
        if file.file_is_empty(forbidden_seeds):
            _warn_empty(forbidden_seeds)
        else:
            input_dict["Forbidden seeds"] = utils.get_ids_from_file(forbidden_seeds, 'forbidden')

    if possible_seeds:
        if file.file_is_empty(possible_seeds):
            _warn_empty(possible_seeds)
        else:
            input_dict["Possible seeds"] = utils.get_ids_from_file(possible_seeds, 'sub_seed')

    if forbidden_transfers_file and file.file_is_empty(forbidden_transfers_file):
        _warn_empty(forbidden_transfers_file)
    #TODO: Finish forbidden transfers file
    #else:
    #    input_dict["Forbidden transfers"] = utils.get_list_transfers(forbidden_transfers_file)
    return input_dict
226
+
227
+
228
def get_targets(targets_file:str, input_dict:dict, is_community:bool) -> dict :
    """Read target metabolites and the objective reaction from a file.

    ONLY USED WITH TARGET MODE. Existence of the given data in the SBML
    file is checked separately.

    Args:
        targets_file (str): Path of the targets file
        input_dict (dict): Input dictionary under construction
        is_community (bool): Community mode

    Returns:
        dict: input dictionary completed with "Targets" and "Objective"
    """
    # An empty targets file is fatal in target mode.
    if file.file_is_empty(targets_file):
        logger.log.warning(f"\n{targets_file} is empty.\nPlease check your file and launch again\n")
        exit(1)
    try:
        targets, objective = utils.get_targets_from_file(targets_file, is_community)
    except ValueError as err:
        logger.log.error(str(err))
        logger.log.warning("Please check your file and launch again\n")
        exit(1)
    except NotImplementedError as err:
        print(str(err))
        exit(1)
    input_dict["Targets"] = targets
    input_dict["Objective"] = objective
    return input_dict
256
+
257
def get_objective(objective:str, input_dict:dict):
    """Store the objective reaction name given on the command line.

    ONLY USED WITH FULL NETWORK MODE. Existence of the reaction in the
    SBML file is checked separately.

    Args:
        objective (str): Name of the new objective reaction from the command line
        input_dict (dict): Input dictionary under construction

    Returns:
        dict: input dictionary completed with "Objective"
    """
    # A single objective is supported for now; it is still stored as a list
    # to keep the structure uniform with the other inputs.
    input_dict["Objective"] = [objective]
    return input_dict
274
+
275
def get_temp_dir(args):
    """Resolve the temporary directory from the arguments or use the default.

    Args:
        args (dict): arguments used

    Returns:
        str: validated path of the temporary directory
    """
    # Default to a "tmp" directory next to the package when no --temp is given.
    chosen = Path(args['temp']).resolve() if args['temp'] else path.join(PROJECT_DIR, 'tmp')
    return file.is_valid_dir(chosen)
289
+
290
+
291
def init_s2pl(args:dict, run_mode:str, is_community:bool=False):
    """Check and validate input data, and get the options used.

    Args:
        args (dict): arguments used
        run_mode (str): command used
        is_community (bool): Community mode. Defaults to False.

    Returns:
        options, input_dict, out_dir, temp : options dictionary, input dictionary,
            value of out_dir and temp dir

    Raises:
        KeyError: neither 'infile' nor 'comfile' is present in args
    """
    # Optional flags default to False when the sub-command does not define them.
    maximize = args.get('maximize_flux', False)
    part_del = args.get('partial_delete_superset', False)
    all_transf = args.get('all_transfers', False)

    options = \
        get_reaction_options(args['keep_import_reactions'], args['topological_injection'],
                             args['targets_as_seeds'], maximize,
                             run_mode, args['accumulation'], args['solve'], part_del, all_transf)

    # BUGFIX: the original left `infile` unbound (NameError at the logger call)
    # when neither key was present; fail fast with an explicit error instead.
    if 'infile' in args:
        infile = args['infile']
    elif 'comfile' in args:
        infile = args['comfile']
    else:
        raise KeyError("args must contain either 'infile' or 'comfile'")
    logger.get_logger(infile, options["short"], args['verbose'])

    temp = get_temp_dir(args)

    out_dir = args['output_dir']
    file.is_valid_dir(out_dir)

    ###############################################################
    ################### Only for community mode ###################
    ###############################################################
    forbidden_transfers_file = args.get('forbidden_transfers_file')
    ###############################################################
    ###############################################################

    # Getting the networks from sbml file
    input_dict = get_input_datas(args['seeds_file'], args['forbidden_seeds_file'],
                                 args['possible_seeds_file'], forbidden_transfers_file)
    if args.get('targets_file'):  # only in target mode
        input_dict = get_targets(args['targets_file'], input_dict, is_community)
    if args.get('objective'):  # only in full network mode
        input_dict = get_objective(args['objective'], input_dict)
    return options, input_dict, out_dir, temp
351
+
352
+
353
def initiate_results(network:Network, options:dict, args:dict, run_mode:str):
    """Build the initial result dictionary from the user's options and given
    data such as forbidden seeds, possible seeds and defined seeds.

    Also appends the targets to the network's forbidden seeds when targets
    are not allowed as seeds.

    Args:
        network (Network): Network object defined from the SBML file and user data
        options (dict): Dictionary containing the used options
        args (dict): List of arguments
        run_mode (str): Run mode used

    Returns:
        dict: a dictionary containing all data used for solving
    """
    user_data = dict()
    if network.targets:
        user_data['TARGETS'] = network.targets
    if network.seeds:
        user_data['SEEDS'] = network.seeds
    if network.forbidden_seeds:
        user_data['FORBIDDEN SEEDS'] = network.forbidden_seeds
    if network.possible_seeds:
        # Possible seeds only make sense when a minimization is performed.
        if args['mode'] in ('minimize', 'all'):
            user_data['POSSIBLE SEEDS'] = network.possible_seeds
        else:
            logger.log.error("Possible seed can be used only with minimize mode")
            exit(1)

    res_option = {'REACTION': options['reaction']}
    if run_mode in ("target", 'fba'):
        res_option['TARGET'] = options['target']
        res_option['FLUX'] = options['flux']

    ###############################################################
    ################### Only for community mode ###################
    ###############################################################
    if run_mode == "community":
        res_option['FLUX EQUALITY'] = bool(args['equality_flux'])
    ###############################################################
    ###############################################################

    res_option['ACCUMULATION'] = options['accumulation']

    net = {"NAME": network.name}
    if run_mode == "community":
        net["SPECIES"] = network.species
    net["OBJECTIVE"] = network.objectives
    net["SEARCH_MODE"] = options['network']
    net["SOLVE"] = options["solve"]

    results = {"OPTIONS": res_option, "NETWORK": net, "USER DATA": user_data}

    if not args['targets_as_seeds']:
        # Forbidding targets as seeds: append every target to the forbidden list.
        network.forbidden_seeds += [*network.targets]

    return results
416
+
417
+ ############################################################
418
+
419
+
420
+ ############################################################
421
+ ####################### COMMANDS ###########################
422
+ ############################################################
423
+
424
+
425
+ #----------------------- SEED2LP ---------------------------
426
def run_seed2lp(args:dict, run_mode):
    """Launch seed searching for one network after normalising it.

    Orchestrates: input validation, network construction, fact generation,
    solver dispatch (reasoning / hybrid / FBA depending on run_mode and the
    'solve' argument), result saving and optional flux checking.

    Args:
        args (dict): dictionary of arguments (from the parsed command line)
        run_mode (str): command used ("target", "full" or "fba")

    Returns:
        tuple: (results dict also saved as json, timers dict)
    """
    minimize=False
    subset_minimal=False
    solutions = dict()

    options, input_dict, out_dir, temp = init_s2pl(args, run_mode)
    # Verify if input data exist into sbml file
    try:
        chek_inputs(args['infile'], input_dict)
    except ValueError as e :
        logger.log.error(str(e))
        exit(1)

    # Time spent parsing the SBML file and building the Network object.
    time_data_extraction = time()
    network = Network(args['infile'], run_mode, args['targets_as_seeds'],
                      args['topological_injection'], args['keep_import_reactions'],
                      input_dict, args['accumulation'])


    time_data_extraction = time() - time_data_extraction


    results = initiate_results(network,options,args,run_mode)
    network.convert_to_facts()

    # When --instance is given, only dump the ASP facts and stop.
    # NOTE(review): exits with status 1 even though this is not an error path —
    # confirm the nonzero exit code is intended.
    if args['instance']:
        with open(args['instance'], "w") as f:
            f.write(network.facts)
        exit(1)

    network.simplify()

    # Select the enumeration mode(s): minimize and/or subset-minimal.
    if args['mode'] == 'minimize' or args['mode'] == 'all':
        minimize = True
    if args['mode'] == 'subsetmin' or args['mode'] == 'all':
        subset_minimal = True

    # Global seed searching time
    time_seed_search = time()

    match run_mode:
        case "target" | "full":
            run_solve = args['solve']
            # NOTE(review): `run_solve == 'all'` is redundant here ('all' already
            # differs from "hybrid") — this branch runs for every solve mode
            # except plain "hybrid".
            if run_solve != "hybrid" or run_solve == 'all':
                # In target mode we need objective reaction to detect targets from file if not given by user
                # we force this whatever it is given or not
                if network.is_objective_error and (run_solve != "reasoning" or run_mode == "target"):
                    end_message = " aborted! No Objective found.\n"
                    # Dispatch the error report on (solve mode, run mode).
                    match run_solve,run_mode:
                        case _,"target":
                            logger.log.error(f"Mode Target {end_message}")
                        case 'filter','full':
                            logger.log.error(f"Solve Filter {end_message}")
                        case 'guess_check','full':
                            logger.log.error(f"Solve Guess Check {end_message}")
                        case 'guess_check_div','full':
                            logger.log.error(f"Solve Guess Check Diversity {end_message}")
                        # In classic reasoning on a Full Network, no objective reaction is
                        # needed (even if it is deleted): run plain reasoning anyway and
                        # only report the objective-dependent solvers as aborted.
                        case 'all','full': # | 'reasoning','target':
                            model = Reasoning(run_mode, "reasoning", network, args['time_limit'], args['number_solution'],
                                              args['clingo_configuration'], args['clingo_strategy'],
                                              args['intersection'], args['union'], minimize, subset_minimal,
                                              temp, options['short'],
                                              args['verbose'])
                            model.search_seed()
                            solutions['REASONING'] = model.output
                            results["RESULTS"] = solutions
                            # Intermediate saving in case the hybrid mode fails later.
                            file.save(f'{network.name}_{options["short"]}_results',out_dir, results, 'json')
                            logger.log.error(f"Solve Filter / Guess Check / Guess Check Diversity {end_message}")
                            solutions['REASONING-OTHER'] = "No objective found"
                elif run_mode == "target" and not network.targets:
                    # Target mode without any target: nothing to search for.
                    logger.log.error(f"Mode REASONING aborted! No target found")
                    solutions['REASONING'] = "No target found"
                else:
                    model = Reasoning(run_mode, run_solve, network, args['time_limit'], args['number_solution'],
                                      args['clingo_configuration'], args['clingo_strategy'],
                                      args['intersection'], args['union'], minimize, subset_minimal,
                                      temp, options['short'],
                                      args['verbose'])
                    model.search_seed()
                    solutions['REASONING'] = model.output
                    results["RESULTS"] = solutions
                    # Intermediate saving in case the hybrid mode fails later.
                    file.save(f'{network.name}_{options["short"]}_results',out_dir, results, 'json')

            # Hybrid solving requires a valid objective reaction.
            if run_solve == "hybrid" or run_solve == 'all':
                if not network.objectives or network.is_objective_error:
                    logger.log.error(f"Mode HYBRID aborted! No objective found")
                    solutions['HYBRID'] = "No objective found"
                else:
                    model = Hybrid(run_mode, run_solve, network, args['time_limit'], args['number_solution'],
                                   args['clingo_configuration'], args['clingo_strategy'],
                                   args['intersection'], args['union'], minimize, subset_minimal,
                                   args['maximize_flux'], temp,
                                   options['short'], args['verbose'])
                    model.search_seed()
                    solutions['HYBRID'] = model.output
                    results["RESULTS"] = solutions
        case "fba":
            # FBA also requires a valid objective reaction.
            if not network.objectives or network.is_objective_error:
                logger.log.error(f"Mode FBA aborted! No objective found")
                solutions['FBA'] = "No objective found"
            else:
                model = FBA(run_mode, network, args['time_limit'], args['number_solution'],
                            args['clingo_configuration'], args['clingo_strategy'],
                            args['intersection'], args['union'], minimize, subset_minimal,
                            args['maximize_flux'], temp,
                            options['short'], args['verbose'])
                model.search_seed()
                solutions['FBA'] = model.output
                results["RESULTS"] = solutions
    time_seed_search = time() - time_seed_search

    # Show the timers
    timers = {'DATA EXTRACTION': time_data_extraction} |\
             {'TOTAL SEED SEARCH': time_seed_search,
              'TOTAL': time_data_extraction + time_seed_search
             }

    # Align the timer names, print and log the same message.
    namewidth = max(map(len, timers))
    time_mess=""
    for name, value in timers.items():
        value = value if isinstance(value, str) else f'{round(value, 3)}s'
        time_mess += f'\nTIME {name.center(namewidth)}: {value}'

    print(time_mess)
    logger.log.info(time_mess)
    print("\n")

    # Save the result into json file
    file.save(f'{network.name}_{options["short"]}_results',out_dir, results, 'json')

    # Save all fluxes into tsv file
    if args['check_flux']:
        network.check_fluxes(args['maximize_flux'])
        file.save(f'{network.name}_{options["short"]}_fluxes', out_dir, network.fluxes, 'tsv')

    # CLEAN_TEMP is a module-level switch (currently False).
    if CLEAN_TEMP:
        file.delete(network.instance_file)

    return results, timers
573
+
574
+
575
+ #------------------ COMMUNITY SEED SEARCHING -------------------
576
def community(args:argparse, run_mode):
    """Launch seed searching for a community after merging Networks and normalising it.

    Community solving always uses the reasoning solver (ComReasoning); the
    hybrid solver is not used in this mode.

    Args:
        args (dict): dictionary of arguments (annotated `argparse` in the
            original; it is indexed like a dict throughout)
        run_mode (str): command used ("community")

    Returns:
        tuple: (results dict also saved as json, timers dict)
    """
    solutions = dict()
    community_mode = args['community_mode']
    run_solve = args['solve']

    options, input_dict, out_dir, temp = init_s2pl(args, community_mode, is_community=True)


    # Time spent reading/merging the SBML files and building the Netcom object.
    time_data_extraction = time()

    network=Netcom(args["comfile"], args["sbmldir"], temp, run_mode, run_solve, community_mode, args['targets_as_seeds'], args['topological_injection'],
                   args['keep_import_reactions'], input_dict, args['accumulation'], to_print=True,
                   write_sbml=True, equality_flux=args["equality_flux"])
    time_data_extraction = time() - time_data_extraction

    results = initiate_results(network,options,args,run_mode)
    network.convert_to_facts()


    # Global seed searching time
    time_seed_search = time()


    # The community mode works like target mode and needs an objective reaction to detect
    # targets from file if not given by the user; we force this whether it is given or not.
    if network.is_objective_error and (run_solve != "reasoning" or run_mode == "community"):
        end_message = " aborted! \nMissing objective at least for one network.\n"
        logger.log.error(f"Mode community {end_message}")

    else:
        model = ComReasoning(run_mode, run_solve, network, args['time_limit'], args['number_solution'],
                             args['clingo_configuration'], args['clingo_strategy'],
                             args['intersection'], args['union'],
                             temp, options['short'],
                             args['verbose'],
                             community_mode, args['partial_delete_superset'], args['all_transfers'],
                             args['not_shown_transfers'], args['limit_transfers'])
        model.search_seed()
        solutions['REASONING'] = model.output
        results["RESULTS"] = solutions

        file.save(f'{network.name}_{options["short"]}_results',out_dir, results, 'json')


    # NOTE(review): redundant when the else-branch ran (already assigned above);
    # on the error path this stores an empty solutions dict.
    results["RESULTS"] = solutions
    time_seed_search = time() - time_seed_search

    # Show the timers
    timers = {'DATA EXTRACTION': time_data_extraction} |\
             {'TOTAL SEED SEARCH': time_seed_search,
              'TOTAL': time_data_extraction + time_seed_search
             }

    # Align the timer names, print and log the same message.
    namewidth = max(map(len, timers))
    time_mess=""
    for name, value in timers.items():
        value = value if isinstance(value, str) else f'{round(value, 3)}s'
        time_mess += f'\nTIME {name.center(namewidth)}: {value}'

    print(time_mess)
    logger.log.info(time_mess)
    print("\n")

    # Save all fluxes into tsv file
    if args['check_flux']:
        # There is no maximisation when Hybrid lpx is not used (and community mode do not solve in hybrid lpx)
        network.check_fluxes(False)
        file.save(f'{network.name}_{options["short"]}_fluxes', out_dir, network.fluxes, 'tsv')

    # CLEAN_TEMP is a module-level switch (currently False).
    if CLEAN_TEMP:
        file.delete(network.instance_file)
        file.delete(network.file)

    return results, timers
655
+
656
+
657
+ #---------------------- NETWORK ---------------------------
658
def network_rendering(args:argparse):
    """Render the network as a description (reaction formulas) or as graphs.

    Args:
        args (argparse): List or arguments
    """
    # The log file name encodes whether import reactions were kept.
    reac_status = "import_rxn_" if args["keep_import_reactions"] else "rm_rxn_"
    logger.get_logger(args['infile'], f"{reac_status}network_render", args['verbose'])

    network = Description(args['infile'], args['keep_import_reactions'],
                          args['output_dir'], details=args['network_details'],
                          visu=args['visualize'],
                          visu_no_reaction=args['visualize_without_reactions'],
                          write_file=args['write_file'])

    if network.details:
        network.get_details()
    if network.visu or network.visu_no_reaction:
        # Timed, although the duration is currently not reported anywhere.
        time_rendering = time()
        print('Rendering…')
        network.render_network()
        time_rendering = time() - time_rendering
    if network.write_file:
        network.rewrite_sbml_file()
686
+
687
+
688
+
689
+
690
+ #---------------------- FLUX ---------------------------
691
def network_flux(args:argparse):
    """Check the network fluxes using cobra from a seed2lp result file.

    Needs the SBML file of the network.
    Writes the flux table into the output directory.

    Args:
        args (argparse): List or arguments
    """
    logger.get_logger(args['infile'], "check_fluxes", args['verbose'])

    data = load_json(args['result_file'])
    input_dict = {"Objective": data["NETWORK"]["OBJECTIVE"]}

    network = Network(args['infile'], to_print=False, input_dict=input_dict)
    maximize, solve = network.convert_data_to_resmod(data)
    network.check_fluxes(maximize, args["flux_parallel"])

    # Rebuild the option set so the output file name matches the original run.
    options = get_reaction_options(network.keep_import_reactions, network.use_topological_injections,
                                   network.targets_as_seeds, maximize, network.run_mode,
                                   network.accumulation, solve)

    file.save(f'{network.name}_{options["short"]}_fluxes_from_result', args['output_dir'], network.fluxes, 'tsv')
714
+
715
+
716
def network_flux_community(args:argparse):
    """Check the community network fluxes using cobra from a seed2lp result file.

    Needs a community file containing a list of networks and the SBML directory.
    Writes the flux table into the output directory.

    Args:
        args (argparse): List or arguments
    """
    logger.get_logger(args['comfile'], "check_fluxes", args['verbose'])

    data = load_json(args['result_file'])
    input_dict = {"Objective": data["NETWORK"]["OBJECTIVE"]}

    # Result files store the solve mode under its display title; translate it
    # back to the internal keyword when a mapping exists, otherwise keep it.
    solve_title = data["NETWORK"]["SOLVE"]
    run_solve = NET_TITLE.CONVERT_TITLE_SOLVE.get(solve_title, solve_title)

    temp = get_temp_dir(args)

    network = Netcom(args["comfile"], args["sbmldir"], temp, run_solve=run_solve, input_dict=input_dict,
                     to_print=False, write_sbml=True, equality_flux=args["equality_flux"])

    maximize, solve = network.convert_data_to_resmod(data)

    network.check_fluxes(maximize, args['flux_parallel'])
    # Rebuild the option set so the output file name matches the original run.
    options = get_reaction_options(network.keep_import_reactions, network.use_topological_injections,
                                   network.targets_as_seeds, maximize, network.run_mode,
                                   network.accumulation, solve)

    file.save(f'{network.name}_{options["short"]}_fluxes_from_result', args['output_dir'], network.fluxes, 'tsv')
749
+
750
+
751
+ #---------------------- SCOPE ---------------------------
752
def scope(args:argparse):
    """Compute the scope of the network from a seed2lp result file.

    Needs the SBML file of the network.
    Writes the result in the output directory.

    Args:
        args (argparse): List or arguments
    """
    # BUGFIX (docs): the original docstring was copy-pasted from network_flux
    # and wrongly described a cobra flux check.
    logger.get_logger(args['infile'], "scope", args['verbose'])
    input_dict=dict()
    network = Network(args['infile'], to_print=False, input_dict=input_dict)
    data = load_json(args['result_file'])
    network.convert_data_to_resmod(data)
    # Local renamed from `scope` to stop shadowing this function's own name.
    network_scope = Scope(args['infile'], network, args['output_dir'])
    network_scope.execute()
767
+
768
+
769
+
770
+ #------------------- CONF FILE ------------------------
771
def save_conf(args:argparse):
    """Copy the internal configuration file into the output directory.

    Args:
        args (argparse): List or arguments
    """
    source_conf = path.join(PROJECT_DIR, 'config.yaml')
    destination_conf = path.join(args['output_dir'], 'config.yaml')
    copyfile(source_conf, destination_conf)
780
+
781
+
782
+ #------------------ WRITE TARGETS ----------------------
783
def get_objective_targets(args:argparse):
    """List the metabolites that are reactants of the objective reaction.

    The targets found are printed and saved into a text file in the output
    directory.

    Args:
        args (argparse): List or arguments
    """
    logger.get_logger(args['infile'], "objective_targets", args['verbose'])
    input_dict = get_input_datas()
    if 'objective' in args and args['objective']:  # only in full network mode
        input_dict = get_objective(args['objective'], input_dict)
    # Make sure the requested objective exists in the SBML file.
    try:
        chek_inputs(args['infile'], input_dict)
    except ValueError as err:
        logger.log.error(str(err))
        exit(1)
    network = Network(args['infile'], run_mode="target", input_dict=input_dict, to_print=False)

    target_list = [*network.targets]
    print("List of targets: ", target_list)
    file.save(f"{network.name}_targets", args['output_dir'], target_list, "txt")
802
+
803
+
804
+
805
+
806
+
807
+ ############################################################
808
+
809
+
810
def main():
    """Entry point: parse the arguments and dispatch to the requested command."""
    args = argument.parse_args()
    cfg = argument.get_config(args, PROJECT_DIR)

    # Logs are always written under <output_dir>/logs.
    logger.set_log_dir(path.join(args.output_dir, "logs"))
    is_valid_dir(logger.LOG_DIR)

    command = args.cmd
    if command in ("target", "full", "fba"):
        run_seed2lp(cfg, command)
    elif command == "network":
        network_rendering(cfg)
    elif command == "flux":
        network_flux(cfg)
    elif command == "scope":
        scope(cfg)
    elif command == "conf":
        save_conf(cfg)
    elif command == "objective_targets":
        get_objective_targets(cfg)
    elif command == "community":
        community(cfg, command)
    elif command == "fluxcom":
        network_flux_community(cfg)

if __name__ == '__main__':
    main()
837
+