gsrap-0.7.1-py3-none-any.whl → gsrap-0.7.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gsrap/.ipynb_checkpoints/__init__-checkpoint.py +3 -1
- gsrap/__init__.py +3 -1
- gsrap/commons/.ipynb_checkpoints/excelhub-checkpoint.py +27 -3
- gsrap/commons/.ipynb_checkpoints/figures-checkpoint.py +105 -0
- gsrap/commons/excelhub.py +27 -3
- gsrap/commons/figures.py +105 -0
- gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py +1 -1
- gsrap/mkmodel/mkmodel.py +1 -1
- gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py +96 -63
- gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py +3 -4
- gsrap/parsedb/completeness.py +96 -63
- gsrap/parsedb/parsedb.py +3 -4
- gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py +2 -2
- gsrap/runsims/simplegrowth.py +2 -2
- {gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/METADATA +3 -1
- {gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/RECORD +19 -17
- {gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/LICENSE.txt +0 -0
- {gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/WHEEL +0 -0
- {gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/entry_points.txt +0 -0
gsrap/.ipynb_checkpoints/__init__-checkpoint.py
CHANGED (same changes as gsrap/__init__.py below)
gsrap/__init__.py
CHANGED
@@ -72,9 +72,9 @@ def main():
     parsedb_parser.add_argument("--precursors", action='store_true', help="Verify biosynthesis of biomass precursors and show blocked ones.")
     parsedb_parser.add_argument("--biosynth", action='store_true', help="Check biosynthesis of all metabolites and detect dead-ends.")
     parsedb_parser.add_argument("-e", "--eggnog", nargs='+', metavar='', type=str, default='-', help="Path to the optional eggnog-mapper annotation table(s).")
-    #parsedb_parser.add_argument("-z", "--zeroes", action='store_true', help="Show maps/modules with 0%% coverage, in addition to partials (use only with --progress).")
     parsedb_parser.add_argument("--goodbefore", metavar='', type=str, default='-', help="Syntax is {pure_mid}-{rid1}-{rid2}. From top to bottom, build the universe until reaction {rid1}, transport {rid2} and metabolite {pure_mid} are reached.")
     parsedb_parser.add_argument("--onlyauthor", metavar='', type=str, default='-', help="Build the universe by parsing contents of the specified author ID only. Contents affected by --goodbefore are parsed anyway.")
+    parsedb_parser.add_argument("--nofigs", action='store_true', help="Do not generate figures.")
 
 
     # add arguments for the 'mkmodel' command
@@ -94,6 +94,7 @@ def main():
     mkmodel_parser.add_argument("--conditional", metavar='', type=float, default=0.5, help="Expected minimum fraction of reactions in a biosynthetic pathway for an actually present conditional biomass precursor.")
     mkmodel_parser.add_argument("--biosynth", action='store_true', help="Check biosynthesis of all metabolites and detect dead-ends.")
     mkmodel_parser.add_argument("-b", "--biomass", metavar='', type=str, default='-', help="Strain ID associated to experimental biomass data.")
+    mkmodel_parser.add_argument("--nofigs", action='store_true', help="Do not generate figures.")
 
 
     # add arguments for the 'runsims' command
@@ -110,6 +111,7 @@ def main():
     runsims_parser.add_argument("--omission", action='store_true', help="Perform single omission experiments to study auxotrophies.")
     runsims_parser.add_argument("--essential", action='store_true', help="Predict essential genes (single-gene knock-out simulations).")
     runsims_parser.add_argument("--factors", action='store_true', help="Predict putative growth factors.")
+    runsims_parser.add_argument("--nofigs", action='store_true', help="Do not generate figures.")
 
 
     # check the inputted subcommand, automatic sys.exit(1) if a bad subprogram was specied.
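Note: the three hunks above only add a --nofigs switch to each subcommand parser. The sketch below is a minimal, hypothetical argparse example of how such a store_true flag ends up as args.nofigs, which the later mkmodel.py and parsedb.py hunks pass to write_excel_model(); the 'parsedb' subcommand name is inferred from the parser variable names and is not taken from the actual entry point.

import argparse

parser = argparse.ArgumentParser(prog='gsrap')                 # hypothetical wiring, not the real CLI
subparsers = parser.add_subparsers(dest='command')
parsedb_parser = subparsers.add_parser('parsedb')
parsedb_parser.add_argument("--nofigs", action='store_true', help="Do not generate figures.")

args = parser.parse_args(['parsedb', '--nofigs'])
print(args.nofigs)                                             # True; False when the flag is omitted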
gsrap/commons/.ipynb_checkpoints/excelhub-checkpoint.py
CHANGED (same changes as gsrap/commons/excelhub.py below)
gsrap/commons/.ipynb_checkpoints/figures-checkpoint.py
ADDED (same content as gsrap/commons/figures.py below)
gsrap/commons/excelhub.py
CHANGED
@@ -1,8 +1,20 @@
 import pandas as pnd
 
 
+from .figures import figure_df_C_F1
 
-def write_excel_model(model, filepath, df_E, df_B, df_P, df_S):
+
+
+def write_excel_model(model, filepath, nofigs, df_E, df_B, df_P, df_S, df_C=None):
+
+
+    # generate figures
+    if nofigs == False:
+
+        if df_C is not None:
+            df_C_F1 = figure_df_C_F1(df_C)
+
+
 
     df_M = []
     df_R = []
@@ -33,6 +45,12 @@ def write_excel_model(model, filepath, df_E, df_B, df_P, df_S):
     df_S.insert(0, 'mid', '') # new columns as first
     df_S['mid'] = df_S.index
     df_S = df_S.reset_index(drop=True)
+
+    # format df_C: universal reaction coverage
+    if df_C is not None:
+        df_C.insert(0, 'kr', '') # new columns as first
+        df_C['kr'] = df_C.index
+        df_C = df_C.reset_index(drop=True)
 
 
     for m in model.metabolites:
@@ -81,7 +99,7 @@ def write_excel_model(model, filepath, df_E, df_B, df_P, df_S):
     df_R = pnd.DataFrame.from_records(df_R)
     df_T = pnd.DataFrame.from_records(df_T)
     df_A = pnd.DataFrame.from_records(df_A)
-    with pnd.ExcelWriter(filepath) as writer:
+    with pnd.ExcelWriter(filepath, engine='xlsxwriter') as writer:
         df_M.to_excel(writer, sheet_name='Metabolites', index=False)
         df_R.to_excel(writer, sheet_name='Reactions', index=False)
         df_T.to_excel(writer, sheet_name='Transporters', index=False)
@@ -90,7 +108,12 @@ def write_excel_model(model, filepath, df_E, df_B, df_P, df_S):
         if df_B is not None: df_B.to_excel(writer, sheet_name='Biomass', index=False)
         if df_P is not None and len(df_P)!=0: df_P.to_excel(writer, sheet_name='Biolog®', index=False)
         if df_S is not None and len(df_S.columns)>2: df_S.to_excel(writer, sheet_name='Biosynth', index=False)
-
+        if df_C is not None:
+            df_C.to_excel(writer, sheet_name='Coverage', index=False)
+            if nofigs == False:
+                worksheet = writer.sheets['Coverage']
+                worksheet.insert_image('A1', 'df_C_F1.png', {'image_data': df_C_F1})
+
 
     sheets_dict = {
         'model_id': model.id,
@@ -102,6 +125,7 @@ def write_excel_model(model, filepath, df_E, df_B, df_P, df_S):
         'Biomass': df_B,
         'Biolog': df_P,
         'Biosynth': df_S,
+        'Coverage': df_C,
     }
     return sheets_dict
 
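Note: the engine='xlsxwriter' switch above is what allows the Coverage sheet to receive an in-memory image via worksheet.insert_image(..., {'image_data': ...}). Below is a minimal self-contained sketch of that mechanism, assuming pandas, matplotlib and xlsxwriter are installed; the file name, sheet name and plotted data are invented for the example and are not part of gsrap.

from io import BytesIO

import matplotlib.pyplot as plt
import pandas as pnd

# render any figure into an in-memory PNG buffer
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
buf = BytesIO()
fig.savefig(buf, format='png')
plt.close(fig)
buf.seek(0)

df = pnd.DataFrame({'kr': ['R00001', 'R00002'], 'modeled': [True, False]})
with pnd.ExcelWriter('example.xlsx', engine='xlsxwriter') as writer:
    df.to_excel(writer, sheet_name='Coverage', index=False)
    worksheet = writer.sheets['Coverage']
    # xlsxwriter reads the PNG from the buffer; 'coverage.png' is only a label here
    worksheet.insert_image('E1', 'coverage.png', {'image_data': buf})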
gsrap/commons/figures.py
ADDED
@@ -0,0 +1,105 @@
+from io import BytesIO
+
+import numpy as np
+import pandas as pnd
+
+from scipy.spatial.distance import pdist
+from scipy.cluster.hierarchy import linkage, cut_tree, dendrogram, leaves_list
+
+import matplotlib.pyplot as plt
+from matplotlib.patches import Patch
+
+
+
+def figure_df_C_F1(df_coverage):
+
+    bin_matrix = df_coverage[[i for i in df_coverage.columns if i not in ['map_ids', 'modeled']]]
+    strains = bin_matrix.columns
+    bin_matrix = bin_matrix.T # features in column
+
+    # pdist() / linkage() will loose the accession information. So here we save a dict:
+    index_to_strain = {i: strain for i, strain in enumerate(bin_matrix.index)}
+
+    # Calculate the linkage matrix using Ward clustering and Jaccard dissimilarity
+    distances = pdist(bin_matrix, 'jaccard')
+    linkage_matrix = linkage(distances, method='ward')
+
+
+    # PART 0: create the frame
+    fig, axs = plt.subplots(
+        nrows=2, ncols=2,
+        figsize=(15, 10),
+        gridspec_kw={ # suplots width proportions.
+            'width_ratios': [0.5, 1.0],
+            'height_ratios': [0.015, 0.985]
+        }
+    )
+
+    # PART 1: dendrogram
+    dn = dendrogram(
+        linkage_matrix, ax=axs[1,0],
+        orientation='left',
+        color_threshold=0, above_threshold_color='black',
+    )
+
+
+    ### PART 2: heatmap
+    ord_leaves = leaves_list(linkage_matrix)
+    ord_leaves = np.flip(ord_leaves) # because leaves are returned in the inverse sense.
+    ord_leaves = [index_to_strain[i] for i in ord_leaves] # convert index as number to index as accession
+    bin_matrix = bin_matrix.loc[ord_leaves, :] # reordered dataframe.
+    axs[1,1].matshow(
+        bin_matrix,
+        cmap='viridis',
+        aspect='auto', # non-squared pixels to fit the axis
+    )
+
+
+    ### PART 3: coverage bar
+    axs[0,1].matshow(
+        df_coverage[['modeled']].T,
+        cmap='cool_r',
+        aspect='auto', # non-squared pixels to fit the axis
+    )
+
+
+    ### PART 4: legends
+    legend_feat = [
+        Patch(facecolor=plt.colormaps.get_cmap('viridis')(0.0), edgecolor='black', label='Absent'),
+        Patch(facecolor=plt.colormaps.get_cmap('viridis')(1.0), edgecolor='black', label='Probably present'),
+    ]
+    legend_cov = [
+        Patch(facecolor=plt.colormaps.get_cmap('cool_r')(0.0), edgecolor='black', label='Not modeled'),
+        Patch(facecolor=plt.colormaps.get_cmap('cool_r')(1.0), edgecolor='black', label='Modeled'),
+    ]
+    l1 = axs[1,0].legend(handles=legend_cov, title='Universe coverage', loc='upper left')
+    l2 = axs[1,0].legend(handles=legend_feat, title='KEGG reaction in strain', loc='lower left')
+    axs[1,0].add_artist(l1) # keep both legends visible
+
+
+    ### PART 5: aesthetics
+    plt.subplots_adjust(wspace=0, hspace=0) # adjust the space between subplots:
+    axs[0,0].axis('off') # remove frame and axis
+    axs[1,0].axis('off') # remove frame and axis
+
+    axs[0,1].yaxis.set_visible(False) # remove ticks, tick labels, axis label
+
+    axs[1,1].xaxis.set_ticks([]) # remove ticks
+    axs[1,1].set_xticklabels([]) # remove tick labels
+    axs[1,1].xaxis.set_label_position("bottom")
+    axs[1,1].set_xlabel("KEGG reactions")
+
+    axs[1,1].yaxis.set_ticks([]) # remove ticks
+    axs[1,1].set_yticklabels([]) # remove tick labels
+    axs[1,1].yaxis.set_label_position("right")
+    axs[1,1].set_ylabel(f"{len(strains)} strains", rotation=270, labelpad=13) # labelpad is in points (1 point = 1/72 inch)
+
+
+    ### PART 6: save fig
+    buf = BytesIO()
+    fig.savefig(buf, dpi=300, bbox_inches='tight') # labelpad is in inches (1 point = 1/72 inch)
+    plt.close(fig)
+    buf.seek(0) # rewind the buffer to the beginning
+
+
+    return buf
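Note: figure_df_C_F1() expects a table indexed by KEGG reaction IDs, with a 'map_ids' column, a boolean 'modeled' column and one 0/1 column per strain, and it returns the rendered figure as a BytesIO PNG. The sketch below is a hypothetical usage example with invented reaction IDs and strain names; it assumes gsrap 0.7.2 is installed so that gsrap.commons.figures is importable, and at least two strain columns are needed for the clustering step.

import pandas as pnd

from gsrap.commons.figures import figure_df_C_F1

df_coverage = pnd.DataFrame(
    {
        'map_ids': [{'map00010'}, {'map00010'}, {'map00020'}],   # made-up KEGG map memberships
        'modeled': [True, True, False],                          # already present in the universe?
        'strainA': [1, 0, 1],                                    # made-up per-strain presence calls
        'strainB': [1, 1, 0],
    },
    index=['R00200', 'R00300', 'R00400'],                        # made-up KEGG reaction IDs
)

buf = figure_df_C_F1(df_coverage)                                # in-memory PNG
with open('coverage_figure.png', 'wb') as handle:
    handle.write(buf.read())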
gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py
CHANGED (same changes as gsrap/mkmodel/mkmodel.py below)
gsrap/mkmodel/mkmodel.py
CHANGED
@@ -141,7 +141,7 @@ def create_model_incore(params):
     cobra.io.write_sbml_model(model, f'{args.outdir}/{model.id}.xml') # SBML # groups are saved only to SBML
     logger.info(f"'{args.outdir}/{model.id}.xml' created!")
     force_id_on_sbml(f'{args.outdir}/{model.id}.xml', model.id) # force introduction of the 'id=""' field
-    sheets_dict = write_excel_model(model, f'{args.outdir}/{model.id}.mkmodel.xlsx', None, df_B, df_P, df_S)
+    sheets_dict = write_excel_model(model, f'{args.outdir}/{model.id}.mkmodel.xlsx', args.nofigs, None, df_B, df_P, df_S)
     logger.info(f"'{args.outdir}/{model.id}.mkmodel.xlsx' created!")
 
 
gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py
CHANGED (same changes as gsrap/parsedb/completeness.py below)
gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py
CHANGED (same changes as gsrap/parsedb/parsedb.py below)
gsrap/parsedb/completeness.py
CHANGED
@@ -1,3 +1,6 @@
+from pathlib import Path
+
+
 import pandas as pnd
 
 
@@ -33,7 +36,7 @@ def parse_eggnog(model, eggnog, idcollection_dict):
 
 
 
-def check_completeness(logger, model, progress, module, focus, eggnog,
+def check_completeness(logger, model, progress, module, focus, eggnog, idcollection_dict, summary_dict):
     # check KEGG annotations in the universe model to get '%' of completeness per pathway/module.
 
 
@@ -55,10 +58,22 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
         if 'kegg.reaction' in r.annotation.keys():
             for kr_id in r.annotation['kegg.reaction']:
                 kr_ids_modeled.add(kr_id)
-    kr_uni_missing =
+    kr_uni_missing = kr_uni - kr_ids_modeled
     kr_uni_coverage = len(kr_ids_modeled.intersection(kr_uni)) / len(kr_uni) * 100
-    logger.info(f"Coverage for '{kr_uni_label}': {round(kr_uni_coverage, 0)}% ({kr_uni_missing} missing).")
+    logger.info(f"Coverage for '{kr_uni_label}': {round(kr_uni_coverage, 0)}% ({len(kr_uni_missing)} missing).")
+
 
+    # define the map?????, containing krs not included in maps
+    krs_in_maps = set()
+    for i in summary_dict: krs_in_maps = krs_in_maps.union(i['kr_ids'])
+    krs_not_in_maps = idcollection_dict['kr'] - krs_in_maps
+    summary_dict.append({
+        'map_id': 'map?????',
+        'map_name': 'Not included in maps',
+        'kr_ids': krs_not_in_maps,
+        'cnt_r': len(krs_not_in_maps),
+        'mds': []
+    })
 
 
     # get all the map / md codes:
@@ -112,52 +127,77 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
         missing_logger = (map_id, missing)
 
 
+        # put the map in the right bucket:
         if missing == set() and map_krs != set():
             maps_finished.add(map_id)
-
         elif map_krs == set():
             maps_noreac.add(map_id)
-
         elif missing == map_krs:
             maps_missing.add(map_id)
-
-            if zeroes:
-                list_coverage.append({
-                    'map_id': map_id,
-                    'map_name_short': map_name_short,
-                    'perc_completeness': 0,
-                    'perc_completeness_str': ' 0',
-                    'present': present,
-                    'missing': missing,
-                    'md_ids': [j['md_id'] for j in i['mds']],
-                })
-
         elif len(missing) < len(map_krs):
             maps_partial.add(map_id)
 
-            # get '%' of completeness:
-            perc_completeness = len(present)/len(map_krs)*100
-            perc_completeness_str = str(round(perc_completeness)) # version to be printed
-            if len(perc_completeness_str)==1:
-                perc_completeness_str = ' ' + perc_completeness_str
-
-            list_coverage.append({
-                'map_id': map_id,
-                'map_name_short': map_name_short,
-                'perc_completeness': perc_completeness,
-                'perc_completeness_str': perc_completeness_str,
-                'present': present,
-                'missing': missing,
-                'md_ids': [j['md_id'] for j in i['mds']],
-            })
 
+        # get '%' of completeness:
+        if len(map_krs) != 0: perc_completeness = len(present)/len(map_krs)*100
+        else: perc_completeness = 100 # for maps_noreac
+        perc_completeness_str = str(round(perc_completeness)) # version to be printed
+        if len(perc_completeness_str)==1:
+            perc_completeness_str = ' ' + perc_completeness_str
+
+
+        # append map to list:
+        list_coverage.append({
+            'map_id': map_id,
+            'map_name_short': map_name_short,
+            'perc_completeness': perc_completeness,
+            'perc_completeness_str': perc_completeness_str,
+            'present': present,
+            'missing': missing,
+            'md_ids': [j['md_id'] for j in i['mds']],
+        })
+
+
+
+    # create coverage dataframe
+    if eggnog != '-' and len(eggnog) >= 2:
+        df_coverage = {}
+        for i in list_coverage:
+            for kr in i['present'].union(i['missing']):
+                if kr not in df_coverage.keys():
+                    df_coverage[kr] = {'map_ids': set()}
+                df_coverage[kr]['map_ids'].add(i['map_id'])
+        df_coverage = pnd.DataFrame.from_records(df_coverage).T
+        df_coverage['modeled'] = False
+        for kr, row in df_coverage.iterrows():
+            if kr in kr_ids_modeled:
+                df_coverage.loc[kr, 'modeled'] = True
+        # build strain columns all at once
+        df_strains = [] # list of small DataFrames
+        for eggfile in eggnog:
+            strain = Path(eggfile).stem
+            eggset = parse_eggnog(model, eggfile, idcollection_dict)
+            col = df_coverage.index.to_series().isin(eggset).astype(int)
+            df_strains.append(col.rename(strain))
+        df_strains = pnd.concat(df_strains, axis=1)
+        # sort rows: upper rows are present in more strains
+        df_strains = df_strains.loc[df_strains.sum(axis=1).sort_values(ascending=False).index]
+        df_coverage = df_coverage.loc[df_strains.index]
+        df_coverage = pnd.concat([df_coverage, df_strains], axis=1)
+        # split in 2: modeled above, non-modeled below:
+        df_coverage = pnd.concat([df_coverage[df_coverage['modeled']==True], df_coverage[df_coverage['modeled']==False]])
+    else: # not interesting in a super-long table without strains in column
+        df_coverage = None
+
+
 
-    # order list by '%' of completness and print:
+    # order list by '%' of completness and print if needed:
     list_coverage = sorted(list_coverage, key=lambda x: x['perc_completeness'], reverse=True)
     for i in list_coverage:
        if progress:
            if focus=='-' or focus in i['md_ids'] or focus==i['map_id']:
-
+                if i['map_id'] in maps_missing or i['map_id'] in maps_partial:
+                    logger.info(f"{i['map_id']}: {i['map_name_short']} {i['perc_completeness_str']}% completed, {len(i['present'])} added, {len(i['missing'])} missing.")
 
 
        # get the correspondent pathway element of the 'summary_dict'
@@ -199,50 +239,43 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
            missing_logger = (md_id, missing)
 
 
+            # put the map in the right bucket:
            if missing == set() and md_krs != set():
                mds_completed.add(md_id)
-
            elif md_krs == set():
                mds_noreac.add(md_id)
-
            elif missing == md_krs:
                mds_missing.add(md_id)
-
-                if zeroes:
-                    list_coverage_md.append({
-                        'md_id': md_id,
-                        'md_name_short': md_name_short,
-                        'perc_completeness': 0,
-                        'perc_completeness_str': ' 0',
-                        'present': present,
-                        'missing': missing,
-                    })
-
            elif len(missing) < len(md_krs):
                mds_partial.add(md_id)
 
-
-
-
-
-
+
+            # get '%' of completeness:
+            if len(md_krs) != 0: perc_completeness = len(present)/len(md_krs)*100
+            else: perc_completeness = 100 # for mds_noreac
+            perc_completeness_str = str(round(perc_completeness)) # version to be printed
+            if len(perc_completeness_str)==1:
+                perc_completeness_str = ' ' + perc_completeness_str
 
-
-
-
-
-
-
-
-
+
+            # append md to list:
+            list_coverage_md.append({
+                'md_id': md_id,
+                'md_name_short': md_name_short,
+                'perc_completeness': perc_completeness,
+                'perc_completeness_str': perc_completeness_str,
+                'present': present,
+                'missing': missing,
+            })
 
 
-        # order list by '%' of completness and print:
+        # order list by '%' of completness and print if needed:
        list_coverage_md = sorted(list_coverage_md, key=lambda x: x['perc_completeness'], reverse=True)
        for z in list_coverage_md:
            if module:
                if focus=='-' or focus==z['md_id']:
-
+                    if z['md_id'] in mds_missing or z['md_id'] in mds_partial:
+                        logger.info(f"{spacer}{z['md_id']}: {z['md_name_short']} {z['perc_completeness_str']}% completed, {len(z['present'])} added, {len(z['missing'])} missing.")
 
 
    # print summary:
@@ -254,6 +287,6 @@ def check_completeness(logger, model, progress, module, focus, eggnog, zeroes, i
    logger.info(f"Maps: finished {len(maps_finished)} - partial {len(maps_partial)} - missing {len(maps_missing)} - noreac {len(maps_noreac)}")
 
 
-    return
+    return df_coverage
 
 
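Note: the coverage table above is only assembled when two or more eggNOG-mapper tables are given. The snippet below is a stand-alone pandas sketch of the same table-building pattern (presence columns per strain, a 'modeled' flag, rows sorted by how many strains hit each reaction, modeled rows first); the reaction IDs and per-strain annotation sets are invented, whereas the real code derives them from the universe model and parse_eggnog().

import pandas as pnd

kr_index = pnd.Index(['R00001', 'R00002', 'R00003'], name='kr')          # made-up KEGG reaction IDs
kr_ids_modeled = {'R00001', 'R00003'}                                    # reactions already modeled
strain_hits = {'strainA': {'R00001'}, 'strainB': {'R00001', 'R00002'}}   # made-up annotations

df_coverage = pnd.DataFrame(index=kr_index)
df_coverage['modeled'] = df_coverage.index.isin(kr_ids_modeled)

# one 0/1 column per strain, as in the df_strains block of the diff
df_strains = pnd.concat(
    [kr_index.to_series().isin(hits).astype(int).rename(strain)
     for strain, hits in strain_hits.items()],
    axis=1)

# reactions present in more strains go on top, then modeled rows above non-modeled ones
df_strains = df_strains.loc[df_strains.sum(axis=1).sort_values(ascending=False).index]
df_coverage = pnd.concat([df_coverage.loc[df_strains.index], df_strains], axis=1)
df_coverage = pnd.concat([df_coverage[df_coverage['modeled']], df_coverage[~df_coverage['modeled']]])
print(df_coverage)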
gsrap/parsedb/parsedb.py
CHANGED
@@ -153,9 +153,8 @@ def main(args, logger):
 
     ###### CHECKS 1
     # check universe completness
-
-
-    if response==1: return 1
+    df_C = check_completeness(logger, universe, args.progress, args.module, args.focus, args.eggnog, idcollection_dict, summary_dict)
+    if type(df_C)==int: return 1
 
 
 
@@ -194,7 +193,7 @@ def main(args, logger):
     cobra.io.write_sbml_model(universe, f'{args.outdir}/universe.xml') # groups are saved only to SBML
     logger.info(f"'{args.outdir}/universe.xml' created!")
     force_id_on_sbml(f'{args.outdir}/universe.xml', 'universe') # force introduction of the 'id=""' field
-    sheets_dict = write_excel_model(universe, f'{args.outdir}/universe.parsedb.xlsx', df_E, None, None, df_S)
+    sheets_dict = write_excel_model(universe, f'{args.outdir}/universe.parsedb.xlsx', args.nofigs, df_E, None, None, df_S, df_C)
     logger.info(f"'{args.outdir}/universe.parsedb.xlsx' created!")
 
 
gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py
CHANGED (same changes as gsrap/runsims/simplegrowth.py below)
gsrap/runsims/simplegrowth.py
CHANGED
@@ -57,9 +57,9 @@ def grow_on_media(logger, model, dbexp, media, fva, universe_in_parsedb=False):
         df_G.loc[obj_id, f'{medium}'] = res_fba
         if universe_in_parsedb:
             if res_fba == 'infeasible' or res_fba == 0.0:
-                logger.warning(f"Growth on '{medium}': {res_fba}.")
+                logger.warning(f"Growth on medium '{medium}': {res_fba}.")
             else:
-                logger.info(f"Growth on '{medium}': {res_fba}.")
+                logger.info(f"Growth on medium '{medium}': {res_fba}.")
 
 
     # perform FVA if requested:
{gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: gsrap
-Version: 0.7.1
+Version: 0.7.2
 Summary:
 License: GNU General Public License v3.0
 Author: Gioele Lazzari
@@ -17,9 +17,11 @@ Requires-Dist: cobra (>=0.29)
 Requires-Dist: colorlog (>=6.9.0)
 Requires-Dist: gdown (>=5.2.0)
 Requires-Dist: gempipe (>=1.38.1)
+Requires-Dist: matplotlib (>=3.9.0)
 Requires-Dist: memote (>=0.17.0)
 Requires-Dist: openpyxl (>=3.1.0)
 Requires-Dist: pandas (>=2.0.0)
+Requires-Dist: xlsxwriter (>=3.1.0)
 Description-Content-Type: text/markdown
 
 Source code for `gsrap`.
{gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
-gsrap/.ipynb_checkpoints/__init__-checkpoint.py,sha256=
-gsrap/__init__.py,sha256=
+gsrap/.ipynb_checkpoints/__init__-checkpoint.py,sha256=nTCojQmYOSv1tRDBEZu7p2Ne_jB9xTUEIylln19tHkk,13818
+gsrap/__init__.py,sha256=nTCojQmYOSv1tRDBEZu7p2Ne_jB9xTUEIylln19tHkk,13818
 gsrap/assets/.ipynb_checkpoints/PM1-checkpoint.csv,sha256=0qjaMVG_t9aFxbHbxON6ecmEUnWPwN9nhmxc61QFeCU,8761
 gsrap/assets/.ipynb_checkpoints/PM2A-checkpoint.csv,sha256=rjYTdwe8lpRS552BYiUP3J71juG2ywVdR5Sux6fjZTY,8816
 gsrap/assets/.ipynb_checkpoints/PM3B-checkpoint.csv,sha256=42IGX_2O5bRYSiHoMuVKT-T-bzVj0cSRZBvGOrbnQMA,8130
@@ -16,7 +16,8 @@ gsrap/commons/.ipynb_checkpoints/biomass-checkpoint.py,sha256=4u7WBaUgo42tBoXDU1
 gsrap/commons/.ipynb_checkpoints/coeffs-checkpoint.py,sha256=qI3_GuqHkeA2KbK9pYdkqJaFwYemAVZJGLRR4QtHt6w,19182
 gsrap/commons/.ipynb_checkpoints/downloads-checkpoint.py,sha256=JFrOYXrzLFhclwMtLmq8xo0QZVyjEn7QfzaTRad7y6I,8460
 gsrap/commons/.ipynb_checkpoints/escherutils-checkpoint.py,sha256=lftRIKAbP4eztaZM83V3LKWZK4DtKDuCiC9A46paVoM,1148
-gsrap/commons/.ipynb_checkpoints/excelhub-checkpoint.py,sha256=
+gsrap/commons/.ipynb_checkpoints/excelhub-checkpoint.py,sha256=wQKAyWZxfy9w_uhR_DzBZz0v7vdiVM-PGDA3k1TOraI,6622
+gsrap/commons/.ipynb_checkpoints/figures-checkpoint.py,sha256=qIjyMMFrm7AkUkdYY4ZZ8SjprUfoC3brW34oHRQvNQk,3689
 gsrap/commons/.ipynb_checkpoints/fluxbal-checkpoint.py,sha256=jgC3-vI9Tbjvqohh2mJwFra4rl_pbUzHWrSa_QAxVO4,1262
 gsrap/commons/.ipynb_checkpoints/logutils-checkpoint.py,sha256=VsnrkIsUftS3MOOwAd0n0peQ7a2X5ZEx930eCtzmW7g,1317
 gsrap/commons/.ipynb_checkpoints/medium-checkpoint.py,sha256=VYKN8X1PNERP6uQDbznZXfgflLEvnw4j1T8AIAdrE7s,2902
@@ -27,7 +28,8 @@ gsrap/commons/biomass.py,sha256=4u7WBaUgo42tBoXDU1D0VUjICatb44e0jfswZrBeHYs,1798
 gsrap/commons/coeffs.py,sha256=qI3_GuqHkeA2KbK9pYdkqJaFwYemAVZJGLRR4QtHt6w,19182
 gsrap/commons/downloads.py,sha256=JFrOYXrzLFhclwMtLmq8xo0QZVyjEn7QfzaTRad7y6I,8460
 gsrap/commons/escherutils.py,sha256=lftRIKAbP4eztaZM83V3LKWZK4DtKDuCiC9A46paVoM,1148
-gsrap/commons/excelhub.py,sha256=
+gsrap/commons/excelhub.py,sha256=wQKAyWZxfy9w_uhR_DzBZz0v7vdiVM-PGDA3k1TOraI,6622
+gsrap/commons/figures.py,sha256=qIjyMMFrm7AkUkdYY4ZZ8SjprUfoC3brW34oHRQvNQk,3689
 gsrap/commons/fluxbal.py,sha256=jgC3-vI9Tbjvqohh2mJwFra4rl_pbUzHWrSa_QAxVO4,1262
 gsrap/commons/logutils.py,sha256=VsnrkIsUftS3MOOwAd0n0peQ7a2X5ZEx930eCtzmW7g,1317
 gsrap/commons/medium.py,sha256=VYKN8X1PNERP6uQDbznZXfgflLEvnw4j1T8AIAdrE7s,2902
@@ -43,29 +45,29 @@ gsrap/mkmodel/.ipynb_checkpoints/__init__-checkpoint.py,sha256=PNze-26HMOwfdJ92K
 gsrap/mkmodel/.ipynb_checkpoints/biologcuration-checkpoint.py,sha256=Nn7z-js-mzzeO23kVM2L7sJ5PNle7AkCUeBcEAYjlFU,15378
 gsrap/mkmodel/.ipynb_checkpoints/gapfill-checkpoint.py,sha256=BPZw4sszlBhAYfHnV0pA7EpG0b2ePwS6kUfFt0Ww-ss,5159
 gsrap/mkmodel/.ipynb_checkpoints/gapfillutils-checkpoint.py,sha256=S6nFUZ1Bbdf13nVJhGK2S5C_V3hd5zwTg2o5nzejngg,3123
-gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py,sha256=
+gsrap/mkmodel/.ipynb_checkpoints/mkmodel-checkpoint.py,sha256=5dW5eAKW9iAFlPITM-0ZjL2FmFp3OTSvLI-xbJ-zGgs,8550
 gsrap/mkmodel/.ipynb_checkpoints/polishing-checkpoint.py,sha256=R1UdFPxN8N27Iu0jsYW2N_1BkWEbBHaMYW6NkCYZK_k,3256
 gsrap/mkmodel/.ipynb_checkpoints/pruner-checkpoint.py,sha256=BVOK1iFXpTgZswDgAv-TgHxKB6W3iucIAo1XWrAbu4A,7009
 gsrap/mkmodel/__init__.py,sha256=PNze-26HMOwfdJ92KiXpr--VV1ftVfo3CAxBZgeokp8,92
 gsrap/mkmodel/biologcuration.py,sha256=Nn7z-js-mzzeO23kVM2L7sJ5PNle7AkCUeBcEAYjlFU,15378
 gsrap/mkmodel/gapfill.py,sha256=BPZw4sszlBhAYfHnV0pA7EpG0b2ePwS6kUfFt0Ww-ss,5159
 gsrap/mkmodel/gapfillutils.py,sha256=S6nFUZ1Bbdf13nVJhGK2S5C_V3hd5zwTg2o5nzejngg,3123
-gsrap/mkmodel/mkmodel.py,sha256=
+gsrap/mkmodel/mkmodel.py,sha256=5dW5eAKW9iAFlPITM-0ZjL2FmFp3OTSvLI-xbJ-zGgs,8550
 gsrap/mkmodel/polishing.py,sha256=R1UdFPxN8N27Iu0jsYW2N_1BkWEbBHaMYW6NkCYZK_k,3256
 gsrap/mkmodel/pruner.py,sha256=BVOK1iFXpTgZswDgAv-TgHxKB6W3iucIAo1XWrAbu4A,7009
 gsrap/parsedb/.ipynb_checkpoints/__init__-checkpoint.py,sha256=1k2K1gz4lIdXAwHEdJ0OhdkPu83woGv0Z4TpT1kGrTk,97
 gsrap/parsedb/.ipynb_checkpoints/annotation-checkpoint.py,sha256=Y02_zXJj_tS1GyBdfuLBy9YJjMgx3mjX6tqr1KhQ-9Q,4810
-gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py,sha256=
+gsrap/parsedb/.ipynb_checkpoints/completeness-checkpoint.py,sha256=mLK2YawP9FjR3yogHhGxF2b2ti9DKmLu9uhibsq0jy4,11064
 gsrap/parsedb/.ipynb_checkpoints/introduce-checkpoint.py,sha256=PuIdXvkF7gmihOEMECXVZ1V4VBOld8p3lZZ2rqXjPH8,16871
 gsrap/parsedb/.ipynb_checkpoints/manual-checkpoint.py,sha256=F16wU8vLyM6V4F611ABuMJtwSAskL5KEgCJ7EQm_F9Y,2177
-gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py,sha256=
+gsrap/parsedb/.ipynb_checkpoints/parsedb-checkpoint.py,sha256=tTWohNPIrw47a08ECFTyHuT4W5KlkznllIEBCpg7Kv0,7169
 gsrap/parsedb/.ipynb_checkpoints/repeating-checkpoint.py,sha256=9PgsSw-H84eN_dFUwK5FLgbqvydsdic4-VjCrZqkfnY,5703
 gsrap/parsedb/__init__.py,sha256=1k2K1gz4lIdXAwHEdJ0OhdkPu83woGv0Z4TpT1kGrTk,97
 gsrap/parsedb/annotation.py,sha256=Y02_zXJj_tS1GyBdfuLBy9YJjMgx3mjX6tqr1KhQ-9Q,4810
-gsrap/parsedb/completeness.py,sha256=
+gsrap/parsedb/completeness.py,sha256=mLK2YawP9FjR3yogHhGxF2b2ti9DKmLu9uhibsq0jy4,11064
 gsrap/parsedb/introduce.py,sha256=PuIdXvkF7gmihOEMECXVZ1V4VBOld8p3lZZ2rqXjPH8,16871
 gsrap/parsedb/manual.py,sha256=F16wU8vLyM6V4F611ABuMJtwSAskL5KEgCJ7EQm_F9Y,2177
-gsrap/parsedb/parsedb.py,sha256=
+gsrap/parsedb/parsedb.py,sha256=tTWohNPIrw47a08ECFTyHuT4W5KlkznllIEBCpg7Kv0,7169
 gsrap/parsedb/repeating.py,sha256=9PgsSw-H84eN_dFUwK5FLgbqvydsdic4-VjCrZqkfnY,5703
 gsrap/runsims/.ipynb_checkpoints/__init__-checkpoint.py,sha256=6E6E1gWgH0V7ls4Omx4mxxC85gMJ_27YqhjugJzlZtY,97
 gsrap/runsims/.ipynb_checkpoints/biosynth-checkpoint.py,sha256=fUlHUo4CfB4rGX9Dth87B1p5E5sz7i6spR7ZoqDDGaI,2836
@@ -74,7 +76,7 @@ gsrap/runsims/.ipynb_checkpoints/essentialgenes-checkpoint.py,sha256=MzHiuaU1gwi
 gsrap/runsims/.ipynb_checkpoints/growthfactors-checkpoint.py,sha256=r_W4idtOSJBDh7HURRsU8s1TqfOZ3TfeVw2HHDxmnGU,2265
 gsrap/runsims/.ipynb_checkpoints/precursors-checkpoint.py,sha256=1RNt_Rxs0L1lolDmYh4_CiZgiwHfU5B_AcomJO6vJ28,2219
 gsrap/runsims/.ipynb_checkpoints/runsims-checkpoint.py,sha256=2FC5Gs8oSYyZTjHF3A7aXB_O6myVfcn3bCxQfLJlZTk,2842
-gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py,sha256=
+gsrap/runsims/.ipynb_checkpoints/simplegrowth-checkpoint.py,sha256=tCQHTMUqum1YwlBKRTNaQoag2co_yQlCaKmISOARAlE,2353
 gsrap/runsims/.ipynb_checkpoints/singleomission-checkpoint.py,sha256=jMuKAi0pINP8Jlrm-yI-tX7D110VzttR3YfTSnDRe4I,2847
 gsrap/runsims/__init__.py,sha256=6E6E1gWgH0V7ls4Omx4mxxC85gMJ_27YqhjugJzlZtY,97
 gsrap/runsims/biosynth.py,sha256=fUlHUo4CfB4rGX9Dth87B1p5E5sz7i6spR7ZoqDDGaI,2836
@@ -83,10 +85,10 @@ gsrap/runsims/essentialgenes.py,sha256=MzHiuaU1gwiPdjZAgG7tkdYzkTTvoNCLp5tkezZhz
 gsrap/runsims/growthfactors.py,sha256=r_W4idtOSJBDh7HURRsU8s1TqfOZ3TfeVw2HHDxmnGU,2265
 gsrap/runsims/precursors.py,sha256=1RNt_Rxs0L1lolDmYh4_CiZgiwHfU5B_AcomJO6vJ28,2219
 gsrap/runsims/runsims.py,sha256=2FC5Gs8oSYyZTjHF3A7aXB_O6myVfcn3bCxQfLJlZTk,2842
-gsrap/runsims/simplegrowth.py,sha256=
+gsrap/runsims/simplegrowth.py,sha256=tCQHTMUqum1YwlBKRTNaQoag2co_yQlCaKmISOARAlE,2353
 gsrap/runsims/singleomission.py,sha256=jMuKAi0pINP8Jlrm-yI-tX7D110VzttR3YfTSnDRe4I,2847
-gsrap-0.7.
-gsrap-0.7.
-gsrap-0.7.
-gsrap-0.7.
-gsrap-0.7.
+gsrap-0.7.2.dist-info/LICENSE.txt,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+gsrap-0.7.2.dist-info/METADATA,sha256=bZQ9_fiR3Fo5_mUcnWD4GgMRYtFDjO0LG74yxtjSkBs,898
+gsrap-0.7.2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+gsrap-0.7.2.dist-info/entry_points.txt,sha256=S9MY0DjfnbKGlZbp5bV7W6dNFy3APoEV84u9x6MV1eI,36
+gsrap-0.7.2.dist-info/RECORD,,
{gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/LICENSE.txt
File without changes
{gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/WHEEL
File without changes
{gsrap-0.7.1.dist-info → gsrap-0.7.2.dist-info}/entry_points.txt
File without changes