PyamilySeq-0.5.1-py3-none-any.whl → PyamilySeq-0.6.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- PyamilySeq/Constants.py +1 -1
- PyamilySeq/PyamilySeq.py +43 -19
- PyamilySeq/PyamilySeq_Genus.py +84 -484
- PyamilySeq/PyamilySeq_Species.py +63 -514
- PyamilySeq/clusterings.py +324 -0
- PyamilySeq/utils.py +84 -1
- {PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/METADATA +52 -68
- PyamilySeq-0.6.0.dist-info/RECORD +15 -0
- PyamilySeq-0.5.1.dist-info/RECORD +0 -14
- {PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/LICENSE +0 -0
- {PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/WHEEL +0 -0
- {PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/entry_points.txt +0 -0
- {PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/top_level.txt +0 -0
PyamilySeq/clusterings.py ADDED
@@ -0,0 +1,324 @@
+import subprocess
+import shutil
+import os
+import glob
+import sys
+import copy
+from collections import OrderedDict
+from collections import defaultdict
+
+def cluster_CDHIT(options, splitter):
+    First_in = open(options.clusters, 'r')
+    clusters = OrderedDict()
+    pangenome_clusters_First = OrderedDict()
+    pangenome_clusters_First_sequences = OrderedDict()
+    first = True
+    taxa_dict = defaultdict(int)
+    reps = OrderedDict()
+    ## Load in all data for easier reuse later
+    for line in First_in:
+        if '>Cluster 7575' in line:
+            print()
+        if line.startswith('>'):
+            if first == False:
+                cluster_size = len(clusters[cluster_id])
+                reps.update({rep: [cluster_size, len(pangenome_clusters_First[cluster_id])]})
+            cluster_id = line.strip('>')
+            cluster_id = cluster_id.strip('\n')
+            cluster_id = cluster_id.split(' ')[1]
+            clusters.update({cluster_id: []})
+            pangenome_clusters_First.update({cluster_id: []})
+            pangenome_clusters_First_sequences.update({cluster_id: []})
+
+            first = False
+        else:
+            clustered = line.split('\t')[1]
+            clustered = clustered.split('>')[1]
+            clustered = clustered.split('...')[0]
+            taxa = clustered.split(splitter)[0]
+            taxa_dict[taxa] += 1
+            if '*' in line:
+                rep = clustered
+                reps.update({rep: [0, 0]})
+            if first == False:
+                clusters[cluster_id].append(clustered)
+                clustered_taxa = clustered.split(splitter)[0]
+                if clustered_taxa not in pangenome_clusters_First[cluster_id]:
+                    pangenome_clusters_First[cluster_id].append(clustered_taxa)
+                pangenome_clusters_First_sequences[cluster_id].append(clustered)
+    return taxa_dict, pangenome_clusters_First, pangenome_clusters_First_sequences, reps
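For readers unfamiliar with what cluster_CDHIT walks through: a CD-HIT `.clstr` file has one `>Cluster N` header per cluster, followed by one tab-separated member line per sequence, with `*` marking the representative. The snippet below is a minimal, standalone sketch of the same line-splitting used above; the genome/gene names and the `|` splitter are invented for illustration and are not taken from PyamilySeq's test data.

```python
from collections import OrderedDict, defaultdict

# Two hypothetical clusters in CD-HIT .clstr format ('|' separates genome and gene IDs).
example_clstr = (
    ">Cluster 0\n"
    "0\t303aa, >GenomeA|gene_0001... *\n"
    "1\t301aa, >GenomeB|gene_0044... at 98.34%\n"
    ">Cluster 1\n"
    "0\t150aa, >GenomeA|gene_0912... *\n"
)

clusters = OrderedDict()      # cluster id -> member sequence IDs
taxa_dict = defaultdict(int)  # genome -> number of member sequences seen
splitter = '|'

for line in example_clstr.splitlines():
    if line.startswith('>'):
        cluster_id = line.strip('>').split(' ')[1]   # ">Cluster 0" -> "0"
        clusters[cluster_id] = []
    else:
        member = line.split('\t')[1].split('>')[1].split('...')[0]  # e.g. "GenomeA|gene_0001"
        clusters[cluster_id].append(member)
        taxa_dict[member.split(splitter)[0]] += 1

print(dict(clusters))   # {'0': ['GenomeA|gene_0001', 'GenomeB|gene_0044'], '1': ['GenomeA|gene_0912']}
print(dict(taxa_dict))  # {'GenomeA': 2, 'GenomeB': 1}
```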
+
+
+
+#@profile
+def combined_clustering_counting(options, pangenome_clusters_First, reps, combined_pangenome_clusters_First_Second_clustered, splitter):
+    num_clustered_First = defaultdict(list)
+    pangenome_clusters_Type = copy.deepcopy(pangenome_clusters_First)
+    list_of_reps = list(reps.keys())
+    for cluster, pep_genomes in pangenome_clusters_First.items():
+        rep = list_of_reps[int(cluster)]  # get the rep of the current pep cluster
+        Com_PEP_Genomes = 0
+        Seconds = 0
+        seen_Seconds = []
+        added_Second_genomes = 0
+        try:  # get the cluster from the storf clusters which contains this rep
+            clustered_combined = combined_pangenome_clusters_First_Second_clustered[rep]  # Not true clusters - I put a PEP as key myself
+            seen_clust_Genomes = []
+            num_clustered_First[cluster].append(rep + '_' + str(len(pep_genomes)))
+            for clust in clustered_combined:
+                if options.sequence_tag not in clust:  # Not good enough at the moment
+                    clust_Genome = clust.split(splitter)[0]
+                    if clust_Genome not in seen_clust_Genomes:
+                        seen_clust_Genomes.append(clust_Genome)
+                        if clust_Genome not in pep_genomes:
+                            Com_PEP_Genomes += 1
+                    num_clustered_First[cluster].append(clust + '_' + str(reps[clust][1]))
+                elif options.sequence_tag in clust:
+                    Seconds += 1
+                    clust_Genome = clust.split(splitter)[0]
+                    if clust_Genome not in seen_Seconds:
+                        seen_Seconds.append(clust_Genome)
+                    if clust_Genome not in seen_clust_Genomes:
+                        seen_clust_Genomes.append(clust_Genome)
+                        if clust_Genome not in pep_genomes:
+                            added_Second_genomes += 1
+                else:
+                    sys.exit("Error: looking for sequence_tag")
+
+            size_of_pep_clusters = []
+            peps = num_clustered_First[cluster]
+            for pep in peps:
+                pep = pep.rsplit('_', 1)
+                size_of_pep_clusters.append(int(pep[1]))
+            pangenome_clusters_Type[cluster] = [len(num_clustered_First[cluster]), sum(size_of_pep_clusters),
+                                                size_of_pep_clusters, added_Second_genomes, Seconds, len(seen_Seconds)]
+
+        except KeyError:
+            ###Singleton
+            num_pep_genomes = [len(pep_genomes)]
+            pangenome_clusters_Type[cluster] = [1, len(pep_genomes), num_pep_genomes, added_Second_genomes, Seconds,
+                                                len(seen_Seconds)]
+    # pangenome_clusters_Type = [Number of First clustered genomes or genera, Size of the cluster, Ditto, Added Seconds, Number of Seconds, Unique Seconds]
+    return pangenome_clusters_Type
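The commented legend above pins down the shape of each value in the returned dictionary. Purely as an illustration (the numbers are invented, and "First"/"Second" stand for the two clustering rounds, e.g. PEP and StORF sequences), one entry might look like this:

```python
# [Number of First clustered genomes or genera, Size of the cluster, individual First cluster sizes,
#  Added Second genomes, Number of Second sequences, Unique Second genomes] - values invented for illustration.
pangenome_clusters_Type = {
    '12': [3, 42, [20, 15, 7], 2, 5, 4],
}
```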
+
+
+#@profile
+def single_clustering_counting(pangenome_clusters_First, reps):
+    num_clustered_First = defaultdict(list)
+    recorded_First = []
+    pangenome_clusters_Type = copy.deepcopy(pangenome_clusters_First)
+    list_of_reps = list(reps.keys())
+    for cluster, First_taxa in pangenome_clusters_First.items():
+        rep = list_of_reps[int(cluster)]  # get the rep of the current pep cluster
+
+        try:  # get the cluster from the storf clusters which contains this rep
+            num_clustered_First[cluster].append(rep + '_' + str(len(First_taxa)))
+            size_of_First_clusters = []
+            Firsts = num_clustered_First[cluster]
+            for First in Firsts:
+                First = First.rsplit('_', 1)
+                size_of_First_clusters.append(int(First[1]))
+                recorded_First.append(First[0])
+            pangenome_clusters_Type[cluster] = [len(num_clustered_First[cluster]), sum(size_of_First_clusters),
+                                                size_of_First_clusters, 0, 0, 0]
+
+        except KeyError:
+            ###Singleton
+            num_First_taxa = [len(First_taxa)]
+            pangenome_clusters_Type[cluster] = [1, len(First_taxa), num_First_taxa, 0, 0, 0]
+
+    # pangenome_clusters_Type = [Number of First clustered genomes or genera, Size of the cluster, Ditto, 0, 0, 0]
+    return pangenome_clusters_Type
+
+
+
+#@profile
+def combined_clustering_CDHIT(options, taxa_dict, splitter):
+    Second_in = open(options.reclustered, 'r')
+    combined_pangenome_clusters_First = OrderedDict()
+    combined_pangenome_clusters_First_sequences = OrderedDict()
+    combined_pangenome_clusters_Second = OrderedDict()
+    combined_pangenome_clusters_Second_sequences = OrderedDict()
+    combined_pangenome_clusters_First_Second_clustered = OrderedDict()
+
+    not_Second_only_cluster_ids = []
+    already_seen_PEP = []
+    Combined_clusters = OrderedDict()
+    Combined_reps = OrderedDict()
+    first = True
+    for line in Second_in:
+        if line.startswith('>'):
+            if first == False:
+                cluster_size = len(Combined_clusters[cluster_id])
+                Combined_reps.update({rep: cluster_size})
+                for pep in combined_pangenome_clusters_First_sequences[cluster_id]:
+                    if pep != []:
+                        if pep in already_seen_PEP:
+                            continue
+                        else:
+                            already_seen_PEP.append(pep)
+                if len(combined_pangenome_clusters_Second_sequences[cluster_id]) > 0 and len(combined_pangenome_clusters_First_sequences[cluster_id]) > 0:
+                    if len(combined_pangenome_clusters_First_sequences[cluster_id]) > 1:  # If we have clustered >1 PEP family, we need to record 1 as key and all others are val
+                        all_but_first = combined_pangenome_clusters_First_sequences[cluster_id][1:]
+                        storfs_clustered = combined_pangenome_clusters_Second_sequences[cluster_id]
+                        VALUE = all_but_first + storfs_clustered
+                    else:
+                        VALUE = combined_pangenome_clusters_Second_sequences[cluster_id]
+                    KEY = combined_pangenome_clusters_First_sequences[cluster_id][0]
+                    combined_pangenome_clusters_First_Second_clustered.update({KEY: VALUE})
+            cluster_id = line.strip('>')
+            cluster_id = cluster_id.strip('\n')
+            cluster_id = cluster_id.split(' ')[1]
+            Combined_clusters.update({cluster_id: []})
+            combined_pangenome_clusters_First.update({cluster_id: []})
+            combined_pangenome_clusters_First_sequences.update({cluster_id: []})
+            combined_pangenome_clusters_Second.update({cluster_id: []})
+            combined_pangenome_clusters_Second_sequences.update({cluster_id: []})
+
+            first = False
+        else:
+            clustered = line.split('\t')[1]
+            clustered = clustered.split('>')[1]
+            clustered = clustered.split('...')[0]
+            genome = clustered.split(splitter)[0]
+            taxa_dict[genome] += 1
+            if '*' in line:
+                rep = clustered
+                Combined_reps.update({rep: 0})
+            if first == False:
+                Combined_clusters[cluster_id].append(clustered)
+                clustered_taxa = clustered.split(splitter)[0]
+                if options.sequence_tag in line:
+                    if clustered_taxa not in combined_pangenome_clusters_Second[cluster_id]:
+                        combined_pangenome_clusters_Second[cluster_id].append(clustered_taxa)
+                    combined_pangenome_clusters_Second_sequences[cluster_id].append(clustered)
+                else:
+                    if cluster_id not in not_Second_only_cluster_ids:
+                        not_Second_only_cluster_ids.append(cluster_id)  # Tell us which StORF_Reporter clustered are unmatched to a PEP
+                    if clustered_taxa not in combined_pangenome_clusters_First[cluster_id]:
+                        combined_pangenome_clusters_First[cluster_id].append(clustered_taxa)
+                    combined_pangenome_clusters_First_sequences[cluster_id].append(clustered)
+
+
+    return combined_pangenome_clusters_First_Second_clustered, not_Second_only_cluster_ids, combined_pangenome_clusters_Second
+
+
+def cluster_EdgeList(options, splitter):
+    if options.cluster_format == 'TSV':
+        separator = '\t'
+    elif options.cluster_format == 'CSV':
+        separator = ','
+    cluster_id = 0
+    last_rep = ''
+    first = True
+    First_in = open(options.clusters, 'r')
+    pangenome_clusters_First = OrderedDict()
+    pangenome_clusters_First_sequences = OrderedDict()
+    taxa_dict = defaultdict(int)
+    reps = OrderedDict()
+    for line in First_in:
+        rep, child = line.strip().split(separator)
+        child_taxa = child.split(splitter)[0]  # Extracting the genome identifier from the child sequence
+        # Counting occurrences of genomes
+        taxa_dict[child_taxa] += 1
+        if first == True:
+            pangenome_clusters_First[0] = []
+            pangenome_clusters_First_sequences[0] = []
+            first = False
+
+        if rep != last_rep and last_rep != '':
+            cluster_id += 1
+            pangenome_clusters_First[cluster_id] = []
+            pangenome_clusters_First_sequences[cluster_id] = []
+            cluster_size = len(pangenome_clusters_First_sequences[cluster_id-1])
+            reps.update({last_rep: [cluster_size, len(pangenome_clusters_First[cluster_id-1])]})
+            pangenome_clusters_First[cluster_id] = []
+            pangenome_clusters_First_sequences[cluster_id] = []
+        if child_taxa not in pangenome_clusters_First[cluster_id]:
+            pangenome_clusters_First[cluster_id].append(child_taxa)
+
+        pangenome_clusters_First_sequences[cluster_id].append(child)
+        last_rep = rep
+    cluster_size = len(pangenome_clusters_First_sequences[cluster_id])
+    reps.update({rep: [cluster_size, len(pangenome_clusters_First[cluster_id])]})
+
+
+    return taxa_dict, pangenome_clusters_First, pangenome_clusters_First_sequences, reps
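cluster_EdgeList expects a two-column edge list (representative, then member), one pair per line, TSV or CSV depending on options.cluster_format. A minimal, hypothetical TSV input and the grouping it implies is sketched below; the IDs and the `|` splitter are invented for illustration.

```python
from collections import OrderedDict, defaultdict

# Hypothetical TSV edge list: representative <TAB> member, grouped by representative.
example_edges = (
    "GenomeA|gene_0001\tGenomeA|gene_0001\n"
    "GenomeA|gene_0001\tGenomeB|gene_0044\n"
    "GenomeC|gene_0100\tGenomeC|gene_0100\n"
)

clusters = OrderedDict()      # representative -> member sequence IDs
taxa_dict = defaultdict(int)  # genome -> number of member sequences seen
splitter = '|'

for line in example_edges.splitlines():
    rep, child = line.strip().split('\t')
    clusters.setdefault(rep, []).append(child)
    taxa_dict[child.split(splitter)[0]] += 1

print(dict(clusters))
# {'GenomeA|gene_0001': ['GenomeA|gene_0001', 'GenomeB|gene_0044'],
#  'GenomeC|gene_0100': ['GenomeC|gene_0100']}
```

Note that the parser above opens a new cluster whenever the representative column changes, so it relies on rows for the same representative being adjacent in the input.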
+
+
+def combined_clustering_Edge_List(options, splitter):
+    if options.cluster_format == 'TSV':
+        separator = '\t'
+    elif options.cluster_format == 'CSV':
+        separator = ','
+
+    cluster_id = 0
+    last_rep = ''
+    Second_in = open(options.reclustered, 'r')
+    combined_pangenome_clusters_First = OrderedDict()
+    combined_pangenome_clusters_First_sequences = OrderedDict()
+    combined_pangenome_clusters_Second = OrderedDict()
+    combined_pangenome_clusters_Second_sequences = OrderedDict()
+    combined_pangenome_clusters_First_Second_clustered = OrderedDict()
+
+    not_Second_only_cluster_ids = []
+    already_seen_PEP = []
+    Combined_clusters = OrderedDict()
+    Combined_reps = OrderedDict()
+    first = True
+    for line in Second_in:
+        rep, child = line.strip().split(separator)
+        child_taxa = child.split(splitter)[0]  # Extracting the genome identifier from the child sequence
+
+        if first == True:
+            Combined_clusters.update({cluster_id: []})
+            combined_pangenome_clusters_First.update({cluster_id: []})
+            combined_pangenome_clusters_First_sequences.update({cluster_id: []})
+            combined_pangenome_clusters_Second.update({cluster_id: []})
+            combined_pangenome_clusters_Second_sequences.update({cluster_id: []})
+            Combined_reps.update({rep: 0})
+            first = False
+
+        if first == False:
+            if rep != last_rep and last_rep != '':
+                cluster_size = len(Combined_clusters[cluster_id])
+                Combined_reps.update({rep: cluster_size})
+                for pep in combined_pangenome_clusters_First_sequences[cluster_id]:
+                    if pep != []:
+                        if pep in already_seen_PEP:
+                            continue
+                        else:
+                            already_seen_PEP.append(pep)
+                if len(combined_pangenome_clusters_Second_sequences[cluster_id]) > 0 and len(combined_pangenome_clusters_First_sequences[cluster_id]) > 0:
+                    if len(combined_pangenome_clusters_First_sequences[cluster_id]) > 1:  # If we have clustered >1 PEP family, we need to record 1 as key and all others are val
+                        all_but_first = combined_pangenome_clusters_First_sequences[cluster_id][1:]
+                        storfs_clustered = combined_pangenome_clusters_Second_sequences[cluster_id]
+                        VALUE = all_but_first + storfs_clustered
+                    else:
+                        VALUE = combined_pangenome_clusters_Second_sequences[cluster_id]
+                    KEY = combined_pangenome_clusters_First_sequences[cluster_id][0]
+                    combined_pangenome_clusters_First_Second_clustered.update({KEY: VALUE})
+
+                cluster_id += 1
+                Combined_clusters.update({cluster_id: []})
+                combined_pangenome_clusters_First.update({cluster_id: []})
+                combined_pangenome_clusters_First_sequences.update({cluster_id: []})
+                combined_pangenome_clusters_Second.update({cluster_id: []})
+                combined_pangenome_clusters_Second_sequences.update({cluster_id: []})
+                Combined_reps.update({rep: 0})
+
+            Combined_clusters[cluster_id].append(child)
+            if options.sequence_tag in line:
+                if child_taxa not in combined_pangenome_clusters_Second[cluster_id]:
+                    combined_pangenome_clusters_Second[cluster_id].append(child_taxa)
+                combined_pangenome_clusters_Second_sequences[cluster_id].append(child)
+            else:
+                if cluster_id not in not_Second_only_cluster_ids:
+                    not_Second_only_cluster_ids.append(cluster_id)  # Tell us which StORF_Reporter clustered are unmatched to a PEP
+                if child_taxa not in combined_pangenome_clusters_First[cluster_id]:
+                    combined_pangenome_clusters_First[cluster_id].append(child_taxa)
+                combined_pangenome_clusters_First_sequences[cluster_id].append(child)
+
+        last_rep = rep
+
+    return combined_pangenome_clusters_First_Second_clustered, not_Second_only_cluster_ids, combined_pangenome_clusters_Second
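Taken together, the new module splits the work into a parsing step (cluster_CDHIT / cluster_EdgeList), an optional merge of a second clustering round (combined_clustering_CDHIT / combined_clustering_Edge_List), and a counting step (single_clustering_counting / combined_clustering_counting). The sketch below shows how the single-round CD-HIT path could be driven directly; it is only an illustration, assuming an argparse-style options object with a clusters attribute, a `|` genome/gene splitter, and an existing .clstr file (the path is a placeholder).

```python
from types import SimpleNamespace

from PyamilySeq.clusterings import cluster_CDHIT, single_clustering_counting

# Hypothetical stand-in for PyamilySeq's argparse options.
options = SimpleNamespace(clusters='clustered_sequences.clstr')  # placeholder path
splitter = '|'  # assumed separator between genome and gene identifiers

taxa_dict, clusters_first, cluster_sequences, reps = cluster_CDHIT(options, splitter)
cluster_types = single_clustering_counting(clusters_first, reps)
# cluster_types[cluster_id] -> [number of First entries, summed size, individual sizes, 0, 0, 0]
```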
PyamilySeq/utils.py CHANGED
@@ -3,6 +3,8 @@ import shutil
 import os
 import glob
 import collections
+from tempfile import NamedTemporaryFile
+import sys


 def is_tool_installed(tool_name):
@@ -30,7 +32,76 @@ def fix_path(path):
     return fixed_path


-
+def wrap_sequence(sequence, width=60):
+    wrapped_sequence = []
+    for i in range(0, len(sequence), width):
+        wrapped_sequence.append(sequence[i:i + width])
+    return "\n".join(wrapped_sequence)
+
+
+def read_fasta(fasta_file):
+    sequences = {}
+    current_sequence = None
+    with open(fasta_file, 'r') as file:
+        for line in file:
+            line = line.strip()
+            if not line:
+                continue  # Skip empty lines
+            if line.startswith('>'):
+                current_sequence = line[1:]  # Remove '>' character
+                sequences[current_sequence] = ''
+            else:
+                sequences[current_sequence] += line
+    return sequences
+
+
+def reorder_dict_by_keys(original_dict, sorted_keys):
+    return {k: original_dict[k] for k in sorted_keys}
+def custom_sort_key(k, dict1, dict2):
+    return (len(dict1[k]), len(dict2[k]))
+
+def sort_keys_by_values(dict1, dict2):
+    sorted_keys = sorted(dict1.keys(), key=lambda k: custom_sort_key(k, dict1, dict2), reverse=True)
+    return sorted_keys
+
+def select_longest_gene(sequences):
+    """Select the longest sequence for each genome."""
+    longest_sequences = {}
+    for seq_id, sequence in sequences.items():
+        genome = seq_id.split('|')[0]  # Assuming genome name can be derived from the sequence ID
+        if genome not in longest_sequences or len(sequence) > len(longest_sequences[genome][1]):
+            longest_sequences[genome] = (seq_id, sequence)
+    return longest_sequences
+
+
+def run_mafft_on_sequences(options, sequences, output_file):
+    print("Conducting MAFFT alignment.")
+    """Run mafft on the given sequences and write to output file."""
+    # Create a temporary input file for mafft
+    with NamedTemporaryFile('w', delete=False) as temp_input_file:
+        for header, sequence in sequences.items():
+            temp_input_file.write(f">{header}\n{sequence}\n")
+        temp_input_file_path = temp_input_file.name
+
+    # Run mafft
+    try:
+        with open(output_file, 'w') as output_f:
+            if options.verbose == True:
+                subprocess.run(
+                    ['mafft', '--auto', temp_input_file_path],
+                    stdout=output_f,
+                    stderr=sys.stderr,
+                    check=True
+                )
+            else:
+                subprocess.run(
+                    ['mafft', '--auto', temp_input_file_path],
+                    stdout=output_f,
+                    stderr=subprocess.DEVNULL,  # Suppress stderr
+                    check=True
+                )
+    finally:
+        os.remove(temp_input_file_path)  # Clean up the temporary file
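A brief usage sketch for the new alignment helper, assuming the mafft executable is installed and on PATH; the options object only needs the verbose attribute checked above, and the sequence IDs and sequences are invented for illustration.

```python
from types import SimpleNamespace

from PyamilySeq.utils import run_mafft_on_sequences

options = SimpleNamespace(verbose=False)  # hypothetical stand-in for the argparse options

sequences = {
    'GenomeA|gene_0001': 'ATGGCTAAAGGTGAAGAACTG',
    'GenomeB|gene_0044': 'ATGGCTAAAGGCGAAGAGCTG',
}

# Writes the MAFFT alignment of the two sequences to aligned.fasta.
run_mafft_on_sequences(options, sequences, 'aligned.fasta')
```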
@@ -45,6 +116,7 @@ def read_separate_files(input_dir, name_split, combined_out):

     gff_features = []
     with open(gff_file, 'r') as file:
+        seen_seq_ids = collections.defaultdict(int)
         lines = file.readlines()
         for line in lines:
             line_data = line.split('\t')
@@ -54,6 +126,11 @@ def read_separate_files(input_dir, name_split, combined_out):
             feature = line_data[2]
             strand = line_data[6]
             start, end = int(line_data[3]), int(line_data[4])
             seq_id = line_data[8].split('ID=')[1].split(';')[0]
+            if seq_id in seen_seq_ids:
+                seen_seq_ids[seq_id] += 1
+                seq_id += '_' + str(seen_seq_ids[seq_id])
+            else:
+                seen_seq_ids[seq_id] = 1
             gff_features.append((contig, start, end, strand, feature, seq_id))
     fasta_dict = collections.defaultdict(str)
@@ -93,6 +170,7 @@ def read_combined_files(input_dir, name_split, combined_out):
     fasta_dict = collections.defaultdict(str)
     gff_features = []
     with open(gff_file, 'r') as file:
+        seen_seq_ids = collections.defaultdict(int)
         lines = file.readlines()
         fasta_section = False
         for line in lines:
@@ -114,6 +192,11 @@ def read_combined_files(input_dir, name_split, combined_out):
             strand = line_data[6]
             start, end = int(line_data[3]), int(line_data[4])
             seq_id = line_data[8].split('ID=')[1].split(';')[0]
+            if seq_id in seen_seq_ids:
+                seen_seq_ids[seq_id] += 1
+                seq_id += '_' + str(seen_seq_ids[seq_id])
+            else:
+                seen_seq_ids[seq_id] = 1
             gff_features.append((contig, start, end, strand, feature, seq_id))

     for contig, fasta in fasta_dict.items():
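The two hunks above guard against GFF files that reuse the same ID= value: repeated IDs get a numeric suffix so downstream keys stay unique. A tiny standalone illustration of that counter pattern (the IDs are invented):

```python
import collections

seen_seq_ids = collections.defaultdict(int)

def dedupe(seq_id):
    """Return seq_id unchanged the first time it is seen, and 'seq_id_N' for repeats."""
    if seq_id in seen_seq_ids:
        seen_seq_ids[seq_id] += 1
        return seq_id + '_' + str(seen_seq_ids[seq_id])
    seen_seq_ids[seq_id] = 1
    return seq_id

print([dedupe(x) for x in ['gene_1', 'gene_2', 'gene_1', 'gene_1']])
# ['gene_1', 'gene_2', 'gene_1_2', 'gene_1_3']
```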
{PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PyamilySeq
-Version: 0.5.1
+Version: 0.6.0
 Summary: PyamilySeq - a tool to look for sequence-based gene families identified by clustering methods such as CD-HIT, DIAMOND, BLAST or MMseqs2.
 Home-page: https://github.com/NickJD/PyamilySeq
 Author: Nicholas Dimonaco
@@ -34,80 +34,83 @@ PyamilySeq requires Python 3.6 or higher. Install using pip:
 pip install PyamilySeq
 ```
 
+### Examples: Below are two examples of running PyamilySeq in its two main modes.
+#### 'Full Mode': Will conduct clustering of sequences as part of PyamilySeq run
+```bash
+PyamilySeq -run_mode Full -group_mode Species -output_dir ../../test_data/testing -input_type combined -input_dir .../test_data/genomes -name_split _combined.gff3 -pid 0.99 -len_diff 0.99 -clust_tool CD-HIT -gpa True -con True -w 99 -verbose True
+```
+#### 'Partial Mode': Will take the output of a sequence clustering
+```bash
+PyamilySeq -run_mode Partial -group_mode Species -output_dir .../test_data/testing -cluster_file .../test_data/CD-HIT/combined_Ensmbl_pep_CD_90_60.clstr -clust_tool CD-HIT -original_fasta .../test_data/combined_Ensmbl_cds.fasta -gpa True -con True -w 99 -verbose True
+```
+
+```bash
+Calculating Groups
+Gene Groups:
+first_core_99: 3103
+first_core_95: 0
+first_core_15: 3217
+first_core_0: 4808
+Total Number of Gene Groups (Including Singletons): 11128
+```
+
+
 ## Usage - Menu
 ```
-usage: PyamilySeq.py [-h] -run_mode {Full,Partial} -group_mode {Species,Genus}
--
-[-
-
-
-[-reclustered RECLUSTERED] [-seq_tag SEQUENCE_TAG]
-[-groups CORE_GROUPS] [-w WRITE_FAMILIES] [-con CON_CORE]
-[-original_fasta ORIGINAL_FASTA]
-[-gpa GENE_PRESENCE_ABSENCE_OUT] [-verbose {True,False}]
-[-v]
-
-PyamilySeq v0.5.1: PyamilySeq Run Parameters.
+usage: PyamilySeq.py [-h] -run_mode {Full,Partial} -group_mode {Species,Genus} -clust_tool {CD-HIT} -output_dir OUTPUT_DIR [-input_type {separate,combined}] [-input_dir INPUT_DIR] [-name_split NAME_SPLIT]
+                     [-pid PIDENT] [-len_diff LEN_DIFF] [-mem CLUSTERING_MEMORY] [-t CLUSTERING_THREADS] [-cluster_file CLUSTER_FILE] [-reclustered RECLUSTERED] [-seq_tag SEQUENCE_TAG]
+                     [-core_groups CORE_GROUPS] [-genus_groups GENUS_GROUPS] [-w WRITE_FAMILIES] [-con CON_CORE] [-original_fasta ORIGINAL_FASTA] [-gpa GENE_PRESENCE_ABSENCE_OUT] [-verbose {True,False}] [-v]
+
+PyamilySeq v0.6.0: PyamilySeq Run Parameters.
 
 options:
   -h, --help            show this help message and exit
 
 Required Arguments:
   -run_mode {Full,Partial}
-                        Run Mode: Should PyamilySeq be run in "Full" or
-
-
-
-                        "Genus" mode? - Genus mode not currently functioning
-  -clust_tool {CD-HIT}  Clustering tool to use: CD-HIT, DIAMOND, BLAST or
-                        MMseqs2.
+                        Run Mode: Should PyamilySeq be run in "Full" or "Partial" mode?
+  -group_mode {Species,Genus}
+                        Group Mode: Should PyamilySeq be run in "Species" or "Genus" mode?
+  -clust_tool {CD-HIT}  Clustering tool to use: CD-HIT, DIAMOND, BLAST or MMseqs2.
   -output_dir OUTPUT_DIR
                         Directory for all output files.
 
 Full-Mode Arguments - Required when "-run_mode Full" is used:
   -input_type {separate,combined}
-                        Type of input files: 'separate' for separate FASTA and
-                        GFF files, 'combined' for GFF files with embedded
-                        FASTA sequences.
+                        Type of input files: 'separate' for separate FASTA and GFF files, 'combined' for GFF files with embedded FASTA sequences.
   -input_dir INPUT_DIR  Directory containing GFF/FASTA files.
   -name_split NAME_SPLIT
-                        substring used to split the filename and extract the
-                        genome name ('_combined.gff3' or '.gff').
+                        substring used to split the filename and extract the genome name ('_combined.gff3' or '.gff').
   -pid PIDENT           Default 0.95: Pident threshold for clustering.
-  -len_diff LEN_DIFF    Default 0.80: Minimum length difference between
-
-
+  -len_diff LEN_DIFF    Default 0.80: Minimum length difference between clustered sequences - (-s) threshold for CD-HIT clustering.
+
+Clustering Runtime Arguments - Optional when "-run_mode Full" is used:
+  -mem CLUSTERING_MEMORY
+                        Default 4000: Memory to be allocated for clustering (in MBs).
+  -t CLUSTERING_THREADS
+                        Default 4: Threads to be allocated for clustering.
 
 Partial-Mode Arguments - Required when "-run_mode Partial" is used:
   -cluster_file CLUSTER_FILE
-                        Clustering output file containing CD-HIT, TSV or CSV
-                        Edge List
+                        Clustering output file containing CD-HIT, TSV or CSV Edge List
 
 Grouping Arguments - Use to fine-tune grouping of genes after clustering:
   -reclustered RECLUSTERED
-                        Clustering output file from secondary round of
-                        clustering
+                        Currently only works on Partial Mode: Clustering output file from secondary round of clustering.
   -seq_tag SEQUENCE_TAG
-                        Default - "StORF": Unique identifier to be used to
-
-
--
+                        Default - "StORF": Unique identifier to be used to distinguish the second of two rounds of clustered sequences
+  -core_groups CORE_GROUPS
+                        Default - ('99,95,15'): Gene family groups to use for "Species" mode
+  -genus_groups GENUS_GROUPS
+                        Default - ('1,2,3,4,5,6'): Gene family groups to use for "Genus" mode
 
 Output Parameters:
-  -w WRITE_FAMILIES     Default - No output: Output sequences of identified
-
-                        - Must provide FASTA file with -fasta
-  -con CON_CORE         Default - No output: Output aligned and concatinated
-                        sequences of identified families - used for MSA
-                        (provide levels at which to output "-w 99,95" - Must
-                        provide FASTA file with -fasta
+  -w WRITE_FAMILIES     Default - No output: Output sequences of identified families (provide levels at which to output "-w 99,95" - Must provide FASTA file with -fasta
+  -con CON_CORE         Default - No output: Output aligned and concatinated sequences of identified families - used for MSA (provide levels at which to output "-w 99,95" - Must provide FASTA file with -fasta
   -original_fasta ORIGINAL_FASTA
-                        FASTA file to use in conjunction with "-w" or "-con"
-                        when running in Partial Mode.
+                        FASTA file to use in conjunction with "-w" or "-con" when running in Partial Mode.
   -gpa GENE_PRESENCE_ABSENCE_OUT
-                        Default - False: If selected, a Roary formatted
-                        gene_presence_absence.csv will be created - Required
-                        for Coinfinder and other downstream tools
+                        Default - False: If selected, a Roary formatted gene_presence_absence.csv will be created - Required for Coinfinder and other downstream tools
 
 Misc:
   -verbose {True,False}
@@ -116,25 +119,6 @@ Misc:
 
 ```
 
-### Examples: Below are two examples of running PyamilySeq in its two main modes.
-#### 'Full Mode': Will conduct clustering of sequences as part of PyamilySeq run
-```bash
-PyamilySeq -run_mode Full -group_mode Species -output_dir ../../test_data/testing -input_type combined -input_dir .../test_data/genomes -name_split _combined.gff3 -pid 0.99 -len_diff 0.99 -clust_tool CD-HIT -gpa True -con True -w 99 -verbose True
-```
-#### 'Partial Mode': Will take the output of a sequence clustering
-```bash
-PyamilySeq -run_mode Partial -group_mode Species -output_dir .../test_data/testing -cluster_file .../test_data/CD-HIT/combined_Ensmbl_pep_CD_90_60.clstr -clust_tool CD-HIT -original_fasta .../test_data/combined_Ensmbl_cds.fasta -gpa True -con True -w 99 -verbose True
-```
-
-```bash
-Calculating Groups
-Gene Groups:
-first_core_99: 3103
-first_core_95: 0
-first_core_15: 3217
-first_core_0: 4808
-Total Number of Gene Groups (Including Singletons): 11128
-```
 
 ## Seq-Combiner: This tool is provided to enable the pre-processing of multiple GFF/FASTA files together ready to be clustered by the user
 ### Example:
@@ -145,7 +129,7 @@ Seq-Combiner -input_dir .../test_data/genomes -name_split _combined.gff3 -output
 ```bash
 usage: Seq_Combiner.py [-h] -input_dir INPUT_DIR -input_type {separate,combined} -name_split NAME_SPLIT -output_dir OUTPUT_DIR -output_name OUTPUT_FILE
 
-Seq-Combiner v0.5.1: Seq-Combiner Run Parameters.
+Seq-Combiner v0.6.0: Seq-Combiner Run Parameters.
 
 options:
   -h, --help            show this help message and exit
PyamilySeq-0.6.0.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+PyamilySeq/CD-Hit_StORF-Reporter_Cross-Genera_Builder.py,sha256=UzQ5iOKCNfurxmj1pnkowF11YfWBO5vnBCKxQK6goB8,26538
+PyamilySeq/Constants.py,sha256=LNbn_6kugNL3dl4HFDkq8ZC4cizcXy6MWRz3hP4QJGI,31
+PyamilySeq/PyamilySeq.py,sha256=_Lwk_NYUmOMzoJsCRpiMIx5CFuKYxnYBjZ6NhTdBiCE,13026
+PyamilySeq/PyamilySeq_Genus.py,sha256=dKSM4ZGrOzZPIMkDveyiWD4WqBeYjwAP3g1diR7-0rE,12107
+PyamilySeq/PyamilySeq_Species.py,sha256=urDUDkTu4-5L5rPlaWN2kicXOalqDDVK9lLOrOMdDUU,13219
+PyamilySeq/Seq_Combiner.py,sha256=bHEIR-MZODSGm8n69ZIN-XzEoct5WZ1kH5Xa6uKCu4Y,1972
+PyamilySeq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+PyamilySeq/clusterings.py,sha256=Xa0YTvfmI0A8sPdaNHw3j2PVVvGR0JlrebUb_vP2kt8,16183
+PyamilySeq/utils.py,sha256=bC1fpJ8SS14oB4EHvsgZbR0ttn83BiBttlMYeD6IS3g,9863
+PyamilySeq-0.6.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+PyamilySeq-0.6.0.dist-info/METADATA,sha256=4O545LBKwlxMTj4q8kCth-IEYJW8Vz4YhaaJ5R3P9EE,7706
+PyamilySeq-0.6.0.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+PyamilySeq-0.6.0.dist-info/entry_points.txt,sha256=QtXD1tmnLvRAkIpGWZgXm1lfLH8GGeCwxmgoHZaTp98,102
+PyamilySeq-0.6.0.dist-info/top_level.txt,sha256=J6JhugUQTq4rq96yibAlQu3o4KCM9WuYfqr3w1r119M,11
+PyamilySeq-0.6.0.dist-info/RECORD,,
PyamilySeq-0.5.1.dist-info/RECORD REMOVED
@@ -1,14 +0,0 @@
-PyamilySeq/CD-Hit_StORF-Reporter_Cross-Genera_Builder.py,sha256=UzQ5iOKCNfurxmj1pnkowF11YfWBO5vnBCKxQK6goB8,26538
-PyamilySeq/Constants.py,sha256=mmMeeD5svOnN-Kn7LUd16DY8rWTcU2WajldKlNkuuWY,31
-PyamilySeq/PyamilySeq.py,sha256=7pyBujHmdYR6DVNlKQ2BqX9-AylTpNexrcWN-rtyauk,11275
-PyamilySeq/PyamilySeq_Genus.py,sha256=JpLLu3QaahUHBe7E80xVHtFORGuyeUMOt9eiLN5uazc,31286
-PyamilySeq/PyamilySeq_Species.py,sha256=6LHEdp6Vndoder8dlVSuyUKHdCbo5Rbxea1rnncrceY,35172
-PyamilySeq/Seq_Combiner.py,sha256=bHEIR-MZODSGm8n69ZIN-XzEoct5WZ1kH5Xa6uKCu4Y,1972
-PyamilySeq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-PyamilySeq/utils.py,sha256=KhEnwzgVZyUu_Q92ukRPUrVD2505xSg9NY6e75nUmPQ,6487
-PyamilySeq-0.5.1.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-PyamilySeq-0.5.1.dist-info/METADATA,sha256=FJRNy2WTazUXemY4jHYPmpJEdq5JQMebhfeicagLTCA,7792
-PyamilySeq-0.5.1.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
-PyamilySeq-0.5.1.dist-info/entry_points.txt,sha256=QtXD1tmnLvRAkIpGWZgXm1lfLH8GGeCwxmgoHZaTp98,102
-PyamilySeq-0.5.1.dist-info/top_level.txt,sha256=J6JhugUQTq4rq96yibAlQu3o4KCM9WuYfqr3w1r119M,11
-PyamilySeq-0.5.1.dist-info/RECORD,,

{PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/LICENSE: file without changes
{PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/WHEEL: file without changes
{PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/entry_points.txt: file without changes
{PyamilySeq-0.5.1.dist-info → PyamilySeq-0.6.0.dist-info}/top_level.txt: file without changes