PyamilySeq 0.8.0__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PyamilySeq/Cluster_Summary.py +163 -0
- PyamilySeq/Constants.py +1 -1
- PyamilySeq/Group_Splitter.py +163 -116
- PyamilySeq/PyamilySeq.py +21 -17
- PyamilySeq/Seq_Combiner.py +8 -4
- PyamilySeq/utils.py +53 -62
- {PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/METADATA +105 -44
- PyamilySeq-0.9.0.dist-info/RECORD +16 -0
- {PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/entry_points.txt +1 -0
- PyamilySeq-0.8.0.dist-info/RECORD +0 -15
- {PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/LICENSE +0 -0
- {PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/WHEEL +0 -0
- {PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/top_level.txt +0 -0
PyamilySeq/Cluster_Summary.py
ADDED
@@ -0,0 +1,163 @@
+import argparse
+from collections import OrderedDict
+from collections import defaultdict
+
+try:
+    from .Constants import *
+    from .utils import *
+except (ModuleNotFoundError, ImportError, NameError, TypeError) as error:
+    from Constants import *
+    from utils import *
+
+
+def categorise_percentage(percent):
+    """Categorise the percentage of genomes with multicopy genes."""
+    if 20 <= percent < 40:
+        return "20-40%"
+    elif 40 <= percent < 60:
+        return "40-60%"
+    elif 60 <= percent < 80:
+        return "60-80%"
+    elif 80 <= percent < 95:
+        return "80-95%"
+    elif 95 <= percent < 99:
+        return "95-99%"
+    elif 99 <= percent <= 100:
+        return "99-100%"
+    return None
+
+# Read cd-hit .clstr file and extract information
+def read_cd_hit_output(clustering_output):
+    clusters = OrderedDict()
+
+    with open(clustering_output, 'r') as f:
+        current_cluster_id = None
+
+        for line in f:
+            line = line.strip()
+            if line.startswith(">Cluster"):
+                current_cluster_id = line.split(' ')[1]
+                clusters[current_cluster_id] = []
+            elif line and current_cluster_id is not None:
+                parts = line.split('\t')
+                if len(parts) > 1:
+                    clustered_info = parts[1]
+                    length = clustered_info.split(',')[0]
+                    length = int(''.join(c for c in length if c.isdigit()))
+                    clustered_header = clustered_info.split('>')[1].split('...')[0]
+                    clustered_header = '>' + clustered_header
+
+                    if 'at ' in clustered_info and '%' in clustered_info.split('at ')[-1]:
+                        percent_identity = extract_identity(clustered_info)
+                    elif line.endswith('*'):
+                        percent_identity = 100.0
+                    else:
+                        raise ValueError("Percent identity not found in the string.")
+
+                    clusters[current_cluster_id].append({
+                        'header': clustered_header,
+                        'length': length,
+                        'percent_identity': percent_identity
+                    })
+
+    return clusters
+
+
+# Summarise the information for each cluster
+def summarise_clusters(options, clusters, output):
+    multicopy_groups = defaultdict(int)  # Counter for groups with multicopy genes
+
+    with open(output, 'w') as out_f:
+        out_f.write("Cluster_ID\tNum_Sequences\tAvg_Length\tLength_Range\tAvg_Identity\tIdentity_Range\n")
+
+        for cluster_id, seqs in clusters.items():
+            num_seqs = len(seqs)
+            lengths = [seq['length'] for seq in seqs]
+            identities = [seq['percent_identity'] for seq in seqs]
+
+            avg_length = sum(lengths) / num_seqs if num_seqs > 0 else 0
+            length_range = f"{min(lengths)}-{max(lengths)}" if num_seqs > 0 else "N/A"
+
+            avg_identity = sum(identities) / num_seqs if num_seqs > 0 else 0
+            identity_range = f"{min(identities):.2f}-{max(identities):.2f}" if num_seqs > 0 else "N/A"
+
+            out_f.write(
+                f"{cluster_id}\t{num_seqs}\t{avg_length:.2f}\t{length_range}\t{avg_identity:.2f}\t{identity_range}\n")
+
+            # Count genomes with more than one gene
+            genome_to_gene_count = defaultdict(int)
+            for seq in seqs:
+                genome = seq['header'].split('|')[0].replace('>','')
+                genome_to_gene_count[genome] += 1
+
+            num_genomes_with_multiple_genes = sum(1 for count in genome_to_gene_count.values() if count > 1)
+
+            # Calculate the percentage of genomes with multicopy genes
+            multicopy_percentage = (num_genomes_with_multiple_genes / options.genome_num) * 100
+            category = categorise_percentage(multicopy_percentage)
+            if category:
+                multicopy_groups[category] += 1
+
+    # Define the order of categories for printout
+    category_order = ["20-40%", "40-60%", "60-80%", "80-95%", "95-99%", "99-100%"]
+
+    # Print the number of clusters with multicopy genes in each percentage range, in the correct order
+    for category in category_order:
+        print(f"Number of clusters with multicopy genes in {category} range: {multicopy_groups[category]}")
+
+
+# Main function to parse arguments and run the analysis
+def main():
+    parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': Cluster-Summary - A tool to summarise CD-HIT clustering files.')
+    ### Required Arguments
+    required = parser.add_argument_group('Required Parameters')
+    required.add_argument('-input_clstr', action="store", dest="input_clstr",
+                          help='Input CD-HIT .clstr file',
+                          required=True)
+    required.add_argument('-output', action="store", dest="output",
+                          help="Output TSV file to store cluster summaries - Will add '.tsv' if not provided by user",
+                          required=True)
+    required.add_argument('-genome_num', action='store', dest='genome_num', type=int,
+                          help='The total number of genomes must be provide',
+                          required=True)
+    #required.add_argument("-clustering_format", action="store", dest="clustering_format", choices=['CD-HIT','TSV','CSV'],
+    #                      help="Clustering format to use: CD-HIT or TSV (MMseqs2, BLAST, DIAMOND) / CSV edge-list file (Node1\tNode2).",
+    #                      required=True)
+
+    optional = parser.add_argument_group('Optional Arguments')
+    optional.add_argument('-output_dir', action="store", dest="output_dir",
+                          help='Default: Same as input file',
+                          required=False)
+
+    misc = parser.add_argument_group("Misc Parameters")
+    misc.add_argument("-verbose", action="store_true", dest="verbose",
+                      help="Print verbose output.",
+                      required=False)
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq: Group-Summary version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")
+
+
+    options = parser.parse_args()
+    print("Running PyamilySeq " + PyamilySeq_Version + ": Group-Summary ")
+
+    ### File handling
+    options.input_clstr = fix_path(options.input_clstr)
+    if options.output_dir is None:
+        options.output_dir = os.path.dirname(os.path.abspath(options.input_clstr))
+    output_path = os.path.abspath(options.output_dir)
+    if not os.path.exists(output_path):
+        os.makedirs(output_path)
+    output_name = options.output
+    if not output_name.endswith('.tsv'):
+        output_name += '.tsv'
+    output_file_path = os.path.join(output_path, output_name)
+    ###
+
+    clusters = read_cd_hit_output(options.input_clstr)
+    summarise_clusters(options, clusters, output_file_path)
+
+
+if __name__ == "__main__":
+    main()
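For orientation, the new `read_cd_hit_output` above follows the standard CD-HIT `.clstr` layout: a `>Cluster N` header followed by tab-separated member lines, with the representative flagged by `*` and every other member reporting its identity after `at `. The snippet below is a minimal sketch of those parsing rules against an invented two-member cluster; the genome and gene names are placeholders, and the helper re-implements the same string handling rather than importing the module.

```python
import re

# Invented .clstr excerpt: CD-HIT flags the representative with '*' and reports
# identity for the other members after 'at ' (optionally prefixed with strand, e.g. '+/').
example_clstr = """>Cluster 0
0\t301aa, >Escherichia_coli_110957|geneA... *
1\t298aa, >Escherichia_coli_26561|geneB... at +/98.67%"""

def parse_member(line):
    # Same string handling as Cluster_Summary.read_cd_hit_output above.
    info = line.split('\t')[1]
    length = int(''.join(c for c in info.split(',')[0] if c.isdigit()))
    header = '>' + info.split('>')[1].split('...')[0]
    if 'at ' in info and '%' in info.split('at ')[-1]:
        identity = float(re.search(r'at [+-/]*(\d+\.\d+)%', info).group(1))
    elif line.endswith('*'):
        identity = 100.0
    else:
        raise ValueError("Percent identity not found in the string.")
    return {'header': header, 'length': length, 'percent_identity': identity}

for member_line in example_clstr.split('\n')[1:]:
    print(parse_member(member_line))
# {'header': '>Escherichia_coli_110957|geneA', 'length': 301, 'percent_identity': 100.0}
# {'header': '>Escherichia_coli_26561|geneB', 'length': 298, 'percent_identity': 98.67}
```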
PyamilySeq/Constants.py
CHANGED
@@ -1,2 +1,2 @@
-PyamilySeq_Version = 'v0.
+PyamilySeq_Version = 'v0.9.0'
 
PyamilySeq/Group_Splitter.py
CHANGED
@@ -1,3 +1,4 @@
+import collections
 import subprocess
 import os
 import argparse
@@ -21,6 +22,7 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
         '-T', str(options.clustering_threads),
         '-M', str(options.clustering_memory),
         '-d', "0",
+        '-g', "1",
         '-sc', "1",
         '-sf', "1"
     ]
@@ -29,24 +31,29 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
     else:
         subprocess.run(cdhit_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
 
-
-def calculate_new_rep_seq(cluster_data):
+@profile
+def calculate_new_rep_seq(cluster_data, length_weight=1.0, identity_weight=1.0):
     total_length = sum(entry['length'] for entry in cluster_data)
     avg_length = total_length / len(cluster_data)
 
     total_identity = sum(entry['percent_identity'] for entry in cluster_data)
     avg_identity = total_identity / len(cluster_data)
 
+    # Normalize length and identity
+    max_length = max(entry['length'] for entry in cluster_data)
+    max_identity = 100  # Assuming percent_identity is out of 100
+
     # Calculate a score based on both length difference and percent identity
     def score(entry):
-
-
-        return
+        normalized_length_diff = abs(entry['length'] - avg_length) / max_length
+        normalized_identity_diff = abs(entry['percent_identity'] - avg_identity) / max_identity
+        return (length_weight * normalized_length_diff) + (identity_weight * (1 - normalized_identity_diff))
 
     rep_entry = min(cluster_data, key=score)
     return rep_entry
 
 
+
 def length_within_threshold(rep_length, length, len_diff):
     return abs(rep_length - length) / rep_length <= len_diff
 
@@ -58,16 +65,22 @@ def check_if_all_identical(clustered_sequences):
     return len(lengths) == 1 and len(perc_idents) == 1
 
 
-
+
+def read_fasta_groups(options):
     groups = defaultdict(list)
     genome_count = defaultdict(int)
     current_group = None
     current_sequence = []
 
-
+    # Parse the list of specific group numbers if provided
+    selected_groups = None
+    if options.groups is not None:
+        selected_groups = [int(g.strip()) for g in options.groups.split(',')]
+
+    with open(options.input_fasta, 'r') as f:
         for line in f:
             if line.startswith('>'):
-                if current_group is not None:
+                if current_group is not None and (selected_groups is None or group_number in selected_groups):
                     groups[current_group].append((current_group_header, ''.join(current_sequence)))
 
                 current_group_header = line.strip()
@@ -75,6 +88,13 @@ def read_fasta_groups(fasta_file):
                 genome = current_group_header.split('|')[1]
                 current_sequence = []
                 genome_count[genome] += 1
+
+                # Only process if group matches the selected_groups or if no specific groups were provided
+                group_number = int(current_group.replace('>Group_', ''))  # Assuming format 'Group_n'
+                if selected_groups is not None and group_number not in selected_groups:
+                    current_group = None  # Skip this group
+                    continue
+
             else:
                 current_sequence.append(line.strip())
 
@@ -110,11 +130,12 @@ def read_cd_hit_output(clustering_output):
                     clustered_header = clustered_info.split('>')[1].split('...')[0]
                     clustered_header = '>' + clustered_header
 
-                    if 'at
-                        percent_identity =
-
-                    if '*' in line:
+                    if 'at ' in clustered_info and '%' in clustered_info.split('at ')[-1]:
+                        percent_identity = extract_identity(line)
+                    elif line.endswith('*'):
                         percent_identity = 100.0
+                    else:
+                        raise ValueError("Percent identity not found in the string.")
 
                     clusters[current_cluster_id].append({
                         'header': clustered_header,
@@ -124,14 +145,16 @@ def read_cd_hit_output(clustering_output):
 
     return clusters
 
-
-def separate_groups(
-    groups, genome_count = read_fasta_groups(
+@profile
+def separate_groups(options, clustering_mode):
+    groups, genome_count = read_fasta_groups(options)
 
     paralog_groups = defaultdict(int)  # To track number of paralog groups
 
-
     for group_header, sequences in groups.items():
+        if options.verbose:
+            print(f"\n###\nCurrent Group: {group_header.replace('>','')}\n")
+
         group_name = group_header.split('|')[0]  # Get the group part (e.g., '>Group_n')
 
         # Count genomes with more than one gene
@@ -141,106 +164,109 @@ def separate_groups(input_fasta, options, clustering_mode):
            genome_to_gene_count[genome] += 1
 
        num_genomes_with_multiple_genes = sum(1 for count in genome_to_gene_count.values() if count > 1)
-        total_genomes = len(genome_to_gene_count)
 
         # Check if the group meets the threshold for having paralogs
-        if
-
+        if options.groups == None:
+            if (num_genomes_with_multiple_genes / options.genome_num) * 100 < options.group_threshold:
+                continue
+
 
         group_file_name = group_name.replace('>','')
 
-        temp_fasta = f"{options.output_dir}{group_file_name}.fasta"
+        temp_fasta = f"{options.output_dir}/{group_file_name}.fasta"
         write_fasta(sequences, temp_fasta)
 
         # Run cd-hit on the individual group
         clustering_output = f"{options.output_dir}/{group_file_name}_clustering"
+
         run_cd_hit(options, temp_fasta, clustering_output, clustering_mode)
 
         # Read the clustering results to find subgroups
         clustered_sequences = read_cd_hit_output(clustering_output + '.clstr')
 
-
-
+        if len(clustered_sequences) == 1:
+            # Detect if all sequences are identical in length and percentage identity
+            all_same = check_if_all_identical(clustered_sequences)
 
         # **Global subgroup counter for the entire major group**
         subgroup_id = 0
-
-        sequences_to_remove = []
+
 
         if not all_same:
- [43 removed lines not shown in this diff view]
+            # Iterate through each cluster in clustered_sequences
+            for cluster_key, cluster in clustered_sequences.items():
+
+                remaining_sequences_tmp = sequences.copy()  # Track unprocessed sequences
+                remaining_sequences = [entry for entry in remaining_sequences_tmp if entry[0] in
+                                       {seq_entry['header'] for seq_entry in cluster}]
+                sequences_to_remove = []
+
+                while remaining_sequences:
+                    # Track subgroups for this cluster pass
+                    subgroup_sequences = []
+                    genome_seen = set()
+
+                    # Recalculate representative sequence dynamically for this cluster
+                    rep = calculate_new_rep_seq(
+                        [entry for entry in cluster if entry['header'] in (h for h, _ in remaining_sequences)]
+                    )
+
+                    # Find the sequence corresponding to rep['header'] from the list of sequences
+                    rep_seq = next((seq for header, seq in sequences if header == rep['header']), None)
+
+                    # Save previously checked seqs, so we don't have to compare them again.
+                    checked = collections.defaultdict(float)
+
+                    # Process each genome to select the best matching sequence
+                    for genome in genome_to_gene_count:
+                        best_sequence = None
+                        best_score = None  # Initialise with a very low score, so that even negative scores can be selected
+
+                        # Iterate over each sequence in the remaining sequences for this genome
+                        for header, seq in remaining_sequences:
+                            genome_id = header.split('|')[1]
+
+                            if genome_id == genome:  # Ensure this sequence belongs to the current genome
+                                if rep_seq == seq:
+                                    levenshtein_distance = 0
+                                else:
+                                    if seq in checked:
+                                        levenshtein_distance = checked[seq]
+                                    else:
+                                        levenshtein_distance = levenshtein_distance_calc(rep_seq,seq)
+                                        checked[seq] = levenshtein_distance
+                                # Lower Levenshtein distance means more 'similar' sequences
+                                score = levenshtein_distance
 
                                 # Check if this sequence has a higher score than the current best
-                                if
+                                if best_sequence == None:
+                                    best_score = score
+                                    best_sequence = (header, seq)  # Store the best matching sequence for this genome
+                                elif score < best_score:
                                     best_score = score
                                     best_sequence = (header, seq)  # Store the best matching sequence for this genome
 
-
-
-
-
-
-
-
+                        # Add the best sequence for this genome to the subgroup
+                        if best_sequence is not None:
+                            new_header = f">{group_file_name}_subgroup_{subgroup_id}|{best_sequence[0].split('|')[1]}|{best_sequence[0].split('|')[2]}"
+                            subgroup_sequences.append((new_header, best_sequence[1]))
+                            sequences_to_remove.append(best_sequence)
+                            genome_seen.add(genome)
+
+                    # Write each subgroup into a separate FASTA file
+                    if subgroup_sequences:
+                        subgroup_file = f"{options.output_dir}/{group_file_name}_subgroup_{subgroup_id}.fasta"
+                        write_fasta(subgroup_sequences, subgroup_file)
 
-
-
-
+                        # Remove processed sequences from the remaining list
+                        remaining_sequences = [item for item in remaining_sequences if
+                                               item[0] not in {h for h, _ in sequences_to_remove}]
 
-
-
-
-                        write_fasta(subgroup_sequences, subgroup_file)
+                        # Increment subgroup ID for the next subgroup
+                        subgroup_id += 1
+                        paralog_groups[group_name] += 1  # Count this group as a paralog group
 
-                        # Remove processed sequences from the remaining list
-                        remaining_sequences = [item for item in remaining_sequences if
-                                               item[0] not in {h for h, _ in sequences_to_remove}]
 
-                        # Increment subgroup ID globally for the next subgroup
-                        subgroup_id += 1
-                        paralog_groups[group_name] += 1  # Count this group as a paralog group
 
 
         else:
@@ -255,7 +281,7 @@ def separate_groups(input_fasta, options, clustering_mode):
 
                 # Determine the next subgroup for this genome
                 subgroup_id = genome_count[genome] % num_subgroups
-                new_header = f"{
+                new_header = f"{group_file_name}_subgroup_{subgroup_id}|{genome}|{header.split('|')[2]}"
                 subgroup_sequences[subgroup_id].append((new_header, seq))
 
                 # Increment the count for this genome
@@ -266,6 +292,12 @@ def separate_groups(input_fasta, options, clustering_mode):
                 subgroup_file = f"{options.output_dir}/{group_file_name}_subgroup_{subgroup_id}.fasta"
                 write_fasta(seqs, subgroup_file)
 
+                # Increment subgroup ID globally for the next subgroup
+                subgroup_id += 1
+                paralog_groups[group_name] += 1  # Count this group as a paralog group
+
+
+
         # Clean up temporary fasta file if the option is set
         if options.delete_temp_files:
             if temp_fasta and os.path.exists(temp_fasta):
@@ -282,54 +314,69 @@ def separate_groups(input_fasta, options, clustering_mode):
 
 
 def main():
-    parser = argparse.ArgumentParser(description='
+    parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': Group-Splitter - A tool to split multi-copy gene groups identified by PyamilySeq.')
     ### Required Arguments
-    required = parser.add_argument_group('Required
+    required = parser.add_argument_group('Required Parameters')
     required.add_argument('-input_fasta', action='store', dest='input_fasta',
                           help='Input FASTA file containing gene groups.',
                           required=True)
+    required.add_argument('-sequence_type', action='store', dest='sequence_type', default='DNA',choices=['AA', 'DNA'],
+                          help='Default - DNA: Are groups "DNA" or "AA" sequences?',
+                          required=True)
+    required.add_argument('-genome_num', action='store', dest='genome_num', type=int,
+                          help='The total number of genomes must be provide',
+                          required=True)
     required.add_argument('-output_dir', action='store', dest='output_dir',
                           help='Output directory.',
                           required=True)
 
-
-
-
-
-
-
-
+    regrouping_params = parser.add_argument_group('Regrouping Parameters')
+    regrouping_params.add_argument('-groups', action="store", dest='groups', default=None,
+                                   help='Default - auto: Detect groups to be split (see -group_threshold). '
+                                        'Provide "-groups 1,2,3,4" with group IDs to split specific groups.',
+                                   required=False)
+    regrouping_params.add_argument('-group_threshold', action='store', dest='group_threshold', type=float, default=80,
+                                   help='Minimum percentage of genomes with multi-copy (default: 80.0) - Does not work with "-groups"')
+
+    cdhit_params = parser.add_argument_group('CD-HIT Reclustering Parameters')
+    cdhit_params.add_argument('-c', action='store', dest='pident', type=float, default=0.8,
+                              help='Sequence identity threshold (default: 0.8) - Probably should be higher than what was used in initial clustering.')
+    cdhit_params.add_argument('-s', action='store', dest='len_diff', type=float, default=0.20,
                              help="Length difference cutoff (default: 0.20) - Often the most impactful parameter to split 'multi-copy' gene groups.")
+    cdhit_params.add_argument('-T', action='store', dest='clustering_threads', type=int, default=4,
                               help='Number of threads for clustering (default: 4)')
-
+    cdhit_params.add_argument('-M', action='store', dest='clustering_memory', type=int, default=2000,
                               help='Memory limit in MB for clustering (default: 2000)')
-
-
-
-
-
-
-    misc = parser.add_argument_group('Misc Arguments')
-    misc.add_argument('-v', action='store_true', dest='version',
-                      help='Print out version number and exit',
+
+
+    misc = parser.add_argument_group("Misc Parameters")
+    misc.add_argument('-no_delete_temp_files', action='store_false', dest='delete_temp_files',
+                      help='Default: Delete all temporary files after processing.',
                       required=False)
+    misc.add_argument("-verbose", action="store_true", dest="verbose" ,
+                      help="Print verbose output.",
+                      required=False)
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq: Group-Splitter version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")
+
 
     options = parser.parse_args()
+    print("Running PyamilySeq: Group-Splitter " + PyamilySeq_Version)
 
-    # Check for version flag
-    if options.version:
-        print(f"Group-Splitter version {PyamilySeq_Version}")
-        exit(0)
 
-    options = parser.parse_args()
 
     if not os.path.exists(options.output_dir):
         os.makedirs(options.output_dir)
 
-
-
+    if options.sequence_type == 'DNA':
+        clustering_mode = 'cd-hit-est'
+    else:
+        clustering_mode = 'cd-hit'
 
-
+    separate_groups(options, clustering_mode)
 
 
 if __name__ == "__main__":
+
     main()
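A small worked example of the reworked representative-sequence scoring above may help: `calculate_new_rep_seq` now normalises the length difference by the cluster's maximum length, combines it with an identity term, and the member with the lowest combined score becomes the new representative. The sketch below copies that formula into a standalone function so the arithmetic can be checked by hand; the three cluster members and their headers are invented.

```python
# Standalone copy of the scoring formula from Group_Splitter.calculate_new_rep_seq (v0.9.0);
# the cluster members below are made up purely for illustration.
def pick_rep(cluster_data, length_weight=1.0, identity_weight=1.0):
    avg_length = sum(e['length'] for e in cluster_data) / len(cluster_data)
    avg_identity = sum(e['percent_identity'] for e in cluster_data) / len(cluster_data)
    max_length = max(e['length'] for e in cluster_data)

    def score(e):
        norm_len_diff = abs(e['length'] - avg_length) / max_length
        norm_id_diff = abs(e['percent_identity'] - avg_identity) / 100  # identities run 0-100
        return length_weight * norm_len_diff + identity_weight * (1 - norm_id_diff)

    return min(cluster_data, key=score)  # lowest combined score wins

cluster = [
    {'header': '>Group_1|genomeA|gene1', 'length': 300, 'percent_identity': 100.0},  # score ~1.09
    {'header': '>Group_1|genomeB|gene2', 'length': 290, 'percent_identity': 95.0},   # score ~1.11
    {'header': '>Group_1|genomeC|gene3', 'length': 150, 'percent_identity': 80.0},   # score ~1.21
]
print(pick_rep(cluster)['header'])  # >Group_1|genomeA|gene1
```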
PyamilySeq/PyamilySeq.py
CHANGED
@@ -27,9 +27,10 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
         '-o', clustering_output,
         '-c', str(options.pident),
         '-s', str(options.len_diff),
-        '-T', str(options.
+        '-T', str(options.threads),
         '-M', str(options.clustering_memory),
         '-d', "0",
+        '-g', "1",
         '-sc', "1",
         '-sf', "1"
     ]
@@ -42,7 +43,7 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
 def main():
     parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': A tool that groups genes into unique clusters.')
     ### Required Arguments
-    required = parser.add_argument_group('Required
+    required = parser.add_argument_group('Required Parameters')
     required.add_argument('-run_mode', action='store', dest='run_mode', choices=['Full','Partial'],
                           help='Run Mode: Should PyamilySeq be run in "Full" or "Partial" mode?',
                           required=True)
@@ -56,7 +57,7 @@ def main():
                           help="Directory for all output files.",
                           required=True)
     ### Full-Mode Arguments
-    full_mode_args = parser.add_argument_group('Full-Mode
+    full_mode_args = parser.add_argument_group('Full-Mode Parameters - Required when "-run_mode Full" is used')
     full_mode_args.add_argument("-input_type", action="store", dest="input_type", choices=['separate', 'combined'],
                                 help="Type of input files: 'separate' for separate FASTA and GFF files,"
                                      " 'combined' for GFF files with embedded FASTA sequences.",
@@ -84,18 +85,18 @@ def main():
     clustering_args.add_argument("-mem", action="store", dest="clustering_memory", type=int, default=4000,
                                  help="Default 4000: Memory to be allocated for clustering (in MBs).",
                                  required=False)
-    clustering_args.add_argument("-t", action="store", dest="
-                                 help="Default
+    clustering_args.add_argument("-t", action="store", dest="threads", type=int, default=8,
+                                 help="Default 8: Threads to be allocated for clustering and/or alignment.",
                                  required=False)
 
     ###Partial-Mode Arguments
-    partial_mode_args = parser.add_argument_group(
-    partial_mode_args.add_argument(
-                                   help=
+    partial_mode_args = parser.add_argument_group("Partial-Mode Parameters - Required when '-run_mode Partial' is used")
+    partial_mode_args.add_argument("-cluster_file", action="store", dest="cluster_file",
+                                   help="Clustering output file containing CD-HIT, TSV or CSV Edge List",
                                    required=False)
 
     ###Grouping Arguments
-    grouping_args = parser.add_argument_group('Grouping
+    grouping_args = parser.add_argument_group('Grouping Parameters - Use to fine-tune grouping of genes after clustering')
     grouping_args.add_argument('-reclustered', action='store', dest='reclustered',
                                help='Currently only works on Partial Mode: Clustering output file from secondary round of clustering.',
                                required=False)
@@ -129,18 +130,20 @@ def main():
                              required=False)
 
     ### Misc Arguments
-    misc = parser.add_argument_group(
-    misc.add_argument(
-
-
-
-
+    misc = parser.add_argument_group("Misc Parameters")
+    misc.add_argument("-verbose", action="store_true", dest="verbose",
+                      help="Print verbose output.",
+                      required=False)
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")
+
 
     options = parser.parse_args()
+    print("Running PyamilySeq: " + PyamilySeq_Version)
 
     ### Checking all required parameters are provided by user #!!# Doesn't seem to work
     if options.run_mode == 'Full':
-
         if options.reclustered != None:
             sys.exit("Currently reclustering only works on Partial Mode.")
         required_full_mode = [options.input_type, options.input_dir, options.name_split, options.clustering_format,
@@ -254,6 +257,7 @@ def main():
         self.output_dir = options.output_dir
         self.gene_presence_absence_out = options.gene_presence_absence_out
         self.write_groups = options.write_groups
+        self.threads = options.threads
         self.align_core = options.align_core
         self.fasta = combined_out_file
         self.verbose = options.verbose
@@ -272,6 +276,7 @@ def main():
         self.output_dir = options.output_dir
         self.gene_presence_absence_out = options.gene_presence_absence_out
         self.write_groups = options.write_groups
+        self.threads = options.threads
         self.align_core = options.align_core
         self.fasta = options.original_fasta
         self.verbose = options.verbose
@@ -288,5 +293,4 @@ def main():
              "Please report any issues to: https://github.com/NickJD/PyamilySeq/issues\n#####")
 
 if __name__ == "__main__":
-    print("Running PyamilySeq "+PyamilySeq_Version)
     main()
PyamilySeq/Seq_Combiner.py
CHANGED
@@ -11,7 +11,7 @@ except (ModuleNotFoundError, ImportError, NameError, TypeError) as error:
 
 
 def main():
-    parser = argparse.ArgumentParser(description='
+    parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': Seq-Combiner - A tool to extract sequences from GFF/FASTA files and prepare them for PyamilySeq.')
     ### Required Arguments
     required = parser.add_argument_group('Required Arguments')
     required.add_argument('-input_dir', action='store', dest='input_dir',
@@ -31,6 +31,7 @@ def main():
     required.add_argument("-output_name", action="store", dest="output_file",
                           help="Output file name.",
                           required=True)
+
     optional = parser.add_argument_group('Optional Arguments')
     optional.add_argument('-gene_ident', action='store', dest='gene_ident', default='CDS',
                           help='Default - "CDS": Identifier used for extraction of sequences such as "misc_RNA,gene,mRNA,CDS,rRNA,tRNA,tmRNA,CRISPR,ncRNA,regulatory_region,oriC,pseudo"'
@@ -40,9 +41,9 @@ def main():
                           help='Default - False: Translate extracted sequences to their AA counterpart?',
                           required=False)
     misc = parser.add_argument_group('Misc Arguments')
-    misc.add_argument(
-
-
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq: Seq-Combiner version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")
 
     options = parser.parse_args()
 
@@ -50,6 +51,9 @@ def main():
         sys.exit(PyamilySeq_Version)
 
     output_path = os.path.abspath(options.output_dir)
+    if not os.path.exists(output_path):
+        os.makedirs(output_path)
+
     combined_out_file = os.path.join(output_path, options.output_file)
 
     if options.input_type == 'separate':
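The utils.py diff below swaps the old alignment-based `calculate_similarity` for a Levenshtein distance helper, preferring the compiled `Levenshtein` package (now declared in the wheel's metadata) and falling back to a pure-Python dynamic-programming version. A minimal sanity check of that recurrence is sketched here; the sequences are invented and the optional import mirrors the fallback logic in the diff.

```python
# Minimal check that the pure-Python fallback added to utils.py agrees with the
# optional 'Levenshtein' package on a small example.
def levenshtein_fallback(seq1, seq2):
    # Same DP recurrence as the fallback in utils.py: deletion, insertion, substitution.
    len1, len2 = len(seq1), len(seq2)
    dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]
    for i in range(len1 + 1):
        dp[i][0] = i
    for j in range(len2 + 1):
        dp[0][j] = j
    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            cost = 0 if seq1[i - 1] == seq2[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len1][len2]

print(levenshtein_fallback("GATTACA", "GATACCA"))  # 2
try:
    import Levenshtein as LV
    print(LV.distance("GATTACA", "GATACCA"))       # 2 when the package is installed
except ImportError:
    pass
```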
PyamilySeq/utils.py
CHANGED
@@ -6,7 +6,42 @@ import collections
 from tempfile import NamedTemporaryFile
 import sys
 from line_profiler_pycharm import profile
-
+import re
+
+####
+# Placeholder for the distance function
+levenshtein_distance_cal = None
+# Check for Levenshtein library once
+try:
+    import Levenshtein as LV
+    # Assign the optimized function
+    def levenshtein_distance_calc(seq1, seq2):
+        return LV.distance(seq1, seq2)
+except (ModuleNotFoundError, ImportError):
+    print("Levenshtein package not installed - Will fallback to slower Python implementation.")
+    # Fallback implementation
+    def levenshtein_distance_calc(seq1, seq2):
+        # Slower Python implementation of Levenshtein distance
+        len1, len2 = len(seq1), len(seq2)
+        dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]
+
+        for i in range(len1 + 1):
+            dp[i][0] = i
+        for j in range(len2 + 1):
+            dp[0][j] = j
+
+        for i in range(1, len1 + 1):
+            for j in range(1, len2 + 1):
+                if seq1[i - 1] == seq2[j - 1]:
+                    cost = 0
+                else:
+                    cost = 1
+                dp[i][j] = min(dp[i - 1][j] + 1,  # Deletion
+                               dp[i][j - 1] + 1,  # Insertion
+                               dp[i - 1][j - 1] + cost)  # Substitution
+
+        return dp[len1][len2]
+#####
 
 ################### We are currently fixed using Table 11
 gencode = {
@@ -31,63 +66,6 @@ def translate_frame(sequence):
     translate = ''.join([gencode.get(sequence[3 * i:3 * i + 3], 'X') for i in range(len(sequence) // 3)])
     return translate
 
-@profile
-def calculate_similarity(seq1, seq2):
-    len1, len2 = len(seq1), len(seq2)
-
-    # If lengths are the same, directly compare without alignment
-    if len1 == len2:
-        matches = sum(c1 == c2 for c1, c2 in zip(seq1, seq2))
-        return (matches / len1) * 100  # Return similarity based on the length
-
-    # For different lengths, proceed with global alignment
-    # Initialize the scoring matrix
-    score_matrix = [[0] * (len2 + 1) for _ in range(len1 + 1)]
-
-    # Fill the first row and first column with gap penalties
-    for i in range(len1 + 1):
-        score_matrix[i][0] = -i  # Gap penalty for seq1
-    for j in range(len2 + 1):
-        score_matrix[0][j] = -j  # Gap penalty for seq2
-
-    # Fill the score matrix
-    for i in range(1, len1 + 1):
-        for j in range(1, len2 + 1):
-            match = score_matrix[i - 1][j - 1] + (1 if seq1[i - 1] == seq2[j - 1] else -1)
-            delete = score_matrix[i - 1][j] - 1  # Gap in seq2
-            insert = score_matrix[i][j - 1] - 1  # Gap in seq1
-            score_matrix[i][j] = max(match, delete, insert)
-
-    # Traceback to find the alignment (if needed for detailed output)
-    aligned_seq1, aligned_seq2 = "", ""
-    i, j = len1, len2
-
-    while i > 0 or j > 0:
-        current_score = score_matrix[i][j]
-        if i > 0 and j > 0 and current_score == score_matrix[i - 1][j - 1] + (1 if seq1[i - 1] == seq2[j - 1] else -1):
-            aligned_seq1 += seq1[i - 1]
-            aligned_seq2 += seq2[j - 1]
-            i -= 1
-            j -= 1
-        elif i > 0 and current_score == score_matrix[i - 1][j] - 1:
-            aligned_seq1 += seq1[i - 1]
-            aligned_seq2 += "-"
-            i -= 1
-        else:
-            aligned_seq1 += "-"
-            aligned_seq2 += seq2[j - 1]
-            j -= 1
-
-    # Reverse the aligned sequences if needed
-    aligned_seq1 = aligned_seq1[::-1]
-    aligned_seq2 = aligned_seq2[::-1]
-
-    # Calculate matches from aligned sequences
-    matches = sum(c1 == c2 for c1, c2 in zip(aligned_seq1, aligned_seq2))
-
-    # Calculate the similarity percentage based on the maximum length
-    max_length = max(len(seq1), len(seq2))
-    return (matches / max_length) * 100
 
 
 
@@ -110,12 +88,24 @@ def reverse_complement(seq):
     complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
     return ''.join(complement[base] for base in reversed(seq))
 
+
 def fix_path(path):
     fixed_path = os.path.normpath(path)
     fixed_path = os.path.realpath(fixed_path)
     return fixed_path
 
 
+def extract_identity(clustered_info):
+    # Use regex to capture percentage, including optional '-' or '+' before it
+    match = re.search(r'at [+-/]*(\d+\.\d+)%', clustered_info)
+
+    if match:
+        percent_identity = float(match.group(1))  # Extract the percentage value
+        return percent_identity
+    else:
+        raise ValueError("Percent identity not found in the string.")
+
+
 def wrap_sequence(sequence, width=60):
     wrapped_sequence = []
     for i in range(0, len(sequence), width):
@@ -172,14 +162,15 @@ def run_mafft_on_sequences(options, sequences, output_file):
     with open(output_file, 'w') as output_f:
         if options.verbose == True:
             subprocess.run(
-                ['mafft', '--auto', temp_input_file_path],
+                ['mafft', '--auto', '--thread', str(options.threads), temp_input_file_path],
                 stdout=output_f,
                 stderr=sys.stderr,
                 check=True
             )
+
         else:
             subprocess.run(
-                ['mafft', '--auto', temp_input_file_path],
+                ['mafft', '--auto', '--thread', str(options.threads), temp_input_file_path],
                 stdout=output_f,
                 stderr=subprocess.DEVNULL,  # Suppress stderr
                 check=True
@@ -385,7 +376,7 @@ def process_gene_families(options, directory, output_file):
 
     # Iterate over each gene family file
     for gene_file in os.listdir(directory):
-        if gene_file.endswith('.fasta'):
+        if gene_file.endswith('.fasta') and not gene_file.endswith('combined_group_sequences.fasta'):
             gene_path = os.path.join(directory, gene_file)
 
             # Read sequences from the gene family file
@@ -395,7 +386,7 @@ def process_gene_families(options, directory, output_file):
             longest_sequences = select_longest_gene(sequences)
 
             # Run mafft on the longest sequences
-            aligned_file = f"{gene_file}_aligned.fasta"
+            aligned_file = f"{directory}/{gene_file}_aligned.fasta.tmp"
             run_mafft_on_sequences(options, {seq_id: seq for seq_id, seq in longest_sequences.values()}, aligned_file)
 
             # Read aligned sequences and concatenate them

{PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PyamilySeq
-Version: 0.
+Version: 0.9.0
 Summary: PyamilySeq - A a tool to look for sequence-based gene groups identified by clustering methods such as CD-HIT, DIAMOND, BLAST or MMseqs2.
 Home-page: https://github.com/NickJD/PyamilySeq
 Author: Nicholas Dimonaco
@@ -12,6 +12,7 @@ Classifier: Operating System :: OS Independent
 Requires-Python: >=3.6
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: levenshtein
 
 # PyamilySeq - !BETA!
 **PyamilySeq** is a Python tool for clustering gene sequences into groups based on sequence similarity identified by tools such as CD-HIT, BLAST, DIAMOND or MMseqs2.
@@ -34,19 +35,18 @@ PyamilySeq probably requires Python 3.6 or higher. Install using pip:
 ```bash
 pip install PyamilySeq
 ```
-
+PyamilySeq is regularly updated with bugfixes and new features so to update to the newest version add '-U' to end of the pip install command.
 ## Example usage: Below are two examples of running PyamilySeq in its two main modes.
 ### 'Full Mode': Will conduct clustering of sequences with CD-HIT as part of PyamilySeq run
 ```
 PyamilySeq -run_mode Full -group_mode Species -clustering_format CD-HIT -output_dir .../test_data/testing/Full
--input_type combined -input_dir .../test_data/genomes -name_split _combined.gff3 -pid 0.95 -len_diff 0.80
--gpa -a -w 99
+-input_type combined -input_dir .../test_data/genomes -name_split _combined.gff3 -pid 0.95 -len_diff 0.80 -a -w 99
 ```
 ### 'Partial Mode': Will take the output of a sequence clustering.
 ```
-PyamilySeq -run_mode Partial -group_mode Species -clustering_format TSV -output_dir .../test_data/
--cluster_file .../test_data/
--original_fasta .../test_data/species/combined_Ensmbl_cds.fasta -
+PyamilySeq -run_mode Partial -group_mode Species -clustering_format TSV -output_dir .../test_data/species/testing/Partial
+-cluster_file .../test_data/species/MMseqs2/combined_Ensmbl_pep_cluster.tsv
+-original_fasta .../test_data/species/combined_Ensmbl_cds.fasta -a -w 99 -verbose
 
 ```
 #### Note: '-clustering_format TSV/CSV' requires input to be two in two columns as below (Same format as MMseqs2 tsv) - Genome name and sequence name are separated by '|'.
@@ -58,7 +58,7 @@ Escherichia_coli_110957|ENSB:TIZS9kbTvShDvyX Escherichia_coli_110957|ENSB:TIZS9k
 ```
 ### Example output:
 ```
-Running PyamilySeq v0.
+Running PyamilySeq v0.9.0
 Calculating Groups
 Gene Groups:
 First_core_99: 2682
@@ -80,7 +80,7 @@ PyamilySeq -run_mode Partial -group_mode Genus -clustering_format CD-HIT -output
 -cluster_file .../test_data/genus/CD-HIT/combined_cds_cd-hit_80_60.clstr -gpa
 ```
 ```commandline
-Running PyamilySeq v0.
+Running PyamilySeq v0.9.0
 Calculating Groups
 Genus Groups:
 First_genera_1: 28549
@@ -137,14 +137,14 @@ Please report any issues to: https://github.com/NickJD/PyamilySeq/issues
 ## PyamilySeq - Menu:
 ### PyamilySeq is separated into two main 'run modes', Full and Partial. They each have their own set of required and optional arguments.
 ```
-Running PyamilySeq v0.
+Running PyamilySeq v0.9.0
 usage: PyamilySeq.py [-h] -run_mode {Full,Partial} -group_mode {Species,Genus} -clustering_format {CD-HIT,TSV,CSV} -output_dir OUTPUT_DIR
                      [-input_type {separate,combined}] [-input_dir INPUT_DIR] [-name_split NAME_SPLIT] [-sequence_type {AA,DNA}] [-gene_ident GENE_IDENT]
                      [-pid PIDENT] [-len_diff LEN_DIFF] [-mem CLUSTERING_MEMORY] [-t CLUSTERING_THREADS] [-cluster_file CLUSTER_FILE]
                      [-reclustered RECLUSTERED] [-seq_tag SEQUENCE_TAG] [-core_groups CORE_GROUPS] [-genus_groups GENUS_GROUPS] [-w WRITE_GROUPS] [-a]
                      [-original_fasta ORIGINAL_FASTA] [-gpa] [-verbose] [-v]
 
-PyamilySeq v0.
+PyamilySeq v0.9.0: A tool that groups genes into unique clusters.
 
 options:
   -h, --help            show this help message and exit
@@ -176,8 +176,9 @@ Full-Mode Arguments - Required when "-run_mode Full" is used:
 Clustering Runtime Arguments - Optional when "-run_mode Full" is used:
   -mem CLUSTERING_MEMORY
                         Default 4000: Memory to be allocated for clustering (in MBs).
-  -t
-
+  -t THREADS            Default 8: Threads to be allocated for clustering
+                        and/or alignment.
+
 
 Partial-Mode Arguments - Required when "-run_mode Partial" is used:
   -cluster_file CLUSTER_FILE
@@ -197,15 +198,16 @@ Output Parameters:
   -w WRITE_GROUPS       Default - No output: Output sequences of identified groups (provide levels at which to output - Species "-w 99,95" Genus "-w 2,3" -
                         Must provide FASTA file with -original_fasta if in Partial run mode.
   -a                    Default - No output: SLOW! (Only works for Species mode) Output aligned and concatinated sequences of identified groups -provide
-                        group levels at which to output "-w 99,95" - Must provide FASTA file with -original_fasta in
+                        group levels at which to output "-w 99,95" - Must provide FASTA file with -original_fasta in Partialrun mode.
   -original_fasta ORIGINAL_FASTA
-                        FASTA file to use in conjunction with "-w" or "-
-  -
-
+                        FASTA file to use in conjunction with "-w" or "-con" when running in Partial Mode.
+  -no_gpa               Do not create a Roary/Panaroo formatted gene_presence_absence.csv (created by default) - Required for Coinfinder and other
+                        downstream tools
+
+Misc Parameters:
+  -verbose              Print verbose output.
+  -v, --version         Print out version number and exit
 
-Misc:
-  -verbose              Default - False: Print out runtime messages
-  -v                    Default - False: Print out version number and exit
 ```
 
 
@@ -215,13 +217,14 @@ Misc:
 ## Seq-Combiner: This tool is provided to enable the pre-processing of multiple GFF/FASTA files together ready to be clustered by the user.
 ### Example:
 ```bash
-Seq-Combiner -input_dir .../test_data/genomes -name_split
+Seq-Combiner -input_dir .../test_data/genomes -name_split .gff3 -output_dir .../test_data/genomes -output_name combine_fasta_seqs.fa -input_type combined
 ```
 ### Seq-Combiner Menu:
 ```
-usage: Seq_Combiner.py [-h] -input_dir INPUT_DIR -input_type {separate,combined,fasta} -name_split NAME_SPLIT -output_dir OUTPUT_DIR -output_name
+usage: Seq_Combiner.py [-h] -input_dir INPUT_DIR -input_type {separate,combined,fasta} -name_split NAME_SPLIT -output_dir OUTPUT_DIR -output_name
+                       OUTPUT_FILE [-gene_ident GENE_IDENT] [-translate] [-v]
 
-
+PyamilySeq v0.9.0: Seq-Combiner - A tool to extract sequences from GFF/FASTA files and prepare them for PyamilySeq.
 
 options:
   -h, --help            show this help message and exit
@@ -229,7 +232,8 @@ options:
 Required Arguments:
   -input_dir INPUT_DIR  Directory location where the files are located.
   -input_type {separate,combined,fasta}
-                        Type of input files: "separate" for separate FASTA and GFF files, "combined" for GFF files with embedded FASTA sequences and "fasta"
+                        Type of input files: "separate" for separate FASTA and GFF files, "combined" for GFF files with embedded FASTA sequences and "fasta"
+                        for combining multiple FASTA files together.
  -name_split NAME_SPLIT
                         substring used to split the filename and extract the genome name ('_combined.gff3' or '.gff').
   -output_dir OUTPUT_DIR
@@ -239,46 +243,103 @@ Required Arguments:
 
 Optional Arguments:
   -gene_ident GENE_IDENT
-                        Default - "CDS": Identifier used for extraction of sequences such as
+                        Default - "CDS": Identifier used for extraction of sequences such as
+                        "misc_RNA,gene,mRNA,CDS,rRNA,tRNA,tmRNA,CRISPR,ncRNA,regulatory_region,oriC,pseudo" - Not compatible with "fasta" input mode.
   -translate            Default - False: Translate extracted sequences to their AA counterpart?
 
 Misc Arguments:
-  -v
-
+  -v, --version         Print out version number and exit
 
 ```
 
-
-
+## Group-Splitter: This tool can split multi-copy gene groups using CD-HIT after initial PyamilySeq analysis.
+### Example:
+```bash
+Group-Splitter -genome_num 74 -input_fasta .../test/species/ -output_dir .../test/species/ -sequence_type AA
 ```
-
-
+### Group-Splitter Menu:
+```
+usage: Group_Splitter.py [-h] -input_fasta INPUT_FASTA -sequence_type {AA,DNA}
+                         -genome_num GENOME_NUM -output_dir OUTPUT_DIR
+                         [-groups GROUPS] [-group_threshold GROUP_THRESHOLD]
+                         [-c PIDENT] [-s LEN_DIFF] [-T CLUSTERING_THREADS]
+                         [-M CLUSTERING_MEMORY] [-no_delete_temp_files]
+                         [-verbose] [-v]
 
-
+PyamilySeq v0.9.0: Group-Splitter - A tool to split multi-copy gene groups
+identified by PyamilySeq.
 
 options:
   -h, --help            show this help message and exit
 
-Required
+Required Parameters:
   -input_fasta INPUT_FASTA
                         Input FASTA file containing gene groups.
+  -sequence_type {AA,DNA}
+                        Default - DNA: Are groups "DNA" or "AA" sequences?
+  -genome_num GENOME_NUM
+                        The total number of genomes must be provide
   -output_dir OUTPUT_DIR
                         Output directory.
 
-
-  -
-
-
+Regrouping Parameters:
+  -groups GROUPS        Default - auto: Detect groups to be split (see
+                        -group_threshold). Provide "-groups 1,2,3,4" with
+                        group IDs to split specific groups.
+  -group_threshold GROUP_THRESHOLD
+                        Minimum percentage of genomes with multi-copy
+                        (default: 80.0) - Does not work with "-groups"
+
+CD-HIT Reclustering Parameters:
+  -c PIDENT             Sequence identity threshold (default: 0.8) - Probably
+                        should be higher than what was used in initial
+                        clustering.
+  -s LEN_DIFF           Length difference cutoff (default: 0.20) - Often the
+                        most impactful parameter to split 'multi-copy' gene
+                        groups.
+  -T CLUSTERING_THREADS
                         Number of threads for clustering (default: 4)
-  -
-
-
-
+  -M CLUSTERING_MEMORY  Memory limit in MB for clustering (default: 2000)
+
+Misc Parameters:
+  -no_delete_temp_files
+                        Default: Delete all temporary files after processing.
   -verbose              Print verbose output.
-  -
+  -v, --version         Print out version number and exit
+
+```
+
+## Cluster-Summary menu: This tool can be used to summarise CD-HIT .clstr files:
+### Example:
+```bash
+Cluster-Summary -genome_num 74 -input_clstr .../test_data/species/E-coli/E-coli_extracted_pep_cd-hit_80.clstr -output_tsv .../test_data/species/E-coli/E-coli_extracted_pep_cd-hit_80_Summary.tsv
+```
+### Cluster-Summary Menu:
+```
+usage: Cluster_Summary.py [-h] -input_clstr INPUT_CLSTR -output OUTPUT -genome_num GENOME_NUM
+                          [-output_dir OUTPUT_DIR] [-verbose] [-v]
+
+PyamilySeq v0.9.0: Cluster-Summary - A tool to summarise CD-HIT clustering files.
+
+options:
+  -h, --help            show this help message and exit
+
+Required Parameters:
+  -input_clstr INPUT_CLSTR
+                        Input CD-HIT .clstr file
+  -output OUTPUT        Output TSV file to store cluster summaries - Will add '.tsv' if not
+                        provided by user
+  -genome_num GENOME_NUM
+                        The total number of genomes must be provide
+
+Optional Arguments:
+  -output_dir OUTPUT_DIR
+                        Default: Same as input file
+
+Misc Parameters:
+  -verbose              Print verbose output.
+  -v, --version         Print out version number and exit
 
-Misc Arguments:
-  -v                    Print out version number and exit
 ```
 
 ### All example input and output data can be found in the 'test_data' directory.

PyamilySeq-0.9.0.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+PyamilySeq/Cluster_Summary.py,sha256=OwAPjODoFIECQUGuPywXORdQn-wHqyRnIIhxSzLTm2E,6982
+PyamilySeq/Constants.py,sha256=ubY4rmpkQIwxfY6vq4rjO34PtlQPWEJyinwFke3BSGE,31
+PyamilySeq/Group_Splitter.py,sha256=QQD5gK1QhMDlqMhLvLWsq-Eh8-k2vC-h4L8bqdkGpXE,17445
+PyamilySeq/PyamilySeq.py,sha256=Frl21S-l4fZdDLFoqeTxB5QqMdsKq5VSQv98Xf_uxMU,15283
+PyamilySeq/PyamilySeq_Genus.py,sha256=hC34cHIFu8YaXXgcPyVwuWENlsxx-7mT-Qr6PAdio4U,12414
+PyamilySeq/PyamilySeq_Species.py,sha256=spgS-h-lrySZBiOiB6jX6pPRaL5j8f5V1Hq3XOjBOko,14404
+PyamilySeq/Seq_Combiner.py,sha256=hMXmA-M3tduONX4pM5qDb2dzBIFLdsIsWLezejxowhQ,3521
+PyamilySeq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+PyamilySeq/clusterings.py,sha256=rcWFv0IiWoS4aUNRjDDwNEL86l1wIKa4vK4htAxy8Hg,18787
+PyamilySeq/utils.py,sha256=sjsx5oAIPacvVbfURqPwoq7XfZIk9V_PhGugBVT6jLE,18626
+PyamilySeq-0.9.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+PyamilySeq-0.9.0.dist-info/METADATA,sha256=gsO5symEXI7C8SGzLD2SfyIcCt9yYmbXBIoBCU05BL8,16958
+PyamilySeq-0.9.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+PyamilySeq-0.9.0.dist-info/entry_points.txt,sha256=KuGG_QEvagQHf-Ftohb1oItkx_SknDq66wcOiBqb7PY,200
+PyamilySeq-0.9.0.dist-info/top_level.txt,sha256=J6JhugUQTq4rq96yibAlQu3o4KCM9WuYfqr3w1r119M,11
+PyamilySeq-0.9.0.dist-info/RECORD,,

PyamilySeq-0.8.0.dist-info/RECORD
DELETED
@@ -1,15 +0,0 @@
-PyamilySeq/Constants.py,sha256=lbVZv4vDHroA83KCDTIGuVb6bubKYZbwLmhYHxedXQc,31
-PyamilySeq/Group_Splitter.py,sha256=raZMV9SN7Qqw5Hci5qpkaahR66JMQf6dX8TvThjh3kU,14986
-PyamilySeq/PyamilySeq.py,sha256=0607A9nqafoQ8IhBxGgGJ-v3DVV6C6-LgzdDIXb2C-c,15179
-PyamilySeq/PyamilySeq_Genus.py,sha256=hC34cHIFu8YaXXgcPyVwuWENlsxx-7mT-Qr6PAdio4U,12414
-PyamilySeq/PyamilySeq_Species.py,sha256=spgS-h-lrySZBiOiB6jX6pPRaL5j8f5V1Hq3XOjBOko,14404
-PyamilySeq/Seq_Combiner.py,sha256=dPDu6LlT3B-ZDn3wKZ3AeWraDgv2Tub_16l9CLc3tQ0,3353
-PyamilySeq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-PyamilySeq/clusterings.py,sha256=rcWFv0IiWoS4aUNRjDDwNEL86l1wIKa4vK4htAxy8Hg,18787
-PyamilySeq/utils.py,sha256=6UtYJW3_0rDhEhvrJi6R3smvKu2n_bjqUkuzr5DcJM4,19061
-PyamilySeq-0.8.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-PyamilySeq-0.8.0.dist-info/METADATA,sha256=ZnpQvAQy5EXGrzS0G9y5qH2Rhmb0LW2HvOT-b5WJLoo,14436
-PyamilySeq-0.8.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-PyamilySeq-0.8.0.dist-info/entry_points.txt,sha256=15BsozBN6vRWvZeQon05dY4YQT7DqP5i2TUqFWRGCvc,150
-PyamilySeq-0.8.0.dist-info/top_level.txt,sha256=J6JhugUQTq4rq96yibAlQu3o4KCM9WuYfqr3w1r119M,11
-PyamilySeq-0.8.0.dist-info/RECORD,,

{PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/LICENSE: File without changes
{PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/WHEEL: File without changes
{PyamilySeq-0.8.0.dist-info → PyamilySeq-0.9.0.dist-info}/top_level.txt: File without changes