PyamilySeq 0.8.1__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PyamilySeq/Cluster_Summary.py +163 -0
- PyamilySeq/Constants.py +1 -1
- PyamilySeq/Group_Splitter.py +145 -113
- PyamilySeq/PyamilySeq.py +16 -15
- PyamilySeq/Seq_Combiner.py +8 -4
- PyamilySeq/utils.py +38 -62
- {PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/METADATA +100 -42
- PyamilySeq-0.9.0.dist-info/RECORD +16 -0
- {PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/entry_points.txt +1 -0
- PyamilySeq-0.8.1.dist-info/RECORD +0 -15
- {PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/LICENSE +0 -0
- {PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/WHEEL +0 -0
- {PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/top_level.txt +0 -0

PyamilySeq/Cluster_Summary.py
ADDED

@@ -0,0 +1,163 @@
+import argparse
+from collections import OrderedDict
+from collections import defaultdict
+
+try:
+    from .Constants import *
+    from .utils import *
+except (ModuleNotFoundError, ImportError, NameError, TypeError) as error:
+    from Constants import *
+    from utils import *
+
+
+def categorise_percentage(percent):
+    """Categorise the percentage of genomes with multicopy genes."""
+    if 20 <= percent < 40:
+        return "20-40%"
+    elif 40 <= percent < 60:
+        return "40-60%"
+    elif 60 <= percent < 80:
+        return "60-80%"
+    elif 80 <= percent < 95:
+        return "80-95%"
+    elif 95 <= percent < 99:
+        return "95-99%"
+    elif 99 <= percent <= 100:
+        return "99-100%"
+    return None
+
+# Read cd-hit .clstr file and extract information
+def read_cd_hit_output(clustering_output):
+    clusters = OrderedDict()
+
+    with open(clustering_output, 'r') as f:
+        current_cluster_id = None
+
+        for line in f:
+            line = line.strip()
+            if line.startswith(">Cluster"):
+                current_cluster_id = line.split(' ')[1]
+                clusters[current_cluster_id] = []
+            elif line and current_cluster_id is not None:
+                parts = line.split('\t')
+                if len(parts) > 1:
+                    clustered_info = parts[1]
+                    length = clustered_info.split(',')[0]
+                    length = int(''.join(c for c in length if c.isdigit()))
+                    clustered_header = clustered_info.split('>')[1].split('...')[0]
+                    clustered_header = '>' + clustered_header
+
+                    if 'at ' in clustered_info and '%' in clustered_info.split('at ')[-1]:
+                        percent_identity = extract_identity(clustered_info)
+                    elif line.endswith('*'):
+                        percent_identity = 100.0
+                    else:
+                        raise ValueError("Percent identity not found in the string.")
+
+                    clusters[current_cluster_id].append({
+                        'header': clustered_header,
+                        'length': length,
+                        'percent_identity': percent_identity
+                    })
+
+    return clusters
+
+
+# Summarise the information for each cluster
+def summarise_clusters(options,clusters, output):
+    multicopy_groups = defaultdict(int)  # Counter for groups with multicopy genes
+
+    with open(output, 'w') as out_f:
+        out_f.write("Cluster_ID\tNum_Sequences\tAvg_Length\tLength_Range\tAvg_Identity\tIdentity_Range\n")
+
+        for cluster_id, seqs in clusters.items():
+            num_seqs = len(seqs)
+            lengths = [seq['length'] for seq in seqs]
+            identities = [seq['percent_identity'] for seq in seqs]
+
+            avg_length = sum(lengths) / num_seqs if num_seqs > 0 else 0
+            length_range = f"{min(lengths)}-{max(lengths)}" if num_seqs > 0 else "N/A"
+
+            avg_identity = sum(identities) / num_seqs if num_seqs > 0 else 0
+            identity_range = f"{min(identities):.2f}-{max(identities):.2f}" if num_seqs > 0 else "N/A"
+
+            out_f.write(
+                f"{cluster_id}\t{num_seqs}\t{avg_length:.2f}\t{length_range}\t{avg_identity:.2f}\t{identity_range}\n")
+
+            # Count genomes with more than one gene
+            genome_to_gene_count = defaultdict(int)
+            for seq in seqs:
+                genome = seq['header'].split('|')[0].replace('>','')
+                genome_to_gene_count[genome] += 1
+
+            num_genomes_with_multiple_genes = sum(1 for count in genome_to_gene_count.values() if count > 1)
+
+            # Calculate the percentage of genomes with multicopy genes
+
+            multicopy_percentage = (num_genomes_with_multiple_genes / options.genome_num) * 100
+            category = categorise_percentage(multicopy_percentage)
+            if category:
+                multicopy_groups[category] += 1
+
+    # Define the order of categories for printout
+    category_order = ["20-40%", "40-60%", "60-80%", "80-95%", "95-99%", "99-100%"]
+
+    # Print the number of clusters with multicopy genes in each percentage range, in the correct order
+    for category in category_order:
+        print(f"Number of clusters with multicopy genes in {category} range: {multicopy_groups[category]}")
+
+
+# Main function to parse arguments and run the analysis
+def main():
+    parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': Cluster-Summary - A tool to summarise CD-HIT clustering files.')
+    ### Required Arguments
+    required = parser.add_argument_group('Required Parameters')
+    required.add_argument('-input_clstr', action="store", dest="input_clstr",
+                          help='Input CD-HIT .clstr file',
+                          required=True)
+    required.add_argument('-output', action="store", dest="output",
+                          help="Output TSV file to store cluster summaries - Will add '.tsv' if not provided by user",
+                          required=True)
+    required.add_argument('-genome_num', action='store', dest='genome_num', type=int,
+                          help='The total number of genomes must be provide',
+                          required=True)
+    #required.add_argument("-clustering_format", action="store", dest="clustering_format", choices=['CD-HIT','TSV','CSV'],
+    #                      help="Clustering format to use: CD-HIT or TSV (MMseqs2, BLAST, DIAMOND) / CSV edge-list file (Node1\tNode2).",
+    #                      required=True)
+
+    optional = parser.add_argument_group('Optional Arguments')
+    optional.add_argument('-output_dir', action="store", dest="output_dir",
+                          help='Default: Same as input file',
+                          required=False)
+
+    misc = parser.add_argument_group("Misc Parameters")
+    misc.add_argument("-verbose", action="store_true", dest="verbose",
+                      help="Print verbose output.",
+                      required=False)
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq: Group-Summary version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")
+
+
+    options = parser.parse_args()
+    print("Running PyamilySeq " + PyamilySeq_Version + ": Group-Summary ")
+
+    ### File handling
+    options.input_clstr = fix_path(options.input_clstr)
+    if options.output_dir is None:
+        options.output_dir = os.path.dirname(os.path.abspath(options.input_clstr))
+    output_path = os.path.abspath(options.output_dir)
+    if not os.path.exists(output_path):
+        os.makedirs(output_path)
+    output_name = options.output
+    if not output_name.endswith('.tsv'):
+        output_name += '.tsv'
+    output_file_path = os.path.join(output_path, output_name)
+    ###
+
+    clusters = read_cd_hit_output(options.input_clstr)
+    summarise_clusters(options,clusters, output_file_path)
+
+
+if __name__ == "__main__":
+    main()
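
For reference, `read_cd_hit_output()` above walks the standard CD-HIT `.clstr` layout: a `>Cluster n` header line followed by one tab-separated member line per sequence, where the representative ends with `*` and every other member carries an `at ...%` identity. The sketch below is purely illustrative (the genome and gene identifiers are invented; the `Genome|gene` header convention follows the README note that genome and sequence names are separated by `|`) and shows the structure the parser returns:

```python
# Illustrative only - identifiers are made up; the layout follows CD-HIT's .clstr format.
clstr_text = (
    ">Cluster 0\n"
    "0\t303nt, >Genome_1|gene_0001... *\n"            # representative -> percent_identity 100.0
    "1\t300nt, >Genome_2|gene_0042... at +/98.67%\n"  # member -> extract_identity() pulls out 98.67
)

# Written to a file and passed to read_cd_hit_output(), this yields one cluster keyed by '0':
expected = {
    "0": [
        {"header": ">Genome_1|gene_0001", "length": 303, "percent_identity": 100.0},
        {"header": ">Genome_2|gene_0042", "length": 300, "percent_identity": 98.67},
    ]
}
```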
PyamilySeq/Constants.py
CHANGED
@@ -1,2 +1,2 @@
-PyamilySeq_Version = 'v0.8.1'
+PyamilySeq_Version = 'v0.9.0'
 
PyamilySeq/Group_Splitter.py
CHANGED
@@ -1,3 +1,4 @@
+import collections
 import subprocess
 import os
 import argparse

@@ -21,6 +22,7 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
         '-T', str(options.clustering_threads),
         '-M', str(options.clustering_memory),
         '-d', "0",
+        '-g', "1",
         '-sc', "1",
         '-sf', "1"
     ]

@@ -29,24 +31,29 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
     else:
         subprocess.run(cdhit_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

-
-def calculate_new_rep_seq(cluster_data):
+@profile
+def calculate_new_rep_seq(cluster_data, length_weight=1.0, identity_weight=1.0):
     total_length = sum(entry['length'] for entry in cluster_data)
     avg_length = total_length / len(cluster_data)

     total_identity = sum(entry['percent_identity'] for entry in cluster_data)
     avg_identity = total_identity / len(cluster_data)

+    # Normalize length and identity
+    max_length = max(entry['length'] for entry in cluster_data)
+    max_identity = 100  # Assuming percent_identity is out of 100
+
     # Calculate a score based on both length difference and percent identity
     def score(entry):
-
-
-        return
+        normalized_length_diff = abs(entry['length'] - avg_length) / max_length
+        normalized_identity_diff = abs(entry['percent_identity'] - avg_identity) / max_identity
+        return (length_weight * normalized_length_diff) + (identity_weight * (1 - normalized_identity_diff))

     rep_entry = min(cluster_data, key=score)
     return rep_entry


+
 def length_within_threshold(rep_length, length, len_diff):
     return abs(rep_length - length) / rep_length <= len_diff

@@ -58,16 +65,22 @@ def check_if_all_identical(clustered_sequences):
     return len(lengths) == 1 and len(perc_idents) == 1


-
+
+def read_fasta_groups(options):
     groups = defaultdict(list)
     genome_count = defaultdict(int)
     current_group = None
     current_sequence = []

-
+    # Parse the list of specific group numbers if provided
+    selected_groups = None
+    if options.groups is not None:
+        selected_groups = [int(g.strip()) for g in options.groups.split(',')]
+
+    with open(options.input_fasta, 'r') as f:
         for line in f:
             if line.startswith('>'):
-                if current_group is not None:
+                if current_group is not None and (selected_groups is None or group_number in selected_groups):
                     groups[current_group].append((current_group_header, ''.join(current_sequence)))

                 current_group_header = line.strip()

@@ -75,6 +88,13 @@ def read_fasta_groups(fasta_file):
                 genome = current_group_header.split('|')[1]
                 current_sequence = []
                 genome_count[genome] += 1
+
+                # Only process if group matches the selected_groups or if no specific groups were provided
+                group_number = int(current_group.replace('>Group_', ''))  # Assuming format 'Group_n'
+                if selected_groups is not None and group_number not in selected_groups:
+                    current_group = None  # Skip this group
+                    continue
+
             else:
                 current_sequence.append(line.strip())

@@ -110,10 +130,9 @@ def read_cd_hit_output(clustering_output):
                     clustered_header = clustered_info.split('>')[1].split('...')[0]
                     clustered_header = '>' + clustered_header

-                    if 'at' in clustered_info:
+                    if 'at ' in clustered_info and '%' in clustered_info.split('at ')[-1]:
                         percent_identity = extract_identity(line)
-
-                    elif '*' in line:
+                    elif line.endswith('*'):
                         percent_identity = 100.0
                     else:
                         raise ValueError("Percent identity not found in the string.")

@@ -126,13 +145,16 @@ def read_cd_hit_output(clustering_output):
     return clusters

-
-def separate_groups(
-    groups, genome_count = read_fasta_groups(
+@profile
+def separate_groups(options, clustering_mode):
+    groups, genome_count = read_fasta_groups(options)

     paralog_groups = defaultdict(int)  # To track number of paralog groups

     for group_header, sequences in groups.items():
+        if options.verbose:
+            print(f"\n###\nCurrent Group: {group_header.replace('>','')}\n")
+
         group_name = group_header.split('|')[0]  # Get the group part (e.g., '>Group_n')

         # Count genomes with more than one gene

@@ -142,11 +164,12 @@ def separate_groups(input_fasta, options, clustering_mode):
             genome_to_gene_count[genome] += 1

         num_genomes_with_multiple_genes = sum(1 for count in genome_to_gene_count.values() if count > 1)
-        total_genomes = len(genome_to_gene_count)

         # Check if the group meets the threshold for having paralogs
-        if
-
+        if options.groups == None:
+            if (num_genomes_with_multiple_genes / options.genome_num) * 100 < options.group_threshold:
+                continue
+

         group_file_name = group_name.replace('>','')

@@ -161,88 +184,89 @@ def separate_groups(input_fasta, options, clustering_mode):
         # Read the clustering results to find subgroups
         clustered_sequences = read_cd_hit_output(clustering_output + '.clstr')

-
-
+        if len(clustered_sequences) == 1:
+            # Detect if all sequences are identical in length and percentage identity
+            all_same = check_if_all_identical(clustered_sequences)

         # **Global subgroup counter for the entire major group**
         subgroup_id = 0
-
-        sequences_to_remove = []
+

         if not all_same:
-  [... 43 removed lines; their content is not shown in this diff rendering ...]
+            # Iterate through each cluster in clustered_sequences
+            for cluster_key, cluster in clustered_sequences.items():
+
+                remaining_sequences_tmp = sequences.copy()  # Track unprocessed sequences
+                remaining_sequences = [entry for entry in remaining_sequences_tmp if entry[0] in
+                                       {seq_entry['header'] for seq_entry in cluster}]
+                sequences_to_remove = []
+
+                while remaining_sequences:
+                    # Track subgroups for this cluster pass
+                    subgroup_sequences = []
+                    genome_seen = set()
+
+                    # Recalculate representative sequence dynamically for this cluster
+                    rep = calculate_new_rep_seq(
+                        [entry for entry in cluster if entry['header'] in (h for h, _ in remaining_sequences)]
+                    )
+
+                    # Find the sequence corresponding to rep['header'] from the list of sequences
+                    rep_seq = next((seq for header, seq in sequences if header == rep['header']), None)
+
+                    # Save previously checked seqs, so we don't have to compare them again.
+                    checked = collections.defaultdict(float)
+
+                    # Process each genome to select the best matching sequence
+                    for genome in genome_to_gene_count:
+                        best_sequence = None
+                        best_score = None  # Initialise with a very low score, so that even negative scores can be selected
+
+                        # Iterate over each sequence in the remaining sequences for this genome
+                        for header, seq in remaining_sequences:
+                            genome_id = header.split('|')[1]
+
+                            if genome_id == genome:  # Ensure this sequence belongs to the current genome
+                                if rep_seq == seq:
+                                    levenshtein_distance = 0
+                                else:
+                                    if seq in checked:
+                                        levenshtein_distance = checked[seq]
+                                    else:
+                                        levenshtein_distance = levenshtein_distance_calc(rep_seq,seq)
+                                        checked[seq] = levenshtein_distance
+                                # Lower Levenshtein distance means more 'similar' sequences
+                                score = levenshtein_distance

                                 # Check if this sequence has a higher score than the current best
-                                if
+                                if best_sequence == None:
+                                    best_score = score
+                                    best_sequence = (header, seq)  # Store the best matching sequence for this genome
+                                elif score < best_score:
                                     best_score = score
                                     best_sequence = (header, seq)  # Store the best matching sequence for this genome

-  [... 6 removed lines; their content is not shown in this diff rendering ...]
-                                genome_seen.add(genome)
+                        # Add the best sequence for this genome to the subgroup
+                        if best_sequence is not None:
+                            new_header = f">{group_file_name}_subgroup_{subgroup_id}|{best_sequence[0].split('|')[1]}|{best_sequence[0].split('|')[2]}"
+                            subgroup_sequences.append((new_header, best_sequence[1]))
+                            sequences_to_remove.append(best_sequence)
+                            genome_seen.add(genome)

-                    #
-
+                    # Write each subgroup into a separate FASTA file
+                    if subgroup_sequences:
+                        subgroup_file = f"{options.output_dir}/{group_file_name}_subgroup_{subgroup_id}.fasta"
+                        write_fasta(subgroup_sequences, subgroup_file)

-  [... 4 removed lines; their content is not shown in this diff rendering ...]
+                    # Remove processed sequences from the remaining list
+                    remaining_sequences = [item for item in remaining_sequences if
+                                           item[0] not in {h for h, _ in sequences_to_remove}]
+
+                    # Increment subgroup ID for the next subgroup
+                    subgroup_id += 1
+                    paralog_groups[group_name] += 1  # Count this group as a paralog group

-        # Remove processed sequences from the remaining list
-        remaining_sequences = [item for item in remaining_sequences if
-                               item[0] not in {h for h, _ in sequences_to_remove}]

-        # Increment subgroup ID globally for the next subgroup
-        subgroup_id += 1
-        paralog_groups[group_name] += 1  # Count this group as a paralog group


         else:

@@ -290,48 +314,57 @@ def separate_groups(input_fasta, options, clustering_mode):


 def main():
-    parser = argparse.ArgumentParser(description='
+    parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': Group-Splitter - A tool to split multi-copy gene groups identified by PyamilySeq.')
     ### Required Arguments
-    required = parser.add_argument_group('Required
+    required = parser.add_argument_group('Required Parameters')
     required.add_argument('-input_fasta', action='store', dest='input_fasta',
                           help='Input FASTA file containing gene groups.',
                           required=True)
     required.add_argument('-sequence_type', action='store', dest='sequence_type', default='DNA',choices=['AA', 'DNA'],
                           help='Default - DNA: Are groups "DNA" or "AA" sequences?',
-                          required=
+                          required=True)
+    required.add_argument('-genome_num', action='store', dest='genome_num', type=int,
+                          help='The total number of genomes must be provide',
+                          required=True)
     required.add_argument('-output_dir', action='store', dest='output_dir',
                           help='Output directory.',
                           required=True)

-  [... 7 removed lines; their content is not shown in this diff rendering ...]
+    regrouping_params = parser.add_argument_group('Regrouping Parameters')
+    regrouping_params.add_argument('-groups', action="store", dest='groups', default=None,
+                                   help='Default - auto: Detect groups to be split (see -group_threshold). '
+                                        'Provide "-groups 1,2,3,4" with group IDs to split specific groups.',
+                                   required=False)
+    regrouping_params.add_argument('-group_threshold', action='store', dest='group_threshold', type=float, default=80,
+                                   help='Minimum percentage of genomes with multi-copy (default: 80.0) - Does not work with "-groups"')
+
+    cdhit_params = parser.add_argument_group('CD-HIT Reclustering Parameters')
+    cdhit_params.add_argument('-c', action='store', dest='pident', type=float, default=0.8,
+                              help='Sequence identity threshold (default: 0.8) - Probably should be higher than what was used in initial clustering.')
+    cdhit_params.add_argument('-s', action='store', dest='len_diff', type=float, default=0.20,
+                              help="Length difference cutoff (default: 0.20) - Often the most impactful parameter to split 'multi-copy' gene groups.")
+    cdhit_params.add_argument('-T', action='store', dest='clustering_threads', type=int, default=4,
                               help='Number of threads for clustering (default: 4)')
-
+    cdhit_params.add_argument('-M', action='store', dest='clustering_memory', type=int, default=2000,
                               help='Memory limit in MB for clustering (default: 2000)')
-  [... 6 removed lines; their content is not shown in this diff rendering ...]
-    misc =
-
-                      help='Print out version number and exit',
+
+
+    misc = parser.add_argument_group("Misc Parameters")
+    misc.add_argument('-no_delete_temp_files', action='store_false', dest='delete_temp_files',
+                      help='Default: Delete all temporary files after processing.',
+                      required=False)
+    misc.add_argument("-verbose", action="store_true", dest="verbose" ,
+                      help="Print verbose output.",
                       required=False)
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq: Group-Splitter version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")
+

     options = parser.parse_args()
+    print("Running PyamilySeq: Group-Splitter " + PyamilySeq_Version)

-    # Check for version flag
-    if options.version:
-        print(f"Group-Splitter version {PyamilySeq_Version}")
-        exit(0)

-    options = parser.parse_args()

     if not os.path.exists(options.output_dir):
         os.makedirs(options.output_dir)

@@ -341,10 +374,9 @@ def main():
     else:
         clustering_mode = 'cd-hit'

-    separate_groups(options
-
-    print("Done")
+    separate_groups(options, clustering_mode)


 if __name__ == "__main__":
+
     main()
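
The reworked `calculate_new_rep_seq()` above scores every member of a cluster against the cluster averages and keeps the minimum. A small self-contained sketch of that scoring (re-implemented here with invented entries, not imported from the package) shows how the representative is picked:

```python
# Stand-alone re-implementation of the scoring shown in the hunk above; all values are made up.
cluster_data = [
    {'header': '>Group_1|Genome_A|gene_1', 'length': 900, 'percent_identity': 100.0},
    {'header': '>Group_1|Genome_B|gene_2', 'length': 910, 'percent_identity': 97.5},
    {'header': '>Group_1|Genome_C|gene_3', 'length': 450, 'percent_identity': 81.0},
]

avg_length = sum(e['length'] for e in cluster_data) / len(cluster_data)               # ~753.3
avg_identity = sum(e['percent_identity'] for e in cluster_data) / len(cluster_data)   # ~92.8
max_length = max(e['length'] for e in cluster_data)                                   # 910

def score(entry, length_weight=1.0, identity_weight=1.0):
    normalized_length_diff = abs(entry['length'] - avg_length) / max_length
    normalized_identity_diff = abs(entry['percent_identity'] - avg_identity) / 100
    return length_weight * normalized_length_diff + identity_weight * (1 - normalized_identity_diff)

rep = min(cluster_data, key=score)
# Scores come out at roughly 1.09 (Genome_A), 1.13 (Genome_B) and 1.22 (Genome_C),
# so min() returns the Genome_A entry as the new representative.
print(rep['header'])
```

The member-selection loop that follows in `separate_groups()` then ranks each genome's sequences by `levenshtein_distance_calc()` from utils.py, which now wraps the compiled `Levenshtein` package (declared in METADATA as `Requires-Dist: levenshtein`) and falls back to a pure-Python implementation; both paths return the same edit distance, e.g. `Levenshtein.distance("kitten", "sitting")` is 3.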
PyamilySeq/PyamilySeq.py
CHANGED
@@ -30,6 +30,7 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
         '-T', str(options.threads),
         '-M', str(options.clustering_memory),
         '-d', "0",
+        '-g', "1",
         '-sc', "1",
         '-sf', "1"
     ]

@@ -42,7 +43,7 @@ def run_cd_hit(options, input_file, clustering_output, clustering_mode):
 def main():
     parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': A tool that groups genes into unique clusters.')
     ### Required Arguments
-    required = parser.add_argument_group('Required
+    required = parser.add_argument_group('Required Parameters')
     required.add_argument('-run_mode', action='store', dest='run_mode', choices=['Full','Partial'],
                           help='Run Mode: Should PyamilySeq be run in "Full" or "Partial" mode?',
                           required=True)

@@ -56,7 +57,7 @@ def main():
                           help="Directory for all output files.",
                           required=True)
     ### Full-Mode Arguments
-    full_mode_args = parser.add_argument_group('Full-Mode
+    full_mode_args = parser.add_argument_group('Full-Mode Parameters - Required when "-run_mode Full" is used')
     full_mode_args.add_argument("-input_type", action="store", dest="input_type", choices=['separate', 'combined'],
                                 help="Type of input files: 'separate' for separate FASTA and GFF files,"
                                      " 'combined' for GFF files with embedded FASTA sequences.",

@@ -89,13 +90,13 @@ def main():
                                 required=False)

     ###Partial-Mode Arguments
-    partial_mode_args = parser.add_argument_group(
-    partial_mode_args.add_argument(
-                                   help=
+    partial_mode_args = parser.add_argument_group("Partial-Mode Parameters - Required when '-run_mode Partial' is used")
+    partial_mode_args.add_argument("-cluster_file", action="store", dest="cluster_file",
+                                   help="Clustering output file containing CD-HIT, TSV or CSV Edge List",
                                    required=False)

     ###Grouping Arguments
-    grouping_args = parser.add_argument_group('Grouping
+    grouping_args = parser.add_argument_group('Grouping Parameters - Use to fine-tune grouping of genes after clustering')
     grouping_args.add_argument('-reclustered', action='store', dest='reclustered',
                                help='Currently only works on Partial Mode: Clustering output file from secondary round of clustering.',
                                required=False)

@@ -129,19 +130,20 @@ def main():
                              required=False)

     ### Misc Arguments
-    misc = parser.add_argument_group(
-    misc.add_argument(
-                      help=
-                      required
-    misc.add_argument(
-
-
+    misc = parser.add_argument_group("Misc Parameters")
+    misc.add_argument("-verbose", action="store_true", dest="verbose",
+                      help="Print verbose output.",
+                      required=False)
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")
+

     options = parser.parse_args()
+    print("Running PyamilySeq: " + PyamilySeq_Version)

     ### Checking all required parameters are provided by user #!!# Doesn't seem to work
     if options.run_mode == 'Full':
-
         if options.reclustered != None:
             sys.exit("Currently reclustering only works on Partial Mode.")
         required_full_mode = [options.input_type, options.input_dir, options.name_split, options.clustering_format,

@@ -291,5 +293,4 @@ def main():
           "Please report any issues to: https://github.com/NickJD/PyamilySeq/issues\n#####")

 if __name__ == "__main__":
-    print("Running PyamilySeq "+PyamilySeq_Version)
     main()
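
Two notes on the hunks above. The `'-g', "1"` element added to the CD-HIT command lists tells CD-HIT to assign each sequence to its most similar cluster rather than the first one that meets the threshold (slower but more accurate), per CD-HIT's documentation. Separately, the new `-v/--version` arguments use argparse's built-in `version` action, which is why the hand-rolled version check at the bottom of `main()` could be dropped; a minimal, generic sketch of that stdlib behaviour (not PyamilySeq-specific) is:

```python
import argparse

parser = argparse.ArgumentParser(description="demo")
# action="version" prints the given string and exits, so no manual
# 'if options.version: ... exit(0)' block is needed afterwards.
parser.add_argument("-v", "--version", action="version",
                    version="my-tool vX.Y.Z - Exiting")

parser.parse_args(["-v"])  # prints 'my-tool vX.Y.Z - Exiting' and raises SystemExit
```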
PyamilySeq/Seq_Combiner.py
CHANGED
@@ -11,7 +11,7 @@ except (ModuleNotFoundError, ImportError, NameError, TypeError) as error:


 def main():
-    parser = argparse.ArgumentParser(description='
+    parser = argparse.ArgumentParser(description='PyamilySeq ' + PyamilySeq_Version + ': Seq-Combiner - A tool to extract sequences from GFF/FASTA files and prepare them for PyamilySeq.')
     ### Required Arguments
     required = parser.add_argument_group('Required Arguments')
     required.add_argument('-input_dir', action='store', dest='input_dir',

@@ -31,6 +31,7 @@ def main():
     required.add_argument("-output_name", action="store", dest="output_file",
                           help="Output file name.",
                           required=True)
+
     optional = parser.add_argument_group('Optional Arguments')
     optional.add_argument('-gene_ident', action='store', dest='gene_ident', default='CDS',
                           help='Default - "CDS": Identifier used for extraction of sequences such as "misc_RNA,gene,mRNA,CDS,rRNA,tRNA,tmRNA,CRISPR,ncRNA,regulatory_region,oriC,pseudo"'

@@ -40,9 +41,9 @@ def main():
                           help='Default - False: Translate extracted sequences to their AA counterpart?',
                           required=False)
     misc = parser.add_argument_group('Misc Arguments')
-    misc.add_argument(
-
-
+    misc.add_argument("-v", "--version", action="version",
+                      version=f"PyamilySeq: Seq-Combiner version {PyamilySeq_Version} - Exiting",
+                      help="Print out version number and exit")

     options = parser.parse_args()

@@ -50,6 +51,9 @@ def main():
         sys.exit(PyamilySeq_Version)

     output_path = os.path.abspath(options.output_dir)
+    if not os.path.exists(output_path):
+        os.makedirs(output_path)
+
     combined_out_file = os.path.join(output_path, options.output_file)

     if options.input_type == 'separate':
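
The new guard in the last hunk creates the output directory before writing to it. As a side note (a generic Python idiom, not a change made to the package), the existence check and the creation can be collapsed into a single call, which also avoids a race if the directory appears between the two statements:

```python
import os

output_path = os.path.abspath("example_output")  # hypothetical path for illustration
os.makedirs(output_path, exist_ok=True)  # creates missing parents; no error if it already exists
```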
PyamilySeq/utils.py
CHANGED
@@ -8,6 +8,40 @@ import sys
 from line_profiler_pycharm import profile
 import re

+####
+# Placeholder for the distance function
+levenshtein_distance_cal = None
+# Check for Levenshtein library once
+try:
+    import Levenshtein as LV
+    # Assign the optimized function
+    def levenshtein_distance_calc(seq1, seq2):
+        return LV.distance(seq1, seq2)
+except (ModuleNotFoundError, ImportError):
+    print("Levenshtein package not installed - Will fallback to slower Python implementation.")
+    # Fallback implementation
+    def levenshtein_distance_calc(seq1, seq2):
+        # Slower Python implementation of Levenshtein distance
+        len1, len2 = len(seq1), len(seq2)
+        dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]
+
+        for i in range(len1 + 1):
+            dp[i][0] = i
+        for j in range(len2 + 1):
+            dp[0][j] = j
+
+        for i in range(1, len1 + 1):
+            for j in range(1, len2 + 1):
+                if seq1[i - 1] == seq2[j - 1]:
+                    cost = 0
+                else:
+                    cost = 1
+                dp[i][j] = min(dp[i - 1][j] + 1,  # Deletion
+                               dp[i][j - 1] + 1,  # Insertion
+                               dp[i - 1][j - 1] + cost)  # Substitution
+
+        return dp[len1][len2]
+#####

 ################### We are currently fixed using Table 11
 gencode = {

@@ -32,63 +66,6 @@ def translate_frame(sequence):
     translate = ''.join([gencode.get(sequence[3 * i:3 * i + 3], 'X') for i in range(len(sequence) // 3)])
     return translate

-@profile
-def calculate_similarity(seq1, seq2):
-    len1, len2 = len(seq1), len(seq2)
-
-    # If lengths are the same, directly compare without alignment
-    if len1 == len2:
-        matches = sum(c1 == c2 for c1, c2 in zip(seq1, seq2))
-        return (matches / len1) * 100  # Return similarity based on the length
-
-    # For different lengths, proceed with global alignment
-    # Initialize the scoring matrix
-    score_matrix = [[0] * (len2 + 1) for _ in range(len1 + 1)]
-
-    # Fill the first row and first column with gap penalties
-    for i in range(len1 + 1):
-        score_matrix[i][0] = -i  # Gap penalty for seq1
-    for j in range(len2 + 1):
-        score_matrix[0][j] = -j  # Gap penalty for seq2
-
-    # Fill the score matrix
-    for i in range(1, len1 + 1):
-        for j in range(1, len2 + 1):
-            match = score_matrix[i - 1][j - 1] + (1 if seq1[i - 1] == seq2[j - 1] else -1)
-            delete = score_matrix[i - 1][j] - 1  # Gap in seq2
-            insert = score_matrix[i][j - 1] - 1  # Gap in seq1
-            score_matrix[i][j] = max(match, delete, insert)
-
-    # Traceback to find the alignment (if needed for detailed output)
-    aligned_seq1, aligned_seq2 = "", ""
-    i, j = len1, len2
-
-    while i > 0 or j > 0:
-        current_score = score_matrix[i][j]
-        if i > 0 and j > 0 and current_score == score_matrix[i - 1][j - 1] + (1 if seq1[i - 1] == seq2[j - 1] else -1):
-            aligned_seq1 += seq1[i - 1]
-            aligned_seq2 += seq2[j - 1]
-            i -= 1
-            j -= 1
-        elif i > 0 and current_score == score_matrix[i - 1][j] - 1:
-            aligned_seq1 += seq1[i - 1]
-            aligned_seq2 += "-"
-            i -= 1
-        else:
-            aligned_seq1 += "-"
-            aligned_seq2 += seq2[j - 1]
-            j -= 1
-
-    # Reverse the aligned sequences if needed
-    aligned_seq1 = aligned_seq1[::-1]
-    aligned_seq2 = aligned_seq2[::-1]
-
-    # Calculate matches from aligned sequences
-    matches = sum(c1 == c2 for c1, c2 in zip(aligned_seq1, aligned_seq2))
-
-    # Calculate the similarity percentage based on the maximum length
-    max_length = max(len(seq1), len(seq2))
-    return (matches / max_length) * 100


@@ -119,15 +96,16 @@ def fix_path(path):


 def extract_identity(clustered_info):
-    # Use
-    match = re.search(r'at
+    # Use regex to capture percentage, including optional '-' or '+' before it
+    match = re.search(r'at [+-/]*(\d+\.\d+)%', clustered_info)

     if match:
-        percent_identity = float(match.group(
+        percent_identity = float(match.group(1))  # Extract the percentage value
         return percent_identity
     else:
         raise ValueError("Percent identity not found in the string.")

+
 def wrap_sequence(sequence, width=60):
     wrapped_sequence = []
     for i in range(0, len(sequence), width):

@@ -415,8 +393,6 @@ def process_gene_families(options, directory, output_file):
         aligned_sequences = read_fasta(aligned_file)
         for genome, aligned_seq in aligned_sequences.items():
             genome_name = genome.split('|')[0]
-            if 'Group' in genome_name:
-                print(2)
             if genome_name not in concatenated_sequences:
                 concatenated_sequences[genome_name] = ""
             concatenated_sequences[genome_name] += aligned_seq

{PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PyamilySeq
-Version: 0.8.1
+Version: 0.9.0
 Summary: PyamilySeq - A a tool to look for sequence-based gene groups identified by clustering methods such as CD-HIT, DIAMOND, BLAST or MMseqs2.
 Home-page: https://github.com/NickJD/PyamilySeq
 Author: Nicholas Dimonaco

@@ -12,6 +12,7 @@ Classifier: Operating System :: OS Independent
 Requires-Python: >=3.6
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: levenshtein

 # PyamilySeq - !BETA!
 **PyamilySeq** is a Python tool for clustering gene sequences into groups based on sequence similarity identified by tools such as CD-HIT, BLAST, DIAMOND or MMseqs2.

@@ -34,19 +35,18 @@ PyamilySeq probably requires Python 3.6 or higher. Install using pip:
 ```bash
 pip install PyamilySeq
 ```
-
+PyamilySeq is regularly updated with bugfixes and new features so to update to the newest version add '-U' to end of the pip install command.
 ## Example usage: Below are two examples of running PyamilySeq in its two main modes.
 ### 'Full Mode': Will conduct clustering of sequences with CD-HIT as part of PyamilySeq run
 ```
 PyamilySeq -run_mode Full -group_mode Species -clustering_format CD-HIT -output_dir .../test_data/testing/Full
--input_type combined -input_dir .../test_data/genomes -name_split _combined.gff3 -pid 0.95 -len_diff 0.80
--gpa -a -w 99
+-input_type combined -input_dir .../test_data/genomes -name_split _combined.gff3 -pid 0.95 -len_diff 0.80 -a -w 99
 ```
 ### 'Partial Mode': Will take the output of a sequence clustering.
 ```
-PyamilySeq -run_mode Partial -group_mode Species -clustering_format TSV -output_dir .../test_data/
--cluster_file .../test_data/
--original_fasta .../test_data/species/combined_Ensmbl_cds.fasta -
+PyamilySeq -run_mode Partial -group_mode Species -clustering_format TSV -output_dir .../test_data/species/testing/Partial
+-cluster_file .../test_data/species/MMseqs2/combined_Ensmbl_pep_cluster.tsv
+-original_fasta .../test_data/species/combined_Ensmbl_cds.fasta -a -w 99 -verbose

 ```
 #### Note: '-clustering_format TSV/CSV' requires input to be two in two columns as below (Same format as MMseqs2 tsv) - Genome name and sequence name are separated by '|'.

@@ -58,7 +58,7 @@ Escherichia_coli_110957|ENSB:TIZS9kbTvShDvyX Escherichia_coli_110957|ENSB:TIZS9k
 ```
 ### Example output:
 ```
-Running PyamilySeq v0.8.1
+Running PyamilySeq v0.9.0
 Calculating Groups
 Gene Groups:
 First_core_99: 2682

@@ -80,7 +80,7 @@ PyamilySeq -run_mode Partial -group_mode Genus -clustering_format CD-HIT -output
 -cluster_file .../test_data/genus/CD-HIT/combined_cds_cd-hit_80_60.clstr -gpa
 ```
 ```commandline
-Running PyamilySeq v0.8.1
+Running PyamilySeq v0.9.0
 Calculating Groups
 Genus Groups:
 First_genera_1: 28549

@@ -137,14 +137,14 @@ Please report any issues to: https://github.com/NickJD/PyamilySeq/issues
 ## PyamilySeq - Menu:
 ### PyamilySeq is separated into two main 'run modes', Full and Partial. They each have their own set of required and optional arguments.
 ```
-Running PyamilySeq v0.8.1
+Running PyamilySeq v0.9.0
 usage: PyamilySeq.py [-h] -run_mode {Full,Partial} -group_mode {Species,Genus} -clustering_format {CD-HIT,TSV,CSV} -output_dir OUTPUT_DIR
                      [-input_type {separate,combined}] [-input_dir INPUT_DIR] [-name_split NAME_SPLIT] [-sequence_type {AA,DNA}] [-gene_ident GENE_IDENT]
                      [-pid PIDENT] [-len_diff LEN_DIFF] [-mem CLUSTERING_MEMORY] [-t CLUSTERING_THREADS] [-cluster_file CLUSTER_FILE]
                      [-reclustered RECLUSTERED] [-seq_tag SEQUENCE_TAG] [-core_groups CORE_GROUPS] [-genus_groups GENUS_GROUPS] [-w WRITE_GROUPS] [-a]
                      [-original_fasta ORIGINAL_FASTA] [-gpa] [-verbose] [-v]

-PyamilySeq v0.8.1: A tool that groups genes into unique clusters.
+PyamilySeq v0.9.0: A tool that groups genes into unique clusters.

 options:
   -h, --help            show this help message and exit

@@ -198,15 +198,16 @@ Output Parameters:
   -w WRITE_GROUPS       Default - No output: Output sequences of identified groups (provide levels at which to output - Species "-w 99,95" Genus "-w 2,3" -
                         Must provide FASTA file with -original_fasta if in Partial run mode.
   -a                    Default - No output: SLOW! (Only works for Species mode) Output aligned and concatinated sequences of identified groups -provide
-                        group levels at which to output "-w 99,95" - Must provide FASTA file with -original_fasta in
+                        group levels at which to output "-w 99,95" - Must provide FASTA file with -original_fasta in Partialrun mode.
   -original_fasta ORIGINAL_FASTA
-                        FASTA file to use in conjunction with "-w" or "-
-  -
-
+                        FASTA file to use in conjunction with "-w" or "-con" when running in Partial Mode.
+  -no_gpa               Do not create a Roary/Panaroo formatted gene_presence_absence.csv (created by default) - Required for Coinfinder and other
+                        downstream tools
+
+Misc Parameters:
+  -verbose              Print verbose output.
+  -v, --version         Print out version number and exit

-Misc:
-  -verbose              Default - False: Print out runtime messages
-  -v                    Default - False: Print out version number and exit
 ```

@@ -216,13 +217,14 @@ Misc:
 ## Seq-Combiner: This tool is provided to enable the pre-processing of multiple GFF/FASTA files together ready to be clustered by the user.
 ### Example:
 ```bash
-Seq-Combiner -input_dir .../test_data/genomes -name_split
+Seq-Combiner -input_dir .../test_data/genomes -name_split .gff3 -output_dir .../test_data/genomes -output_name combine_fasta_seqs.fa -input_type combined
 ```
 ### Seq-Combiner Menu:
 ```
-usage: Seq_Combiner.py [-h] -input_dir INPUT_DIR -input_type {separate,combined,fasta} -name_split NAME_SPLIT -output_dir OUTPUT_DIR -output_name
+usage: Seq_Combiner.py [-h] -input_dir INPUT_DIR -input_type {separate,combined,fasta} -name_split NAME_SPLIT -output_dir OUTPUT_DIR -output_name
+                       OUTPUT_FILE [-gene_ident GENE_IDENT] [-translate] [-v]

-
+PyamilySeq v0.9.0: Seq-Combiner - A tool to extract sequences from GFF/FASTA files and prepare them for PyamilySeq.

 options:
   -h, --help            show this help message and exit

@@ -230,7 +232,8 @@ options:
 Required Arguments:
   -input_dir INPUT_DIR  Directory location where the files are located.
   -input_type {separate,combined,fasta}
-                        Type of input files: "separate" for separate FASTA and GFF files, "combined" for GFF files with embedded FASTA sequences and "fasta"
+                        Type of input files: "separate" for separate FASTA and GFF files, "combined" for GFF files with embedded FASTA sequences and "fasta"
+                        for combining multiple FASTA files together.
   -name_split NAME_SPLIT
                         substring used to split the filename and extract the genome name ('_combined.gff3' or '.gff').
   -output_dir OUTPUT_DIR

@@ -240,48 +243,103 @@ Required Arguments:

 Optional Arguments:
   -gene_ident GENE_IDENT
-                        Default - "CDS": Identifier used for extraction of sequences such as
+                        Default - "CDS": Identifier used for extraction of sequences such as
+                        "misc_RNA,gene,mRNA,CDS,rRNA,tRNA,tmRNA,CRISPR,ncRNA,regulatory_region,oriC,pseudo" - Not compatible with "fasta" input mode.
   -translate            Default - False: Translate extracted sequences to their AA counterpart?

 Misc Arguments:
-  -v
-
+  -v, --version         Print out version number and exit

 ```

-
-
+## Group-Splitter: This tool can split multi-copy gene groups using CD-HIT after initial PyamilySeq analysis.
+### Example:
+```bash
+Group-Splitter -genome_num 74 -input_fasta .../test/species/ -output_dir .../test/species/ -sequence_type AA
+```
+### Group-Splitter Menu:
 ```
-usage: Group_Splitter.py [-h] -input_fasta INPUT_FASTA -
-
+usage: Group_Splitter.py [-h] -input_fasta INPUT_FASTA -sequence_type {AA,DNA}
+                         -genome_num GENOME_NUM -output_dir OUTPUT_DIR
+                         [-groups GROUPS] [-group_threshold GROUP_THRESHOLD]
+                         [-c PIDENT] [-s LEN_DIFF] [-T CLUSTERING_THREADS]
+                         [-M CLUSTERING_MEMORY] [-no_delete_temp_files]
+                         [-verbose] [-v]

-
+PyamilySeq v0.9.0: Group-Splitter - A tool to split multi-copy gene groups
+identified by PyamilySeq.

 options:
   -h, --help            show this help message and exit

-Required
+Required Parameters:
   -input_fasta INPUT_FASTA
                         Input FASTA file containing gene groups.
   -sequence_type {AA,DNA}
                         Default - DNA: Are groups "DNA" or "AA" sequences?
+  -genome_num GENOME_NUM
+                        The total number of genomes must be provide
   -output_dir OUTPUT_DIR
                         Output directory.

-
-  -
-
-
+Regrouping Parameters:
+  -groups GROUPS        Default - auto: Detect groups to be split (see
+                        -group_threshold). Provide "-groups 1,2,3,4" with
+                        group IDs to split specific groups.
+  -group_threshold GROUP_THRESHOLD
+                        Minimum percentage of genomes with multi-copy
+                        (default: 80.0) - Does not work with "-groups"
+
+CD-HIT Reclustering Parameters:
+  -c PIDENT             Sequence identity threshold (default: 0.8) - Probably
+                        should be higher than what was used in initial
+                        clustering.
+  -s LEN_DIFF           Length difference cutoff (default: 0.20) - Often the
+                        most impactful parameter to split 'multi-copy' gene
+                        groups.
+  -T CLUSTERING_THREADS
                         Number of threads for clustering (default: 4)
-  -
-
-
-
+  -M CLUSTERING_MEMORY  Memory limit in MB for clustering (default: 2000)
+
+Misc Parameters:
+  -no_delete_temp_files
+                        Default: Delete all temporary files after processing.
   -verbose              Print verbose output.
-  -
+  -v, --version         Print out version number and exit
+
+```
+
+## Cluster-Summary menu: This tool can be used to summarise CD-HIT .clstr files:
+### Example:
+```bash
+Cluster-Summary -genome_num 74 -input_clstr .../test_data/species/E-coli/E-coli_extracted_pep_cd-hit_80.clstr -output_tsv .../test_data/species/E-coli/E-coli_extracted_pep_cd-hit_80_Summary.tsv
+```
+### Cluster-Summary Menu:
+```
+usage: Cluster_Summary.py [-h] -input_clstr INPUT_CLSTR -output OUTPUT -genome_num GENOME_NUM
+                          [-output_dir OUTPUT_DIR] [-verbose] [-v]
+
+PyamilySeq v0.9.0: Cluster-Summary - A tool to summarise CD-HIT clustering files.
+
+options:
+  -h, --help            show this help message and exit
+
+Required Parameters:
+  -input_clstr INPUT_CLSTR
+                        Input CD-HIT .clstr file
+  -output OUTPUT        Output TSV file to store cluster summaries - Will add '.tsv' if not
+                        provided by user
+  -genome_num GENOME_NUM
+                        The total number of genomes must be provide
+
+Optional Arguments:
+  -output_dir OUTPUT_DIR
+                        Default: Same as input file
+
+Misc Parameters:
+  -verbose              Print verbose output.
+  -v, --version         Print out version number and exit

-Misc Arguments:
-  -v                    Print out version number and exit
 ```

 ### All example input and output data can be found in the 'test_data' directory.

PyamilySeq-0.9.0.dist-info/RECORD
ADDED

@@ -0,0 +1,16 @@
+PyamilySeq/Cluster_Summary.py,sha256=OwAPjODoFIECQUGuPywXORdQn-wHqyRnIIhxSzLTm2E,6982
+PyamilySeq/Constants.py,sha256=ubY4rmpkQIwxfY6vq4rjO34PtlQPWEJyinwFke3BSGE,31
+PyamilySeq/Group_Splitter.py,sha256=QQD5gK1QhMDlqMhLvLWsq-Eh8-k2vC-h4L8bqdkGpXE,17445
+PyamilySeq/PyamilySeq.py,sha256=Frl21S-l4fZdDLFoqeTxB5QqMdsKq5VSQv98Xf_uxMU,15283
+PyamilySeq/PyamilySeq_Genus.py,sha256=hC34cHIFu8YaXXgcPyVwuWENlsxx-7mT-Qr6PAdio4U,12414
+PyamilySeq/PyamilySeq_Species.py,sha256=spgS-h-lrySZBiOiB6jX6pPRaL5j8f5V1Hq3XOjBOko,14404
+PyamilySeq/Seq_Combiner.py,sha256=hMXmA-M3tduONX4pM5qDb2dzBIFLdsIsWLezejxowhQ,3521
+PyamilySeq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+PyamilySeq/clusterings.py,sha256=rcWFv0IiWoS4aUNRjDDwNEL86l1wIKa4vK4htAxy8Hg,18787
+PyamilySeq/utils.py,sha256=sjsx5oAIPacvVbfURqPwoq7XfZIk9V_PhGugBVT6jLE,18626
+PyamilySeq-0.9.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+PyamilySeq-0.9.0.dist-info/METADATA,sha256=gsO5symEXI7C8SGzLD2SfyIcCt9yYmbXBIoBCU05BL8,16958
+PyamilySeq-0.9.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+PyamilySeq-0.9.0.dist-info/entry_points.txt,sha256=KuGG_QEvagQHf-Ftohb1oItkx_SknDq66wcOiBqb7PY,200
+PyamilySeq-0.9.0.dist-info/top_level.txt,sha256=J6JhugUQTq4rq96yibAlQu3o4KCM9WuYfqr3w1r119M,11
+PyamilySeq-0.9.0.dist-info/RECORD,,

PyamilySeq-0.8.1.dist-info/RECORD
REMOVED

@@ -1,15 +0,0 @@
-PyamilySeq/Constants.py,sha256=J_jZheqHCbmFVCLrY8nMe4T5VZQOQ7PbT_HmYSi58WM,31
-PyamilySeq/Group_Splitter.py,sha256=wrz-vcQ2gJ40MLLczFY8te35_uYrOBuh2v-fJSIVsWo,15578
-PyamilySeq/PyamilySeq.py,sha256=OAtz6b7dnvA-Qg0dnf2JXImiOtsDrDfVit7Q6DFbuPU,15265
-PyamilySeq/PyamilySeq_Genus.py,sha256=hC34cHIFu8YaXXgcPyVwuWENlsxx-7mT-Qr6PAdio4U,12414
-PyamilySeq/PyamilySeq_Species.py,sha256=spgS-h-lrySZBiOiB6jX6pPRaL5j8f5V1Hq3XOjBOko,14404
-PyamilySeq/Seq_Combiner.py,sha256=dPDu6LlT3B-ZDn3wKZ3AeWraDgv2Tub_16l9CLc3tQ0,3353
-PyamilySeq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-PyamilySeq/clusterings.py,sha256=rcWFv0IiWoS4aUNRjDDwNEL86l1wIKa4vK4htAxy8Hg,18787
-PyamilySeq/utils.py,sha256=vjPSIua4E72JTWlzH4CUaRcR-Z6Nr-RQ9N_92tfZI_w,19686
-PyamilySeq-0.8.1.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-PyamilySeq-0.8.1.dist-info/METADATA,sha256=weIjFQkc7ggqkPlPkSA5an8eFiUzhDyxGl9t7-rJPsA,14555
-PyamilySeq-0.8.1.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-PyamilySeq-0.8.1.dist-info/entry_points.txt,sha256=15BsozBN6vRWvZeQon05dY4YQT7DqP5i2TUqFWRGCvc,150
-PyamilySeq-0.8.1.dist-info/top_level.txt,sha256=J6JhugUQTq4rq96yibAlQu3o4KCM9WuYfqr3w1r119M,11
-PyamilySeq-0.8.1.dist-info/RECORD,,

{PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/LICENSE
File without changes

{PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/WHEEL
File without changes

{PyamilySeq-0.8.1.dist-info → PyamilySeq-0.9.0.dist-info}/top_level.txt
File without changes