PyamilySeq 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PyamilySeq/Cluster_Summary.py +163 -0
- PyamilySeq/Group_Splitter.py +571 -0
- PyamilySeq/PyamilySeq.py +316 -0
- PyamilySeq/PyamilySeq_Genus.py +242 -0
- PyamilySeq/PyamilySeq_Species.py +309 -0
- PyamilySeq/Seq_Combiner.py +66 -0
- PyamilySeq/Seq_Extractor.py +64 -0
- PyamilySeq/Seq_Finder.py +56 -0
- PyamilySeq/__init__.py +0 -0
- PyamilySeq/clusterings.py +452 -0
- PyamilySeq/constants.py +2 -0
- PyamilySeq/utils.py +566 -0
- PyamilySeq-1.0.1.dist-info/METADATA +381 -0
- PyamilySeq-1.0.1.dist-info/RECORD +18 -0
- PyamilySeq-1.0.1.dist-info/entry_points.txt +7 -0
- PyamilySeq-1.0.1.dist-info/top_level.txt +1 -0
- PyamilySeq-1.0.0.dist-info/METADATA +0 -17
- PyamilySeq-1.0.0.dist-info/RECORD +0 -6
- PyamilySeq-1.0.0.dist-info/entry_points.txt +0 -2
- PyamilySeq-1.0.0.dist-info/top_level.txt +0 -1
- {PyamilySeq-1.0.0.dist-info → PyamilySeq-1.0.1.dist-info}/LICENSE +0 -0
- {PyamilySeq-1.0.0.dist-info → PyamilySeq-1.0.1.dist-info}/WHEEL +0 -0
PyamilySeq/utils.py
ADDED
|
@@ -0,0 +1,566 @@
|
|
|
1
|
+
import subprocess
|
|
2
|
+
import shutil
|
|
3
|
+
import os
|
|
4
|
+
import glob
|
|
5
|
+
import collections
|
|
6
|
+
from tempfile import NamedTemporaryFile
|
|
7
|
+
import sys
|
|
8
|
+
import re
|
|
9
|
+
import math
|
|
10
|
+
|
|
11
|
+
####
# Bind levenshtein_distance_calc to the fast C implementation when the
# optional python-Levenshtein package is available, otherwise to a pure-Python
# dynamic-programming fallback.
# BUGFIX: the placeholder was misspelled 'levenshtein_distance_cal', leaving a
# dead module-level name.
levenshtein_distance_calc = None
# Check for Levenshtein library once
try:
    import Levenshtein as LV

    # Assign the optimized function
    def levenshtein_distance_calc(seq1, seq2):
        """Return the Levenshtein edit distance between seq1 and seq2."""
        return LV.distance(seq1, seq2)
except (ModuleNotFoundError, ImportError):
    print("Levenshtein package not installed - Will fallback to slower Python implementation.")

    # Fallback implementation
    def levenshtein_distance_calc(seq1, seq2):
        """Pure-Python Levenshtein distance (slower fallback).

        Classic O(len1*len2) dynamic-programming table; dp[i][j] is the edit
        distance between the first i chars of seq1 and first j chars of seq2.
        """
        len1, len2 = len(seq1), len(seq2)
        dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]

        # Base cases: distance to/from the empty string is the length.
        for i in range(len1 + 1):
            dp[i][0] = i
        for j in range(len2 + 1):
            dp[0][j] = j

        for i in range(1, len1 + 1):
            for j in range(1, len2 + 1):
                cost = 0 if seq1[i - 1] == seq2[j - 1] else 1
                dp[i][j] = min(dp[i - 1][j] + 1,         # Deletion
                               dp[i][j - 1] + 1,         # Insertion
                               dp[i - 1][j - 1] + cost)  # Substitution

        return dp[len1][len2]
#####
|
|
45
|
+
|
|
46
|
+
################### We are currently fixed using Table 11
# Codon -> single-letter amino-acid lookup ('*' marks stop codons).  Used by
# translate_frame() and translate_dna_to_aa(); codons missing from this table
# are rendered as 'X' by the callers.
codon_table = {
    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
    'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
    'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W'}
|
64
|
+
|
|
65
|
+
def translate_frame(sequence):
    """Translate a nucleotide sequence (reading frame 1) into amino acids.

    Each complete codon is looked up in the module-level codon_table; codons
    not present in the table are rendered as 'X'.  Trailing bases that do not
    form a full codon are ignored.
    """
    usable = len(sequence) - len(sequence) % 3
    codons = (sequence[pos:pos + 3] for pos in range(0, usable, 3))
    return ''.join(codon_table.get(codon, 'X') for codon in codons)
|
|
68
|
+
|
|
69
|
+
def translate_dna_to_aa(dna_fasta, aa_fasta):
    """Translate every record of a DNA FASTA file into a protein FASTA file.

    Reads *dna_fasta* record by record, translates each sequence codon by
    codon via the module-level codon_table ('X' for unknown codons, trailing
    partial codons dropped) and writes 60-column wrapped amino-acid records
    to *aa_fasta*.
    """
    def _translate(dna_seq):
        # 'len - 2' stops the walk before an incomplete trailing codon.
        return ''.join(codon_table.get(dna_seq[pos:pos + 3], 'X')
                       for pos in range(0, len(dna_seq) - 2, 3))

    def _emit(outfile, header, dna_seq):
        # Translate, wrap and write one finished record.
        aa_seq = _translate(dna_seq)
        outfile.write(f"{header}\n{wrap_sequence(aa_seq, 60)}\n")

    with open(dna_fasta, 'r') as infile, open(aa_fasta, 'w') as outfile:
        header = ""
        dna_seq = ""
        for line in infile:
            if line.startswith('>'):
                if dna_seq:
                    _emit(outfile, header, dna_seq)
                header = line.strip()
                dna_seq = ""
            else:
                dna_seq += line.strip()
        # Flush the final record (files do not end with a new header).
        if dna_seq:
            _emit(outfile, header, dna_seq)
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def detect_sequence_type(fasta_file):
    """Heuristically decide whether *fasta_file* holds DNA or amino acids.

    Scans every non-header line for residues that only occur in protein
    alphabets (E, F, I, L, P, Q, Z).  Returns True for DNA, False when a
    protein-specific residue is found.
    """
    amino_only = set('EFILPQZ')
    with open(fasta_file, 'r') as handle:
        for record_line in handle:
            if record_line.startswith('>'):
                continue  # headers may contain anything; skip them
            if amino_only.intersection(record_line):
                return False  # Contains amino acids
    return True  # Contains DNA
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def is_tool_installed(tool_name):
    """Check if a tool is installed and available in PATH."""
    # Not resolvable on PATH -> definitely unavailable.
    if shutil.which(tool_name) is None:
        return False

    # Confirm the executable actually runs.
    try:
        subprocess.run([tool_name, '--version'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
    except subprocess.CalledProcessError:
        # A non-zero exit still proves the binary exists and is executable.
        return True
    except FileNotFoundError:
        # Unreachable in practice given the which() check above.
        return False
    return True
|
|
120
|
+
|
|
121
|
+
def reverse_complement(seq):
    """Return the reverse complement of a DNA sequence (A/T/G/C/N only)."""
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
    out = []
    for base in reversed(seq):
        out.append(pairs[base])
    return ''.join(out)
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def fix_path(path):
    """Return the canonical form of *path*: normalised, with symlinks resolved."""
    return os.path.realpath(os.path.normpath(path))
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def extract_identity(clustered_info):
    """Extract the percent identity from a CD-HIT style cluster line.

    Matches e.g. '... at +/98.50%' or '... at 100.00%' (an optional strand
    marker may precede the number).  Raises ValueError when no percentage is
    present.
    """
    hit = re.search(r'at [+-/]*(\d+\.\d+)%', clustered_info)
    if hit is None:
        raise ValueError("Percent identity not found in the string.")
    return float(hit.group(1))
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def wrap_sequence(sequence, width=60):
    """Return *sequence* split into newline-joined chunks of *width* characters."""
    chunks = (sequence[start:start + width] for start in range(0, len(sequence), width))
    return "\n".join(chunks)
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def read_genomes_from_fasta(fasta_file):
    """Return the distinct genome names found in a FASTA file's headers.

    The genome name is taken as the second '|'-separated field of each
    header (e.g. '>Group_1|genomeA|gene1') — assumes the combined-group
    header layout used elsewhere in this module.
    """
    genomes = set()
    with open(fasta_file, 'r') as handle:
        for raw in handle:
            text = raw.strip()
            if text.startswith('>'):
                genomes.add(text.split('|')[1])
    return list(genomes)
|
|
159
|
+
|
|
160
|
+
def read_fasta(fasta_file):
    """Parse a FASTA file into a {header-without-'>': sequence} dict.

    Multi-line sequences are concatenated; blank lines are ignored.
    Insertion order follows the file's record order.
    """
    sequences = {}
    current = None
    with open(fasta_file, 'r') as handle:
        for raw in handle:
            text = raw.strip()
            if not text:
                continue  # Skip empty lines
            if text.startswith('>'):
                current = text[1:]  # drop the '>' marker
                sequences[current] = ''
            else:
                sequences[current] += text
    return sequences
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def reorder_dict_by_keys(original_dict, sorted_keys):
    """Return a new dict holding *sorted_keys* (in that order) mapped from original_dict."""
    reordered = {}
    for key in sorted_keys:
        reordered[key] = original_dict[key]
    return reordered
|
|
178
|
+
def custom_sort_key(k, dict1, dict2):
    """Sort key for *k*: (size of dict1[k], size of dict2[k])."""
    primary = len(dict1[k])
    secondary = len(dict2[k])
    return (primary, secondary)
|
|
180
|
+
|
|
181
|
+
def sort_keys_by_values(dict1, dict2):
    """Return dict1's keys ordered largest-first by custom_sort_key."""
    def _by_size(candidate):
        return custom_sort_key(candidate, dict1, dict2)

    return sorted(dict1.keys(), key=_by_size, reverse=True)
|
|
184
|
+
|
|
185
|
+
def select_longest_gene(sequences, subgrouped):
    """Select the longest sequence for each genome.

    The genome name is parsed from each '|'-separated sequence ID: field 0
    normally, field 1 when the IDs come from sub-grouped (paralog-split)
    files.  Returns {genome: (seq_id, sequence)}; ties keep the first seen.
    """
    field = 1 if subgrouped else 0
    longest_sequences = {}
    for seq_id, sequence in sequences.items():
        genome = seq_id.split('|')[field]
        best = longest_sequences.get(genome)
        if best is None or len(sequence) > len(best[1]):
            longest_sequences[genome] = (seq_id, sequence)
    return longest_sequences
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def run_mafft_on_sequences(options, sequences, output_file):
    """Align *sequences* with MAFFT and write the alignment to *output_file*.

    Parameters:
    - options: parsed command-line options; uses options.threads and
      options.verbose.
    - sequences: {header: sequence} dict to align.
    - output_file: path that receives MAFFT's FASTA alignment.
    """
    # MAFFT reads from a file, so stage the sequences in a temporary FASTA.
    with NamedTemporaryFile('w', delete=False) as temp_input_file:
        for header, sequence in sequences.items():
            temp_input_file.write(f">{header}\n{sequence}\n")
        temp_input_file_path = temp_input_file.name

    # Previously two near-identical subprocess.run calls differed only in the
    # stderr target; pick the target once instead (MAFFT logs to stderr).
    stderr_target = sys.stderr if options.verbose == True else subprocess.DEVNULL

    # Run mafft
    try:
        with open(output_file, 'w') as output_f:
            subprocess.run(
                ['mafft', '--auto', '--thread', str(options.threads), temp_input_file_path],
                stdout=output_f,
                stderr=stderr_target,
                check=True
            )
    finally:
        os.remove(temp_input_file_path)  # Clean up the temporary file
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def read_separate_files(input_dir, name_split, gene_ident, combined_out, translate):
    """Extract gene sequences from paired GFF + '.fa' genome files.

    For every '<name>{name_split}' GFF file in *input_dir* with a matching
    '<name>.fa' FASTA, excises the features whose type matches *gene_ident*
    (reverse-complemented for '-' strand), writes the DNA to *combined_out*
    as '>genome|seq_id' records and, when *translate* is True, also writes
    translations to the companion '_aa.fasta' file (deleted otherwise).

    Parameters:
    - input_dir: directory holding the GFF and FASTA files.
    - name_split: suffix identifying GFF files (genome name = basename before it).
    - gene_ident: iterable of feature types to keep (matched against GFF column 3).
    - combined_out: output DNA FASTA path (must end '_dna.fasta').
    - translate: when True, also emit amino-acid sequences.
    """
    with open(combined_out, 'w') as combined_out_file, \
            open(combined_out.replace('_dna.fasta', '_aa.fasta'), 'w') as combined_out_file_aa:
        for gff_file in glob.glob(os.path.join(input_dir, '*' + name_split)):
            genome_name = os.path.basename(gff_file).split(name_split)[0]
            corresponding_fasta_file = os.path.splitext(gff_file)[0] + '.fa'
            if not os.path.exists(corresponding_fasta_file):
                continue

            gff_features = []
            with open(gff_file, 'r') as file:
                seen_seq_ids = collections.defaultdict(int)
                for line in file.readlines():
                    line_data = line.split('\t')
                    if len(line_data) == 9:
                        if any(gene_type in line_data[2] for gene_type in gene_ident):
                            contig = line_data[0]
                            feature = line_data[2]
                            strand = line_data[6]
                            start, end = int(line_data[3]), int(line_data[4])
                            # BUGFIX: seq_id was used in the duplicate check before it
                            # was ever assigned (NameError); parse it first, and
                            # actually increment the occurrence counter (the old
                            # 'seen_seq_ids[seq_id] + 1' was a no-op).
                            seq_id = line_data[8].split('ID=')[1].split(';')[0]
                            if seq_id in seen_seq_ids:
                                suffix = seen_seq_ids[seq_id]
                                seen_seq_ids[seq_id] += 1
                                seq_id += '_' + str(suffix)
                            else:
                                seen_seq_ids[seq_id] = 1
                            gff_features.append((contig, start, end, strand, feature, seq_id))

            fasta_dict = collections.defaultdict(str)
            with open(corresponding_fasta_file, 'r') as file:
                for line in file.readlines():
                    if line.startswith('>'):
                        current_contig = line[1:].split()[0]
                        # [forward sequence, reverse complement (filled below)]
                        fasta_dict[current_contig] = ['', '']
                    else:
                        fasta_dict[current_contig][0] += line.strip()

            # Pre-compute the reverse complement once per contig.
            for contig, fasta in fasta_dict.items():
                fasta_dict[contig][1] = reverse_complement(fasta[0])

            if fasta_dict and gff_features:
                for contig, start, end, strand, feature, seq_id in gff_features:
                    if contig in fasta_dict:
                        if strand == '+':
                            seq = fasta_dict[contig][0][start - 1:end]
                        elif strand == '-':
                            # Map forward coordinates onto the reverse-complement strand.
                            corrected_start = max(len(fasta_dict[contig][0]) - int(end), 1)
                            corrected_stop = max(len(fasta_dict[contig][0]) - int(start - 1), 1)
                            seq = fasta_dict[contig][1][corrected_start:corrected_stop]
                        else:
                            continue  # unknown strand: skip rather than crash on unbound 'seq'

                        if translate == True:
                            seq_aa = translate_frame(seq)
                            wrapped_sequence_aa = '\n'.join([seq_aa[i:i + 60] for i in range(0, len(seq_aa), 60)])
                            combined_out_file_aa.write(f">{genome_name}|{seq_id}\n{wrapped_sequence_aa}\n")
                        wrapped_sequence = '\n'.join([seq[i:i + 60] for i in range(0, len(seq), 60)])
                        combined_out_file.write(f">{genome_name}|{seq_id}\n{wrapped_sequence}\n")

        if translate == False:
            # Clean up unused amino-acid file
            os.remove(combined_out_file_aa.name)
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
def read_combined_files(input_dir, name_split, gene_ident, combined_out, translate):
    """Extract gene sequences from combined GFF files (annotation + ##FASTA).

    Each '*{name_split}' file in *input_dir* must contain GFF feature lines
    followed by a '##FASTA' section with the genome sequence.  Features whose
    type matches *gene_ident* are excised (reverse-complemented for '-'
    strand), written to *combined_out* as '>genome|seq_id' records and, when
    *translate* is True, also translated into the companion '_aa.fasta' file
    (deleted otherwise).
    """
    with open(combined_out, 'w') as combined_out_file, \
            open(combined_out.replace('_dna.fasta', '_aa.fasta'), 'w') as combined_out_file_aa:
        for gff_file in glob.glob(os.path.join(input_dir, '*' + name_split)):
            genome_name = os.path.basename(gff_file).split(name_split)[0]
            fasta_dict = collections.defaultdict(str)
            gff_features = []
            with open(gff_file, 'r') as file:
                seen_seq_ids = collections.defaultdict(int)
                fasta_section = False
                for line in file.readlines():
                    if line.startswith('##FASTA'):
                        fasta_section = True
                        continue
                    if fasta_section:
                        if line.startswith('>'):
                            current_contig = line[1:].split()[0]
                            # [forward sequence, reverse complement (filled below)]
                            fasta_dict[current_contig] = ['', '']
                        else:
                            fasta_dict[current_contig][0] += line.strip()
                    else:
                        line_data = line.split('\t')
                        if len(line_data) == 9:
                            if any(gene_type in line_data[2] for gene_type in gene_ident):
                                contig = line_data[0]
                                feature = line_data[2]
                                strand = line_data[6]
                                start, end = int(line_data[3]), int(line_data[4])
                                seq_id = line_data[8].split('ID=')[1].split(';')[0]
                                if seq_id in seen_seq_ids:
                                    # BUGFIX: the old 'seen_seq_ids[seq_id] + 1' never
                                    # incremented the counter, so a third duplicate ID
                                    # would collide with the second.
                                    suffix = seen_seq_ids[seq_id]
                                    seen_seq_ids[seq_id] += 1
                                    seq_id += '_' + str(suffix)
                                else:
                                    seen_seq_ids[seq_id] = 1
                                gff_features.append((contig, start, end, strand, feature, seq_id))

            # Pre-compute the reverse complement once per contig.
            for contig, fasta in fasta_dict.items():
                fasta_dict[contig][1] = reverse_complement(fasta[0])

            if fasta_dict and gff_features:
                for contig, start, end, strand, feature, seq_id in gff_features:
                    if contig in fasta_dict:
                        if strand == '+':
                            seq = fasta_dict[contig][0][start - 1:end]
                        elif strand == '-':
                            # Map forward coordinates onto the reverse-complement strand.
                            corrected_start = max(len(fasta_dict[contig][0]) - int(end), 1)
                            corrected_stop = max(len(fasta_dict[contig][0]) - int(start - 1), 1)
                            seq = fasta_dict[contig][1][corrected_start:corrected_stop]
                        else:
                            continue  # unknown strand: skip rather than crash on unbound 'seq'

                        if translate == True:
                            seq_aa = translate_frame(seq)
                            wrapped_sequence_aa = '\n'.join([seq_aa[i:i + 60] for i in range(0, len(seq_aa), 60)])
                            combined_out_file_aa.write(f">{genome_name}|{seq_id}\n{wrapped_sequence_aa}\n")
                        wrapped_sequence = '\n'.join([seq[i:i + 60] for i in range(0, len(seq), 60)])
                        combined_out_file.write(f">{genome_name}|{seq_id}\n{wrapped_sequence}\n")

        if translate == False:
            # Clean up unused amino-acid file
            os.remove(combined_out_file_aa.name)
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def read_fasta_files(input_dir, name_split, combined_out, translate):
    """Combine per-genome FASTA files into a single multi-FASTA.

    Every '*{name_split}' file in *input_dir* is read; each record is written
    to *combined_out* with its header rewritten to '>genome|seq_id' and the
    sequence wrapped at 60 columns.  When *translate* is True an amino-acid
    companion file ('_aa.fasta') is written as well; otherwise it is deleted.
    """
    with open(combined_out, 'w') as combined_out_file, \
            open(combined_out.replace('_dna.fasta', '_aa.fasta'), 'w') as combined_out_file_aa:
        for fasta_file in glob.glob(os.path.join(input_dir, '*' + name_split)):
            genome_name = os.path.basename(fasta_file).split(name_split)[0]
            fasta_dict = collections.defaultdict(str)
            with open(fasta_file, 'r') as file:
                for line in file:
                    if line.startswith('>'):
                        # Sequence ID is the first whitespace-delimited token.
                        current_seq = line[1:].split()[0]
                        fasta_dict[current_seq] = ''
                    else:
                        fasta_dict[current_seq] += line.strip()
            for seq_id, seq in fasta_dict.items():
                if translate == True:
                    seq_aa = translate_frame(seq)
                    wrapped_sequence_aa = '\n'.join([seq_aa[i:i + 60] for i in range(0, len(seq_aa), 60)])
                    combined_out_file_aa.write(f">{genome_name}|{seq_id}\n{wrapped_sequence_aa}\n")
                wrapped_sequence = '\n'.join([seq[i:i + 60] for i in range(0, len(seq), 60)])
                combined_out_file.write(f">{genome_name}|{seq_id}\n{wrapped_sequence}\n")

        if translate == False:
            # Clean up unused file.
            # BUGFIX: os.remove() needs the path (file .name), not the file
            # object -- passing the object raised TypeError whenever
            # translate was False.
            os.remove(combined_out_file_aa.name)
|
|
385
|
+
|
|
386
|
+
def write_groups_func(options, output_dir, key_order, cores, sequences,
                      pangenome_clusters_First_sequences_sorted, combined_pangenome_clusters_Second_sequences):
    """
    Writes individual FASTA files and a combined FASTA file for all sequences.

    Parameters:
    - options: Command-line options.
    - output_dir: Directory where output FASTA files will be saved.
    - key_order: The order in which to process keys.
    - cores: Dictionary of core genes.
    - sequences: Dictionary mapping headers to sequences.
    - pangenome_clusters_First_sequences_sorted: Dictionary of first sequence clusters.
    - combined_pangenome_clusters_Second_sequences: Dictionary of second sequence clusters.

    Only groups whose key matches one of the categories listed in
    options.write_groups (comma-separated) are written.  Individual per-group
    files are written only when options.write_individual_groups is set; the
    combined file is always written (DNA, or AA when
    options.sequence_type == 'AA').
    """
    # Create output directory if it doesn't exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    combined_fasta_filename = os.path.join(output_dir, "combined_group_sequences_dna.fasta")

    # Open combined FASTA file for writing all sequences
    with open(combined_fasta_filename, 'w') as combined_fasta, open(combined_fasta_filename.replace('_dna.fasta','_aa.fasta'), 'w') as combined_fasta_aa:
        for key_prefix in key_order:
            for key, values in cores.items():
                # Keep only groups the user asked for (options.write_groups is
                # a comma-separated category list matched against key parts).
                if any(part in options.write_groups.split(',') for part in key.split('_')):
                    if key.startswith(key_prefix):
                        for value in values:
                            output_filename = f"{key}_{value}_dna.fasta"
                            # 'First'-prefixed keys index one cluster dict,
                            # everything else indexes the second.
                            if 'First' in key_prefix:
                                sequences_to_write = pangenome_clusters_First_sequences_sorted[value]
                            else:
                                sequences_to_write = combined_pangenome_clusters_Second_sequences[value]

                            # Write individual FASTA file
                            with open(os.path.join(output_dir,output_filename), 'w') as outfile, open(os.path.join(output_dir, output_filename.replace('_dna.fasta','_aa.fasta')), 'w') as outfile_aa:
                                for header in sequences_to_write:
                                    if header in sequences:
                                        sequence = sequences[header]
                                        wrapped_sequence = wrap_sequence(sequence)
                                        # Handle Amino Acid Sequences (AA)
                                        if options.sequence_type == 'AA':
                                            seq_aa = translate_frame(sequence)
                                            wrapped_sequence_aa = wrap_sequence(seq_aa)
                                            # Write individual group file for AA, if option is enabled
                                            if options.write_individual_groups:
                                                outfile_aa.write(f">{header}\n")
                                                outfile_aa.write(f"{wrapped_sequence_aa}\n")
                                            else:
                                                os.remove(outfile_aa.name)  # Delete individual file if option is disabled
                                            # Always write to the combined AA file
                                            combined_fasta_aa.write(f">Group_{value}|{header}\n")
                                            combined_fasta_aa.write(f"{wrapped_sequence_aa}\n")
                                        # Handle Nucleotide Sequences
                                        else:
                                            # If the option is disabled, delete individual AA file (if created)
                                            try:
                                                os.remove(outfile_aa.name)  # Ensure outfile_aa is removed when sequence_type isn't 'AA'
                                            except FileNotFoundError:
                                                pass
                                            # Write individual group file for nucleotide sequence, if option is enabled
                                            if options.write_individual_groups:
                                                outfile.write(f">{header}\n")
                                                outfile.write(f"{wrapped_sequence}\n")
                                            else:
                                                os.remove(outfile.name)  # Delete individual file if option is disabled
                                            # Always write to the combined nucleotide file
                                            combined_fasta.write(f">Group_{value}|{header}\n")
                                            combined_fasta.write(f"{wrapped_sequence}\n")

                                    else:
                                        if options.verbose == True:
                                            print(f"Sequence {header} not found in original_fasta file.")
        if options.sequence_type != 'AA':
            #Clean up unused file
            os.remove(combined_fasta_aa.name)
    print(f"Combined FASTA file saved to: {combined_fasta_filename}")
|
|
462
|
+
|
|
463
|
+
|
|
464
|
+
# def process_gene_groups(options, group_directory, sub_group_directory, paralog_groups, output_file):
|
|
465
|
+
# """Process each gene family file to select the longest sequence per genome and concatenate aligned sequences."""
|
|
466
|
+
# concatenated_sequences = {}
|
|
467
|
+
# output_file = group_directory.replace('Gene_Groups_Output',output_file)
|
|
468
|
+
#
|
|
469
|
+
# # Iterate over each gene family file
|
|
470
|
+
# for gene_file in os.listdir(group_directory):
|
|
471
|
+
# if gene_file.endswith('.fasta') and not gene_file.endswith('combined_group_sequences.fasta') :
|
|
472
|
+
# gene_path = os.path.join(group_directory, gene_file)
|
|
473
|
+
#
|
|
474
|
+
# # Read sequences from the gene family file
|
|
475
|
+
# sequences = read_fasta(gene_path)
|
|
476
|
+
#
|
|
477
|
+
# # Select the longest sequence for each genome
|
|
478
|
+
# longest_sequences = select_longest_gene(sequences)
|
|
479
|
+
#
|
|
480
|
+
# # Run mafft on the longest sequences
|
|
481
|
+
# aligned_file = f"{group_directory}/{gene_file}_aligned.fasta.tmp"
|
|
482
|
+
# run_mafft_on_sequences(options, {seq_id: seq for seq_id, seq in longest_sequences.values()}, aligned_file)
|
|
483
|
+
#
|
|
484
|
+
# # Read aligned sequences and concatenate them
|
|
485
|
+
# aligned_sequences = read_fasta(aligned_file)
|
|
486
|
+
# for genome, aligned_seq in aligned_sequences.items():
|
|
487
|
+
# genome_name = genome.split('|')[0]
|
|
488
|
+
# if genome_name not in concatenated_sequences:
|
|
489
|
+
# concatenated_sequences[genome_name] = ""
|
|
490
|
+
# concatenated_sequences[genome_name] += aligned_seq
|
|
491
|
+
#
|
|
492
|
+
# # Clean up aligned file
|
|
493
|
+
# os.remove(aligned_file)
|
|
494
|
+
#
|
|
495
|
+
# # Write the concatenated sequences to the output file
|
|
496
|
+
# with open(output_file, 'w') as out:
|
|
497
|
+
# for genome, sequence in concatenated_sequences.items():
|
|
498
|
+
# out.write(f">{genome}\n")
|
|
499
|
+
# wrapped_sequence = wrap_sequence(sequence, 60)
|
|
500
|
+
# out.write(f"{wrapped_sequence}\n")
|
|
501
|
+
|
|
502
|
+
def perform_alignment(gene_path,group_directory, gene_file, options, concatenated_sequences, subgrouped):
    """Align one gene group with MAFFT and extend the per-genome concatenation.

    Reads the group FASTA at *gene_path*, keeps only the longest sequence per
    genome, aligns those with MAFFT, then appends each genome's aligned
    sequence to its entry in *concatenated_sequences*.  Genomes absent from
    this group are padded with '-' gaps so all concatenations stay the same
    length.

    Parameters:
    - gene_path: FASTA file for this gene group.
    - group_directory: directory used for the temporary alignment file.
    - gene_file: group file name (used to name the temporary alignment).
    - options: command-line options (forwarded to run_mafft_on_sequences).
    - concatenated_sequences: {genome: concatenated alignment so far};
      mutated in place and also returned.
    - subgrouped: True when headers come from paralog sub-group files
      (changes which '|' field holds the genome name).
    """
    # Read sequences from the gene family file
    sequences = read_fasta(gene_path)

    # Select the longest sequence for each genome
    longest_sequences = select_longest_gene(sequences, subgrouped)

    # Run mafft on the longest sequences
    aligned_file = f"{group_directory}/{gene_file}_aligned.fasta.tmp"
    run_mafft_on_sequences(options, {seq_id: seq for seq_id, seq in longest_sequences.values()}, aligned_file)

    # Read aligned sequences and concatenate them
    aligned_sequences = read_fasta(aligned_file)
    # Find the length of the longest sequence in aligned_sequences
    # (used as the gap-pad width for genomes missing from this group).
    max_length = max(len(seq) for seq in aligned_sequences.values())

    for genome, sequence in concatenated_sequences.items():
        # Substring match: the genome name is embedded somewhere in the
        # aligned header (e.g. 'genome|gene').
        if any(genome in key for key in aligned_sequences.keys()):
            genome_name_in_aligned = next(key for key in aligned_sequences.keys() if genome in key)#.split('|')[split_by]
            concatenated_sequences[genome] += aligned_sequences[genome_name_in_aligned]
        else:
            # Genome missing from this group: pad with gaps to keep columns aligned.
            concatenated_sequences[genome] += "-" * max_length

    # Clean up aligned file
    os.remove(aligned_file)

    return concatenated_sequences
|
|
529
|
+
|
|
530
|
+
def process_gene_groups(options, group_directory, sub_group_directory, paralog_groups, genome_list, output_file):
    """Process each gene family file to select the longest sequence per genome and concatenate aligned sequences.

    Parameters:
    - options: command-line options (align_core, genome_num, align_aa, ...).
    - group_directory: directory of per-group FASTA files to align.
    - sub_group_directory: directory of paralog sub-group FASTA files (may be falsy).
    - paralog_groups: {'>Group_N': {'sizes': [...]}} info for split groups, or None.
    - genome_list: genomes to include; every genome gets an output record.
    - output_file: name fragment substituted into group_directory for the result path.
    """
    # One growing alignment string per genome; starts empty for every genome.
    concatenated_sequences = {genome: "" for genome in genome_list}
    output_file = group_directory.replace('Gene_Groups_Output', output_file)
    if paralog_groups != None:
        # Minimum sub-group size (align_core % of genome_num) for a paralog
        # sub-group to be aligned separately.
        threshold_size = math.floor(int(options.align_core) * int(options.genome_num) / 100)

    # Align the amino-acid or DNA group files depending on the user's choice.
    if options.align_aa == True:
        affix = '_aa.fasta'
    else:
        affix = '_dna.fasta'

    # Iterate over each gene family file
    for gene_file in os.listdir(group_directory):
        if gene_file.endswith(affix) and not gene_file.startswith('combined_group_sequences'):
            #print(gene_file)
            # Assumes the 4th '_'-separated field of the file name is the
            # group number (e.g. '..._Group_12_dna.fasta') -- matches the
            # group files written elsewhere in this package.
            current_group = int(gene_file.split('_')[3].split('.')[0])
            gene_path = os.path.join(group_directory, gene_file)

            # Check for matching group in paralog_groups
            if sub_group_directory and paralog_groups and '>Group_'+str(current_group) in paralog_groups:
                # Align each sufficiently large paralog sub-group separately.
                for subgroup, size in enumerate(paralog_groups['>Group_' + str(current_group)]['sizes']):
                    if size >= threshold_size:
                        gene_path = os.path.join(sub_group_directory,f"Group_{current_group}_subgroup_{subgroup}{affix}")
                        concatenated_sequences = perform_alignment(gene_path, group_directory, gene_file, options, concatenated_sequences, True)

            else:
                concatenated_sequences = perform_alignment(gene_path, group_directory, gene_file, options, concatenated_sequences, False)


    # Write the concatenated sequences to the output file
    with open(output_file, 'w') as out:
        for genome, sequence in concatenated_sequences.items():
            out.write(f">{genome}\n")
            wrapped_sequence = wrap_sequence(sequence, 60)
            out.write(f"{wrapped_sequence}\n")
|
|
566
|
+
|