PyamilySeq 0.8.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
PyamilySeq/utils.py DELETED
@@ -1,432 +0,0 @@
1
- import subprocess
2
- import shutil
3
- import os
4
- import glob
5
- import collections
6
- from tempfile import NamedTemporaryFile
7
- import sys
8
- from line_profiler_pycharm import profile
9
- import re
10
-
11
-
12
# Codon -> amino-acid lookup, fixed to NCBI translation table 11
# (bacterial / archaeal / plant-plastid code); '*' marks stop codons.
# NOTE(review): TGA is a stop here — table 11 only, not the standard code's
# selenocysteine contexts; confirm if other tables are ever needed.
gencode = {
    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
    'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
    'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W'}
30
-
31
def translate_frame(sequence):
    """Translate a nucleotide sequence to protein using the module's table-11
    codon map; unknown codons become 'X' and any trailing partial codon is
    silently dropped."""
    amino_acids = []
    for pos in range(0, len(sequence) - len(sequence) % 3, 3):
        amino_acids.append(gencode.get(sequence[pos:pos + 3], 'X'))
    return ''.join(amino_acids)
34
-
35
def calculate_similarity(seq1, seq2):
    """Return the percent identity (0-100) between two sequences.

    Equal-length sequences are compared position-by-position. Unequal-length
    sequences go through a simple Needleman-Wunsch global alignment
    (match +1, mismatch -1, gap -1) and identity is computed over the longer
    sequence's length.

    Parameters:
    - seq1, seq2: strings (nucleotide or protein) to compare.

    Returns:
    - float percent identity in [0, 100].
    """
    # FIX: dropped the @profile decorator — line_profiler_pycharm is a
    # dev-only dependency, and requiring it breaks ordinary installs.
    len1, len2 = len(seq1), len(seq2)

    # If lengths are the same, directly compare without alignment
    if len1 == len2:
        if len1 == 0:
            # FIX: two empty sequences previously raised ZeroDivisionError;
            # they are trivially identical.
            return 100.0
        matches = sum(c1 == c2 for c1, c2 in zip(seq1, seq2))
        return (matches / len1) * 100

    # For different lengths, proceed with global alignment.
    # Initialize the scoring matrix with linear gap penalties on row/col 0.
    score_matrix = [[0] * (len2 + 1) for _ in range(len1 + 1)]
    for i in range(len1 + 1):
        score_matrix[i][0] = -i  # Gap penalty for seq1
    for j in range(len2 + 1):
        score_matrix[0][j] = -j  # Gap penalty for seq2

    # Fill the score matrix (match +1, mismatch -1, gap -1).
    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            match = score_matrix[i - 1][j - 1] + (1 if seq1[i - 1] == seq2[j - 1] else -1)
            delete = score_matrix[i - 1][j] - 1  # Gap in seq2
            insert = score_matrix[i][j - 1] - 1  # Gap in seq1
            score_matrix[i][j] = max(match, delete, insert)

    # Traceback to recover one optimal alignment (diagonal preferred on ties).
    aligned_seq1, aligned_seq2 = "", ""
    i, j = len1, len2
    while i > 0 or j > 0:
        current_score = score_matrix[i][j]
        if i > 0 and j > 0 and current_score == score_matrix[i - 1][j - 1] + (1 if seq1[i - 1] == seq2[j - 1] else -1):
            aligned_seq1 += seq1[i - 1]
            aligned_seq2 += seq2[j - 1]
            i -= 1
            j -= 1
        elif i > 0 and current_score == score_matrix[i - 1][j] - 1:
            aligned_seq1 += seq1[i - 1]
            aligned_seq2 += "-"
            i -= 1
        else:
            aligned_seq1 += "-"
            aligned_seq2 += seq2[j - 1]
            j -= 1

    # Traceback builds the strings backwards; reverse them.
    aligned_seq1 = aligned_seq1[::-1]
    aligned_seq2 = aligned_seq2[::-1]

    # Count identical aligned positions.
    matches = sum(c1 == c2 for c1, c2 in zip(aligned_seq1, aligned_seq2))

    # Identity is normalised by the longer input so gaps count against it.
    max_length = max(len(seq1), len(seq2))
    return (matches / max_length) * 100
92
-
93
-
94
-
95
def is_tool_installed(tool_name):
    """Return True when `tool_name` is on PATH and can actually be launched."""
    # Fast fail: not on PATH at all.
    if shutil.which(tool_name) is None:
        return False
    # Launch it once; a non-zero exit code still proves it is installed.
    try:
        subprocess.run([tool_name, '--version'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
    except subprocess.CalledProcessError:
        pass  # installed and runnable, just unhappy about '--version'
    except FileNotFoundError:
        return False  # shouldn't happen given the which() check above
    return True
109
-
110
def reverse_complement(seq):
    """Return the reverse complement of a DNA string (alphabet A/C/G/T/N;
    any other character raises KeyError, as before)."""
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
    out = []
    for base in seq[::-1]:
        out.append(pairs[base])
    return ''.join(out)
113
-
114
-
115
def fix_path(path):
    """Canonicalise a path: normalise separators/dots, then resolve symlinks
    to an absolute real path."""
    return os.path.realpath(os.path.normpath(path))
119
-
120
-
121
def extract_identity(clustered_info):
    """Extract the percent-identity value from a CD-HIT-style cluster line
    (the 'at [+/-]NN.NN%' token); raises ValueError when absent."""
    found = re.search(r'at ([-+]*)(\d+\.\d+)%', clustered_info)
    if found is None:
        raise ValueError("Percent identity not found in the string.")
    # Group 1 is the optional strand sign; group 2 is the numeric value.
    return float(found.group(2))
130
-
131
def wrap_sequence(sequence, width=60):
    """Return `sequence` split into newline-joined chunks of at most `width`
    characters (FASTA-style line wrapping)."""
    return "\n".join(sequence[pos:pos + width] for pos in range(0, len(sequence), width))
136
-
137
-
138
def read_fasta(fasta_file):
    """Parse a FASTA file into {header: sequence}; the header keeps
    everything after '>' (including any description) and blank lines are
    skipped."""
    sequences = {}
    header = None
    with open(fasta_file, 'r') as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue
            if raw[0] == '>':
                header = raw[1:]
                sequences[header] = ''
            else:
                sequences[header] += raw
    return sequences
152
-
153
-
154
def reorder_dict_by_keys(original_dict, sorted_keys):
    """Return a new dict whose insertion order follows `sorted_keys`, with
    values taken from `original_dict` (KeyError if a key is missing)."""
    reordered = {}
    for key in sorted_keys:
        reordered[key] = original_dict[key]
    return reordered
156
def custom_sort_key(k, dict1, dict2):
    """Sort key for cluster ids: (size of dict1[k], size of dict2[k])."""
    return len(dict1[k]), len(dict2[k])
158
-
159
def sort_keys_by_values(dict1, dict2):
    """Return dict1's keys ordered largest-first by the tuple
    (len(dict1[k]), len(dict2[k]))."""
    return sorted(dict1, key=lambda k: (len(dict1[k]), len(dict2[k])), reverse=True)
162
-
163
def select_longest_gene(sequences):
    """For each genome — the part of the sequence id before the first '|' —
    keep only the (seq_id, sequence) pair with the longest sequence."""
    best = {}
    for seq_id, seq in sequences.items():
        genome = seq_id.split('|')[0]
        kept = best.get(genome)
        if kept is None or len(seq) > len(kept[1]):
            best[genome] = (seq_id, seq)
    return best
171
-
172
-
173
def run_mafft_on_sequences(options, sequences, output_file):
    """Align `sequences` ({header: seq}) with MAFFT and write the alignment
    to `output_file`.

    MAFFT's stderr is shown only when options.verbose is True; the temporary
    input FASTA is always removed, even if MAFFT fails.
    """
    # MAFFT reads from a file, so stage the sequences in a temp FASTA first.
    with NamedTemporaryFile('w', delete=False) as tmp:
        for header, seq in sequences.items():
            tmp.write(f">{header}\n{seq}\n")
        tmp_path = tmp.name

    command = ['mafft', '--auto', '--thread', str(options.threads), tmp_path]
    stderr_target = sys.stderr if options.verbose == True else subprocess.DEVNULL
    try:
        with open(output_file, 'w') as out_handle:
            subprocess.run(command, stdout=out_handle, stderr=stderr_target, check=True)
    finally:
        os.remove(tmp_path)  # always clean up the staging file
202
-
203
-
204
-
205
-
206
def read_separate_files(input_dir, name_split, gene_ident, combined_out, translate):
    """Extract gene sequences from paired GFF + sibling '.fa' FASTA files.

    For every *name_split GFF in input_dir that has a matching '.fa' file,
    parses feature rows whose type matches gene_ident, cuts the
    strand-corrected sequence out of the FASTA, optionally translates it,
    and appends '>genome|seq_id' records (wrapped at 60 columns) to
    combined_out.

    Parameters:
    - input_dir: directory containing the GFF/FASTA pairs.
    - name_split: filename suffix identifying GFF files; also splits off the genome name.
    - gene_ident: iterable of feature-type substrings to keep (e.g. 'CDS').
    - combined_out: path of the combined FASTA written (overwritten) here.
    - translate: when True, translate extracted sequences to protein.
    """
    with open(combined_out, 'w') as combined_out_file:
        for gff_file in glob.glob(os.path.join(input_dir, '*' + name_split)):
            genome_name = os.path.basename(gff_file).split(name_split)[0]
            corresponding_fasta_file = os.path.splitext(gff_file)[0] + '.fa'
            if not os.path.exists(corresponding_fasta_file):
                continue

            gff_features = []
            seen_seq_ids = collections.defaultdict(int)
            with open(gff_file, 'r') as file:
                for line in file:
                    line_data = line.split('\t')
                    if len(line_data) != 9:
                        continue
                    if not any(gene_type in line_data[2] for gene_type in gene_ident):
                        continue
                    contig = line_data[0]
                    feature = line_data[2]
                    strand = line_data[6]
                    start, end = int(line_data[3]), int(line_data[4])
                    # BUG FIX: parse seq_id BEFORE the duplicate check — the
                    # original referenced seq_id before assigning it (NameError
                    # on the first feature, stale id from the previous row after).
                    seq_id = line_data[8].split('ID=')[1].split(';')[0]
                    if seq_id in seen_seq_ids:
                        # BUG FIX: `seen_seq_ids[seq_id] + 1` was a no-op, so
                        # every duplicate got the same '_1' suffix.
                        suffix = seen_seq_ids[seq_id]
                        seen_seq_ids[seq_id] += 1
                        seq_id = seq_id + '_' + str(suffix)
                    else:
                        seen_seq_ids[seq_id] = 1
                    gff_features.append((contig, start, end, strand, feature, seq_id))

            fasta_dict = {}
            current_contig = None
            with open(corresponding_fasta_file, 'r') as file:
                for line in file:
                    if line.startswith('>'):
                        current_contig = line[1:].split()[0]
                        fasta_dict[current_contig] = ['', '']
                    elif current_contig is not None:  # ignore sequence before any header
                        fasta_dict[current_contig][0] += line.strip()

            # Pre-compute reverse complements for '-' strand extraction.
            for contig in fasta_dict:
                fasta_dict[contig][1] = reverse_complement(fasta_dict[contig][0])

            if fasta_dict and gff_features:
                for contig, start, end, strand, feature, seq_id in gff_features:
                    if contig not in fasta_dict:
                        continue
                    if strand == '+':
                        cds_sequence = fasta_dict[contig][0][start - 1:end]
                    elif strand == '-':
                        # Coordinates are re-mapped onto the reverse-complement strand.
                        contig_len = len(fasta_dict[contig][0])
                        corrected_start = max(contig_len - end, 1)
                        corrected_stop = max(contig_len - (start - 1), 1)
                        cds_sequence = fasta_dict[contig][1][corrected_start:corrected_stop]
                    else:
                        # BUG FIX: an unknown strand previously reused the previous
                        # iteration's cds_sequence (or crashed on the first feature).
                        continue
                    if translate == True:
                        cds_sequence = translate_frame(cds_sequence)
                    wrapped_sequence = '\n'.join(cds_sequence[i:i + 60] for i in range(0, len(cds_sequence), 60))
                    combined_out_file.write(f">{genome_name}|{seq_id}\n{wrapped_sequence}\n")
262
-
263
-
264
def read_combined_files(input_dir, name_split, gene_ident, combined_out, translate):
    """Extract gene sequences from GFF files with an embedded '##FASTA' section.

    For every *name_split file in input_dir, parses feature rows whose type
    matches gene_ident, cuts the strand-corrected sequence out of the FASTA
    section of the same file, optionally translates it, and appends
    '>genome|seq_id' records (wrapped at 60 columns) to combined_out.

    Parameters:
    - input_dir: directory containing the combined GFF+FASTA files.
    - name_split: filename suffix identifying input files; also splits off the genome name.
    - gene_ident: iterable of feature-type substrings to keep (e.g. 'CDS').
    - combined_out: path of the combined FASTA written (overwritten) here.
    - translate: when True, translate extracted sequences to protein.
    """
    with open(combined_out, 'w') as combined_out_file:
        for gff_file in glob.glob(os.path.join(input_dir, '*' + name_split)):
            genome_name = os.path.basename(gff_file).split(name_split)[0]
            fasta_dict = {}
            gff_features = []
            seen_seq_ids = collections.defaultdict(int)
            current_contig = None
            fasta_section = False
            with open(gff_file, 'r') as file:
                for line in file:
                    if line.startswith('##FASTA'):
                        fasta_section = True
                        continue
                    if fasta_section:
                        if line.startswith('>'):
                            current_contig = line[1:].split()[0]
                            fasta_dict[current_contig] = ['', '']
                        elif current_contig is not None:  # ignore sequence before any header
                            fasta_dict[current_contig][0] += line.strip()
                        continue
                    line_data = line.split('\t')
                    if len(line_data) != 9:
                        continue
                    if not any(gene_type in line_data[2] for gene_type in gene_ident):
                        continue
                    contig = line_data[0]
                    feature = line_data[2]
                    strand = line_data[6]
                    start, end = int(line_data[3]), int(line_data[4])
                    seq_id = line_data[8].split('ID=')[1].split(';')[0]
                    if seq_id in seen_seq_ids:
                        # BUG FIX: `seen_seq_ids[seq_id] + 1` was a no-op, so
                        # every duplicate got the same '_1' suffix.
                        suffix = seen_seq_ids[seq_id]
                        seen_seq_ids[seq_id] += 1
                        seq_id = seq_id + '_' + str(suffix)
                    else:
                        seen_seq_ids[seq_id] = 1
                    gff_features.append((contig, start, end, strand, feature, seq_id))

            # Pre-compute reverse complements for '-' strand extraction.
            for contig in fasta_dict:
                fasta_dict[contig][1] = reverse_complement(fasta_dict[contig][0])

            if fasta_dict and gff_features:
                for contig, start, end, strand, feature, seq_id in gff_features:
                    if contig not in fasta_dict:
                        continue
                    if strand == '+':
                        cds_sequence = fasta_dict[contig][0][start - 1:end]
                    elif strand == '-':
                        # Coordinates are re-mapped onto the reverse-complement strand.
                        contig_len = len(fasta_dict[contig][0])
                        corrected_start = max(contig_len - end, 1)
                        corrected_stop = max(contig_len - (start - 1), 1)
                        cds_sequence = fasta_dict[contig][1][corrected_start:corrected_stop]
                    else:
                        # BUG FIX: an unknown strand previously reused the previous
                        # iteration's cds_sequence (or crashed on the first feature).
                        continue
                    if translate == True:
                        cds_sequence = translate_frame(cds_sequence)
                    wrapped_sequence = '\n'.join(cds_sequence[i:i + 60] for i in range(0, len(cds_sequence), 60))
                    combined_out_file.write(f">{genome_name}|{seq_id}\n{wrapped_sequence}\n")
320
-
321
-
322
def read_fasta_files(input_dir, name_split, combined_out, translate):
    """Merge every FASTA file matching *name_split in input_dir into
    combined_out, rewriting each header as '>genome|record_id' and wrapping
    sequence lines at 60 columns; optionally translates each sequence."""
    with open(combined_out, 'w') as combined_out_file:
        for fasta_file in glob.glob(os.path.join(input_dir, '*' + name_split)):
            genome_name = os.path.basename(fasta_file).split(name_split)[0]
            records = collections.defaultdict(str)
            with open(fasta_file, 'r') as handle:
                for line in handle:
                    if line.startswith('>'):
                        # Record id is the first whitespace-separated token.
                        current_record = line[1:].split()[0]
                        records[current_record] = ''
                    else:
                        records[current_record] += line.strip()
            for record_id, seq in records.items():
                if translate == True:
                    seq = translate_frame(seq)
                wrapped = '\n'.join(seq[i:i + 60] for i in range(0, len(seq), 60))
                combined_out_file.write(f">{genome_name}|{record_id}\n{wrapped}\n")
340
-
341
-
342
def write_groups(options, output_dir, key_order, cores, sequences,
                 pangenome_clusters_First_sequences_sorted, combined_pangenome_clusters_Second_sequences):
    """
    Writes individual FASTA files and a combined FASTA file for all sequences.

    Parameters:
    - options: Command-line options (uses .write_groups, a comma-separated
      list of group labels to emit, and .verbose).
    - output_dir: Directory where output FASTA files will be saved.
    - key_order: The order in which to process keys.
    - cores: Dictionary of core genes; keys are underscore-joined labels,
      values are iterables of cluster ids.
    - sequences: Dictionary mapping headers to sequences.
    - pangenome_clusters_First_sequences_sorted: Dictionary of first sequence clusters.
    - combined_pangenome_clusters_Second_sequences: Dictionary of second sequence clusters.
    """
    # Create output directory if it doesn't exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    combined_fasta_filename = os.path.join(output_dir, "combined_group_sequences.fasta")

    # Open combined FASTA file for writing all sequences
    with open(combined_fasta_filename, 'w') as combined_fasta:
        for key_prefix in key_order:
            for key, values in cores.items():
                # Keep only groups whose key contains one of the labels the
                # user asked for via options.write_groups.
                if any(part in options.write_groups.split(',') for part in key.split('_')):
                    if key.startswith(key_prefix):
                        for value in values:
                            output_filename = f"{key}_{value}.fasta"
                            # 'First'-type prefixes read from the first clustering;
                            # everything else reads from the combined second clustering.
                            if 'First' in key_prefix:
                                sequences_to_write = pangenome_clusters_First_sequences_sorted[value]
                            else:
                                sequences_to_write = combined_pangenome_clusters_Second_sequences[value]

                            # Write individual FASTA file
                            with open(os.path.join(output_dir, output_filename), 'w') as outfile:
                                for header in sequences_to_write:
                                    if header in sequences:
                                        sequence = sequences[header]
                                        outfile.write(f">{header}\n")
                                        wrapped_sequence = wrap_sequence(sequence)
                                        outfile.write(f"{wrapped_sequence}\n")

                                        # Also write to the combined FASTA file
                                        combined_fasta.write(f">Group_{value}|{header}\n")
                                        combined_fasta.write(f"{wrapped_sequence}\n")
                                    else:
                                        # NOTE(review): missing headers are only reported,
                                        # not written anywhere — confirm this is intended.
                                        if options.verbose:
                                            print(f"Sequence {header} not found in original_fasta file.")

    print(f"Combined FASTA file saved to: {combined_fasta_filename}")
392
-
393
-
394
def process_gene_families(options, directory, output_file):
    """Build a concatenated core-gene alignment across genomes.

    For each per-family FASTA in `directory` (skipping the combined output
    file), keeps the longest sequence per genome, aligns them with MAFFT,
    and concatenates each genome's aligned sequence across all families.
    The result is written as one wrapped FASTA record per genome to
    `output_file` (resolved by substituting 'Gene_Families_Output' in
    `directory`).

    Parameters:
    - options: Command-line options passed through to MAFFT (threads, verbose).
    - directory: Directory of per-gene-family FASTA files.
    - output_file: Name substituted into `directory` to form the output path.
    """
    concatenated_sequences = {}
    output_file = directory.replace('Gene_Families_Output', output_file)

    # Iterate over each gene family file
    for gene_file in os.listdir(directory):
        if not gene_file.endswith('.fasta') or gene_file.endswith('combined_group_sequences.fasta'):
            continue
        gene_path = os.path.join(directory, gene_file)

        # Read sequences and keep only the longest per genome.
        sequences = read_fasta(gene_path)
        longest_sequences = select_longest_gene(sequences)

        # Align the representatives with MAFFT into a temporary file.
        aligned_file = f"{directory}/{gene_file}_aligned.fasta.tmp"
        # longest_sequences values are (seq_id, seq) pairs.
        run_mafft_on_sequences(options, dict(longest_sequences.values()), aligned_file)

        # FIX: removed leftover debug code
        # (`if 'Group' in genome_name: print(2)`) that printed '2' to stdout.
        aligned_sequences = read_fasta(aligned_file)
        for genome, aligned_seq in aligned_sequences.items():
            genome_name = genome.split('|')[0]
            concatenated_sequences.setdefault(genome_name, "")
            concatenated_sequences[genome_name] += aligned_seq

        # Clean up aligned file
        os.remove(aligned_file)

    # Write the concatenated sequences to the output file
    with open(output_file, 'w') as out:
        for genome, sequence in concatenated_sequences.items():
            out.write(f">{genome}\n")
            out.write(f"{wrap_sequence(sequence, 60)}\n")