gwaslab 3.6.8__py3-none-any.whl → 3.6.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gwaslab might be problematic.

@@ -1,1630 +0,0 @@
- import pandas as pd
- import numpy as np
- from pysam import VariantFile
- from Bio import SeqIO
- from itertools import repeat
- from multiprocessing import Pool
- from functools import partial
- import re
- import os
- import gc
- from gwaslab.g_Log import Log
- from gwaslab.qc_fix_sumstats import fixchr
- from gwaslab.qc_fix_sumstats import fixpos
- from gwaslab.qc_fix_sumstats import sortcolumn
- from gwaslab.qc_fix_sumstats import _df_split
- from gwaslab.qc_fix_sumstats import check_col
- from gwaslab.qc_fix_sumstats import start_to
- from gwaslab.qc_fix_sumstats import finished
- from gwaslab.qc_fix_sumstats import skipped
- from gwaslab.qc_fix_sumstats import sortcoordinate
- from gwaslab.qc_check_datatype import check_dataframe_shape
- from gwaslab.bd_common_data import get_number_to_chr
- from gwaslab.bd_common_data import get_chr_list
- from gwaslab.bd_common_data import get_chr_to_number
- from gwaslab.bd_common_data import get_number_to_NC
- from gwaslab.bd_common_data import _maketrans
- from gwaslab.g_vchange_status import vchange_status
- from gwaslab.g_version import _get_version
- from gwaslab.cache_manager import CacheManager, PALINDROMIC_INDEL, NON_PALINDROMIC
- from gwaslab.g_vchange_status import STATUS_CATEGORIES
- #rsidtochrpos
- #checkref
- #parallelizeassignrsid
- #inferstrand
- #parallelecheckaf
-
- ### CONSTANTS AND MAPPINGS ###
-
- PADDING_VALUE = 100
-
- # chr(0) should not be used in the mapping dict because it's a reserved value.
- # Instead of starting from chr(1), we start from chr(2) because this could be useful in the future
- # to compute the complementary allele with a simple XOR operation (e.g. 2 ^ 1 = 3, 3 ^ 1 = 2, 4 ^ 1 = 5, 5 ^ 1 = 4, ...)
- MAPPING = {
-     "A": chr(2),
-     "T": chr(3),
-     "C": chr(4),
-     "G": chr(5),
-     "N": chr(6),
- }
- assert all(value != chr(0) for value in MAPPING.values()), "No value in the mapping dictionary should be equal to chr(0). This is a reserved value"
-
- _COMPLEMENTARY_MAPPING = {
-     "A": "T",
-     "C": "G",
-     "G": "C",
-     "T": "A",
-     "N": "N",
- }
- COMPLEMENTARY_MAPPING = {k: MAPPING[v] for k, v in _COMPLEMENTARY_MAPPING.items()}
-
- TRANSLATE_TABLE = _maketrans(MAPPING)
- TRANSLATE_TABLE_COMPL = _maketrans(COMPLEMENTARY_MAPPING)
-
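
A quick sketch of what this encoding buys (illustrative only; it uses the built-in str.maketrans in place of gwaslab's _maketrans): with A/T and C/G mapped to consecutive even/odd code points, the complementary base is a single XOR away, and str.translate encodes a whole sequence in one pass.

    MAPPING = {"A": chr(2), "T": chr(3), "C": chr(4), "G": chr(5), "N": chr(6)}
    assert ord(MAPPING["A"]) ^ 1 == ord(MAPPING["T"])   # 2 ^ 1 == 3
    assert ord(MAPPING["C"]) ^ 1 == ord(MAPPING["G"])   # 4 ^ 1 == 5
    encoded = "GATTACA".translate(str.maketrans(MAPPING))
    print([ord(c) for c in encoded])                    # [5, 2, 3, 3, 2, 4, 2]
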
- #20220808
- #################################################################################################################
-
- def rsidtochrpos(sumstats,
-                  path=None, ref_rsid_to_chrpos_tsv=None, snpid="SNPID",
-                  rsid="rsID", chrom="CHR", pos="POS", ref_rsid="rsID", ref_chr="CHR", ref_pos="POS", build="19",
-                  overwrite=False, remove=False, chunksize=5000000, verbose=True, log=Log()):
-     '''
-     assign CHR:POS based on rsID
-     '''
-     ##start function with col checking##########################################################
-     _start_line = "assign CHR and POS using rsIDs"
-     _end_line = "assigning CHR and POS using rsIDs"
-     _start_cols = [rsid]
-     _start_function = ".rsid_to_chrpos()"
-     _must_args = {}
-
-     is_enough_info = start_to(sumstats=sumstats,
-                               log=log,
-                               verbose=verbose,
-                               start_line=_start_line,
-                               end_line=_end_line,
-                               start_cols=_start_cols,
-                               start_function=_start_function,
-                               **_must_args)
-     if is_enough_info == False: return sumstats
-     ############################################################################################
-
-     # resolve the reference path before logging it
-     if ref_rsid_to_chrpos_tsv is not None:
-         path = ref_rsid_to_chrpos_tsv
-
-     log.write(" -rsID dictionary file: " + path, verbose=verbose)
-
-     if snpid in sumstats.columns and sum(sumstats[rsid].isna()) > 0:
-         log.write(" -Filling NA in the rsID column with SNPID...", verbose=verbose)
-         sumstats.loc[sumstats[rsid].isna(), rsid] = sumstats.loc[sumstats[rsid].isna(), snpid]
-
-     if sum(sumstats[rsid].isna()) > 0:
-         log.write(" -Filling NA in the rsID column with NA_xxx for {} variants...".format(sum(sumstats[rsid].isna())), verbose=verbose)
-         sumstats.loc[sumstats[rsid].isna(), rsid] = ["NA_" + str(x + 1) for x in range(len(sumstats.loc[sumstats[rsid].isna(), rsid]))]
-
-     dic_chunks = pd.read_csv(path, sep="\t", usecols=[ref_rsid, ref_chr, ref_pos],
-                              chunksize=chunksize, index_col=ref_rsid,
-                              dtype={ref_rsid: "string", ref_chr: "Int64", ref_pos: "Int64"})
-
-     sumstats = sumstats.set_index(rsid)
-
-     # if the CHR or POS column is not in sumstats, initialize it
-     if chrom not in sumstats.columns:
-         sumstats[chrom] = pd.Series(dtype="Int64")
-     if pos not in sumstats.columns:
-         sumstats[pos] = pd.Series(dtype="Int64")
-
-     log.write(" -Setting block size: ", chunksize, verbose=verbose)
-     log.write(" -Loading block: ", end="", verbose=verbose)
-     for i, dic in enumerate(dic_chunks):
-         dic_to_update = dic[dic.index.notnull()]
-         log.write(i, " ", end=" ", show_time=False)
-         dic_to_update = dic_to_update.rename(index={ref_rsid: rsid})
-         dic_to_update = dic_to_update.rename(columns={ref_chr: chrom, ref_pos: pos})
-         dic_to_update = dic_to_update[~dic_to_update.index.duplicated(keep='first')]
-         sumstats.update(dic_to_update, overwrite=True)
-         gc.collect()
-
-     log.write("\n", end="", show_time=False, verbose=verbose)
-     sumstats = sumstats.reset_index()
-     sumstats = sumstats.rename(columns={'index': rsid})
-     log.write(" -Updating CHR and POS finished. Start to re-fix CHR and POS...", verbose=verbose)
-     sumstats = fixchr(sumstats, verbose=verbose)
-     sumstats = fixpos(sumstats, verbose=verbose)
-     sumstats = sortcolumn(sumstats, verbose=verbose)
-
-     finished(log, verbose, _end_line)
-     return sumstats
- ####################################################################################################
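
The chunked-update pattern used above generalizes: index both frames by rsID and let DataFrame.update fill CHR/POS block by block, so the multi-GB reference TSV never has to fit in memory at once. A minimal sketch (file name and columns are hypothetical):

    import pandas as pd

    sumstats = pd.DataFrame({"rsID": ["rs1", "rs2"],
                             "CHR": pd.Series([pd.NA, pd.NA], dtype="Int64")}).set_index("rsID")
    chunks = pd.read_csv("rsid_chrpos.tsv", sep="\t", chunksize=5_000_000,
                         index_col="rsID", dtype={"rsID": "string", "CHR": "Int64"})
    for chunk in chunks:
        chunk = chunk[~chunk.index.duplicated(keep="first")]
        sumstats.update(chunk, overwrite=True)   # aligns on the rsID index
    sumstats = sumstats.reset_index()
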
-
-
- ####################################################################################################################
-
- def merge_chrpos(sumstats_part, all_groups_max, path, build, status):
-     group = str(sumstats_part["group"].mode(dropna=True)[0])
-     if group in [str(i) for i in range(all_groups_max + 1)]:
-         try:
-             to_merge = pd.read_hdf(path, key="group_" + str(group)).drop_duplicates(subset="rsn")
-             to_merge = to_merge.set_index("rsn")
-             is_chrpos_fixable = sumstats_part.index.isin(to_merge.index)
-             sumstats_part.loc[is_chrpos_fixable, status] = vchange_status(sumstats_part.loc[is_chrpos_fixable, status], 1, "139", 3 * build[0])
-             sumstats_part.loc[is_chrpos_fixable, status] = vchange_status(sumstats_part.loc[is_chrpos_fixable, status], 2, "987", 3 * build[1])
-             sumstats_part.update(to_merge)
-         except Exception:
-             # the group key is missing from the HDF5 file; leave this part unchanged
-             pass
-     return sumstats_part
-
-
- def parallelrsidtochrpos(sumstats, rsid="rsID", chrom="CHR", pos="POS", path=None, ref_rsid_to_chrpos_vcf=None, ref_rsid_to_chrpos_hdf5=None, build="99", status="STATUS",
-                          n_cores=4, block_size=20000000, verbose=True, log=Log()):
-
-     ##start function with col checking##########################################################
-     _start_line = "assign CHR and POS using rsIDs"
-     _end_line = "assigning CHR and POS using rsIDs"
-     _start_cols = [rsid]
-     _start_function = ".rsid_to_chrpos2()"
-     _must_args = {}
-
-     is_enough_info = start_to(sumstats=sumstats,
-                               log=log,
-                               verbose=verbose,
-                               start_line=_start_line,
-                               end_line=_end_line,
-                               start_cols=_start_cols,
-                               start_function=_start_function,
-                               **_must_args)
-     if is_enough_info == False: return sumstats
-     ############################################################################################
-
-     if ref_rsid_to_chrpos_hdf5 is not None:
-         path = ref_rsid_to_chrpos_hdf5
-     elif ref_rsid_to_chrpos_vcf is not None:
-         vcf_file_name = os.path.basename(ref_rsid_to_chrpos_vcf)
-         vcf_dir_path = os.path.dirname(ref_rsid_to_chrpos_vcf)
-         path = "{}/{}.rsID_CHR_POS_groups_{}.h5".format(vcf_dir_path, vcf_file_name, int(block_size))
-
-     if path is None:
-         raise ValueError("Please provide the path to the HDF5 file.")
-
-     # numeric part of the rsID; "rs" is stripped and non-numeric IDs become NA
-     sumstats["rsn"] = pd.to_numeric(sumstats[rsid].str.strip("rs"), errors="coerce").astype("Int64")
-
-     log.write(" -Source HDF5 file: ", path, verbose=verbose)
-     log.write(" -Cores to use: ", n_cores, verbose=verbose)
-     log.write(" -Block size (make sure it matches the HDF5 file): ", block_size, verbose=verbose)
-
-     input_columns = sumstats.columns
-     sumstats_nonrs = sumstats.loc[sumstats["rsn"].isna() | sumstats["rsn"].duplicated(keep='first'), :].copy()
-     # keep only rows with a unique, valid rsn so the two subsets stay disjoint
-     sumstats_rs = sumstats.loc[sumstats["rsn"].notnull() & (~sumstats["rsn"].duplicated(keep='first')), :].copy()
-
-     log.write(" -Non-valid rsIDs: ", sum(sumstats["rsn"].isna()), verbose=verbose)
-     log.write(" -Duplicated rsIDs except for the first occurrence: ", sum(sumstats.loc[~sumstats["rsn"].isna(), "rsn"].duplicated(keep='first')), verbose=verbose)
-     log.write(" -Valid rsIDs: ", len(sumstats_rs), verbose=verbose)
-
-     del sumstats
-     gc.collect()
-
-     # assign group number
-     sumstats_rs.loc[:, "group"] = sumstats_rs.loc[:, "rsn"] // block_size
-
-     # set index
-     sumstats_rs = sumstats_rs.set_index("rsn")
-
-     pool = Pool(n_cores)
-     if chrom not in input_columns:
-         log.write(" -Initiating CHR ... ", verbose=verbose)
-         sumstats_rs[chrom] = pd.Series(dtype="Int64")
-
-     if pos not in input_columns:
-         log.write(" -Initiating POS ... ", verbose=verbose)
-         sumstats_rs[pos] = pd.Series(dtype="Int64")
-
-     df_split = [y for x, y in sumstats_rs.groupby('group', as_index=False)]
-     log.write(" -Divided into groups: ", len(df_split), verbose=verbose)
-     log.write(" -", set(sumstats_rs.loc[:, "group"].unique()), verbose=verbose)
-
-     # check keys
-     store = pd.HDFStore(path, 'r')
-     all_groups = store.keys()
-     all_groups_len = len(all_groups)
-     store.close()
-     all_groups_max = max(map(lambda x: int(x.split("_")[1]), all_groups))
-     log.write(" -Number of groups in HDF5: ", all_groups_len, verbose=verbose)
-     log.write(" -Max index of groups in HDF5: ", all_groups_max, verbose=verbose)
-
-     # update CHR and POS using rsID with multiple processes
-     sumstats_rs = pd.concat(pool.map(partial(merge_chrpos, all_groups_max=all_groups_max, path=path, build=build, status=status), df_split), ignore_index=True)
-     sumstats_rs[[chrom, pos]] = sumstats_rs[[chrom, pos]].astype("Int64")
-     del df_split
-     gc.collect()
-     log.write(" -Merging group data... ", verbose=verbose)
-     # drop group and rsn
-     sumstats_rs = sumstats_rs.drop(columns=["group"])
-     sumstats_nonrs = sumstats_nonrs.drop(columns=["rsn"])
-
-     # merge back
-     log.write(" -Appending data... ", verbose=verbose)
-     sumstats = pd.concat([sumstats_rs, sumstats_nonrs], ignore_index=True)
-
-     del sumstats_rs
-     del sumstats_nonrs
-     gc.collect()
-
-     # check
-     sumstats = fixchr(sumstats, verbose=verbose)
-     sumstats = fixpos(sumstats, verbose=verbose)
-     sumstats = sortcolumn(sumstats, verbose=verbose)
-
-     pool.close()
-     pool.join()
-
-     finished(log, verbose, _end_line)
-     return sumstats
- ####################################################################################################################
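
In isolation, the parallel scheme above is: derive a bucket from the numeric part of the rsID, split the frame by bucket, and map a per-bucket merge function over a process pool. A sketch with a stand-in worker:

    import pandas as pd
    from functools import partial
    from multiprocessing import Pool

    def worker(part, tag):                       # stand-in for merge_chrpos
        return part

    if __name__ == "__main__":                   # required under the spawn start method
        df = pd.DataFrame({"rsn": [12, 20_000_015, 7]})
        df["group"] = df["rsn"] // 20_000_000    # rsn // block_size buckets nearby rsIDs together
        pieces = [g for _, g in df.groupby("group", as_index=False)]
        with Pool(2) as pool:
            out = pd.concat(pool.map(partial(worker, tag="demo"), pieces), ignore_index=True)
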
- # old version
- def _old_check_status(row, record):
-     # row: [POS, EA, NEA, STATUS]
-     # status:
-     # 0 -----> match
-     # 1 -----> Flipped Fixed
-     # 2 -----> Reverse_complementary Fixed
-     # 3 -----> flipped
-     # 4 -----> reverse_complementary
-     # 5 -----> reverse_complementary + flipped
-     # 6 -----> both alleles on genome + unable to distinguish
-     # 7 -----> reverse_complementary + both alleles on genome + unable to distinguish
-     # 8 -----> not on ref genome
-     # 9 -----> unchecked
-
-     status_pre = row.iloc[3][:5]
-     status_end = row.iloc[3][6:]
-
-     ## nea == ref
-     if row.iloc[2] == record[row.iloc[0]-1: row.iloc[0]+len(row.iloc[2])-1].seq.upper():
-         ## ea == ref
-         if row.iloc[1] == record[row.iloc[0]-1: row.iloc[0]+len(row.iloc[1])-1].seq.upper():
-             ## len(nea) != len(ea)
-             if len(row.iloc[2]) != len(row.iloc[1]):
-                 # indels both on ref, unable to identify
-                 return status_pre + "6" + status_end
-         else:
-             # nea == ref & ea != ref
-             return status_pre + "0" + status_end
-     ## nea != ref
-     else:
-         # ea == ref_seq -> need to flip
-         if row.iloc[1] == record[row.iloc[0]-1: row.iloc[0]+len(row.iloc[1])-1].seq.upper():
-             return status_pre + "3" + status_end
-         # ea != ref
-         else:
-             # _reverse_complementary
-             row.iloc[1] = get_reverse_complementary_allele(row.iloc[1])
-             row.iloc[2] = get_reverse_complementary_allele(row.iloc[2])
-             ## nea == ref
-             if row.iloc[2] == record[row.iloc[0]-1: row.iloc[0]+len(row.iloc[2])-1].seq.upper():
-                 ## ea == ref
-                 if row.iloc[1] == record[row.iloc[0]-1: row.iloc[0]+len(row.iloc[1])-1].seq.upper():
-                     ## len(nea) != len(ea)
-                     if len(row.iloc[2]) != len(row.iloc[1]):
-                         return status_pre + "8" + status_end  # indel reverse complementary
-                 else:
-                     return status_pre + "4" + status_end
-             else:
-                 # ea == ref_seq -> need to flip
-                 if row.iloc[1] == record[row.iloc[0]-1: row.iloc[0]+len(row.iloc[1])-1].seq.upper():
-                     return status_pre + "5" + status_end
-                 # ea != ref
-                 return status_pre + "8" + status_end
-
- def oldcheckref(sumstats, ref_seq, chrom="CHR", pos="POS", ea="EA", nea="NEA", status="STATUS", chr_dict=get_chr_to_number(), remove=False, verbose=True, log=Log()):
-     ##start function with col checking##########################################################
-     _start_line = "check if NEA is aligned with reference sequence"
-     _end_line = "checking if NEA is aligned with reference sequence"
-     _start_cols = [chrom, pos, ea, nea, status]
-     _start_function = ".check_ref()"
-     _must_args = {}
-     is_enough_info = start_to(sumstats=sumstats,
-                               log=log,
-                               verbose=verbose,
-                               start_line=_start_line,
-                               end_line=_end_line,
-                               start_cols=_start_cols,
-                               start_function=_start_function,
-                               **_must_args)
-     if is_enough_info == False: return sumstats
-     ############################################################################################
-     log.write(" -Reference genome FASTA file: " + ref_seq, verbose=verbose)
-     log.write(" -Checking records: ", end="", verbose=verbose)
-     chromlist = get_chr_list(add_number=True)
-     records = SeqIO.parse(ref_seq, "fasta")
-     for record in records:
-         if record is not None:
-             record_chr = str(record.id).strip("chrCHR").upper()
-             if record_chr in chr_dict.keys():
-                 i = chr_dict[record_chr]
-             else:
-                 i = record_chr
-             if i in chromlist:
-                 log.write(record_chr, " ", end="", show_time=False, verbose=verbose)
-                 to_check_ref = (sumstats[chrom] == i) & (~sumstats[pos].isna()) & (~sumstats[nea].isna()) & (~sumstats[ea].isna())
-                 sumstats.loc[to_check_ref, status] = sumstats.loc[to_check_ref, [pos, ea, nea, status]].apply(lambda x: _old_check_status(x, record), axis=1)
-
-     log.write("\n", end="", show_time=False, verbose=verbose)
-
-     sumstats[status] = pd.Categorical(sumstats[status], categories=STATUS_CATEGORIES)
-
-     available_to_check = sum((~sumstats[pos].isna()) & (~sumstats[nea].isna()) & (~sumstats[ea].isna()))
-     status_0 = sum(sumstats[status].str.match(r"\w\w\w\w\w[0]\w", case=False, flags=0, na=False))
-     status_3 = sum(sumstats[status].str.match(r"\w\w\w\w\w[3]\w", case=False, flags=0, na=False))
-     status_4 = sum(sumstats[status].str.match(r"\w\w\w\w\w[4]\w", case=False, flags=0, na=False))
-     status_5 = sum(sumstats[status].str.match(r"\w\w\w\w\w[5]\w", case=False, flags=0, na=False))
-     status_6 = sum(sumstats[status].str.match(r"\w\w\w\w\w[6]\w", case=False, flags=0, na=False))
-     #status_7 = sum(sumstats[status].str.match(r"\w\w\w\w\w[7]\w", case=False, flags=0, na=False))
-     status_8 = sum(sumstats[status].str.match(r"\w\w\w\w\w[8]\w", case=False, flags=0, na=False))
-
-     log.write(" -Variants with alleles on the given reference sequence : ", status_0, verbose=verbose)
-     log.write(" -Variants flipped : ", status_3, verbose=verbose)
-     raw_matching_rate = (status_3 + status_0) / available_to_check
-     flip_rate = status_3 / available_to_check
-     log.write(" -Raw matching rate : ", "{:.2f}%".format(raw_matching_rate * 100), verbose=verbose)
-     if raw_matching_rate < 0.8:
-         log.warning("Matching rate is low, please check if the right reference genome is used.")
-     if flip_rate > 0.85:
-         log.write(" -Flipped variants rate > 0.85, it is likely that the EA is aligned with REF in the original dataset.", verbose=verbose)
-
-     log.write(" -Variants inferred reverse_complement : ", status_4, verbose=verbose)
-     log.write(" -Variants inferred reverse_complement_flipped : ", status_5, verbose=verbose)
-     log.write(" -Both alleles on genome + unable to distinguish : ", status_6, verbose=verbose)
-     #log.write(" -Reverse_complementary + both alleles on genome + unable to distinguish: ", status_7)
-     log.write(" -Variants not on the given reference sequence : ", status_8, verbose=verbose)
-
-     if remove is True:
-         sumstats = sumstats.loc[~sumstats[status].str.match(r"\w\w\w\w\w[8]\w"), :]
-         log.write(" -Variants not on the given reference sequence were removed.", verbose=verbose)
-
-     finished(log, verbose, _end_line)
-     return sumstats
-
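
For reference, each STATUS code is a fixed-length string of digits and the sixth digit stores the alignment outcome, so the tallies above are position-anchored regex matches. Illustrative values:

    import pandas as pd

    status = pd.Series(["9999909", "9999939", "9999989"])
    matched = status.str.match(r"\w\w\w\w\w[0]\w", na=False).sum()   # 1 (on reference)
    flipped = status.str.match(r"\w\w\w\w\w[3]\w", na=False).sum()   # 1 (flipped)
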
- #20240320 check if the non-effect allele is aligned with the reference genome
- def _fast_check_status(x: pd.DataFrame, record: np.ndarray, starting_positions: np.ndarray, records_len: np.ndarray):
-     # starting_positions and records_len must be 1D arrays containing data only for the chromosomes contained in x,
-     # and these arrays must be ordered in the same way as the chromosomes in np.unique(x['CHR'].values).
-     # status:
-     # 0 -----> match
-     # 1 -----> Flipped Fixed
-     # 2 -----> Reverse_complementary Fixed
-     # 3 -----> flipped
-     # 4 -----> reverse_complementary
-     # 5 -----> reverse_complementary + flipped
-     # 6 -----> both alleles on genome + unable to distinguish
-     # 7 -----> reverse_complementary + both alleles on genome + unable to distinguish
-     # 8 -----> not on ref genome
-     # 9 -----> unchecked
-     if x.empty:
-         return np.array([])
-
-     # x is expected to be a DataFrame with these columns in this order: ['CHR', 'POS', 'EA', 'NEA', 'STATUS'].
-     # In this way, we don't need to specify the column names.
-     _chrom = x.iloc[:, 0]
-     _pos = x.iloc[:, 1]
-     _ea = x.iloc[:, 2]
-     _nea = x.iloc[:, 3]
-     _status = x.iloc[:, 4]
-
-     # index (within the status string) of the digit that will be modified
-     status_flip_idx = 5
-
-     pos = _pos.values.astype(np.int64)  # convert to int64 because the values could be of type 'object'
-
-     # Rebase the chromosome numbers to 0-based indexing
-     # e.g. ['1', '2', '4', '2'] -> [0, 1, 2, 1]
-     # This is needed because record is a single 1D array containing all the records for all the selected chromosomes,
-     # so for instance if record contains the records for chr1, chr2, chr4 ([...chr1...chr2...chr4...]), we need to
-     # rebase the chromosome numbers to 0-based indexing to index the correct record portion when we do starting_positions[chrom].
-     # Note that x only contains rows for the chromosomes for which we have records in record
-     # (i.e. there are no rows for chr3 if we don't have the record for chr3). This filtering is done in the caller function.
-     _chrom = _chrom.values
-     unique_values, _ = np.unique(_chrom, return_inverse=True)  # get the sorted unique values and their indices
-     chrom = np.searchsorted(unique_values, _chrom)  # replace each value in '_chrom' with its corresponding index in the sorted unique values
-
-     max_len_nea = _nea.str.len().max()
-     max_len_ea = _ea.str.len().max()
-
-     ########################################## mask for variants with out-of-range POS
-     mask_outlier = pos > records_len[chrom]
-
-     #########################################
-
-     # Let's apply the same magic used for the fasta records (check build_fasta_records() for details) to convert the NEA and EA to
-     # a numpy array of integers in a very fast way.
-     # In this case we start from a pd.Series, so we can apply some built-in string methods.
-     # Also, when doing nea.view('<u4'), each row will be automatically right-padded with zeros to reach max_len_nea.
-     # For this reason, we then replace the zeros with our padding value
-     # (and that's why the mapping dict can't have chr(0) as a value, otherwise we would have zeros for both padding and a character).
-     # Reshaping is needed because .view('<u4') will create a flattened array.
-     nea = _nea.str.translate(TRANSLATE_TABLE).to_numpy().astype(f'<U{max_len_nea}')
-     nea = nea.view('<u4').reshape(-1, max_len_nea).astype(np.uint8)
-     nea[nea == 0] = PADDING_VALUE  # padding value
-     ###########################################
-
-     ###########################################
-     # Create a mask holding True at the positions of non-padding values
-     mask_nea = nea != PADDING_VALUE
-
-     # Create the reverse complement of NEA.
-     # In this case, we manually left-pad the translated string with the padding value, since the padding done by view('<u4') would be right-padding,
-     # and that would make the reverse operation hard (because we would have e.g. [2, 2, 4, 100, ..., 100], which would be hard to convert into [4, 2, 2, 100, ..., 100]).
-     rev_nea = _nea.str.translate(TRANSLATE_TABLE_COMPL).str.pad(max_len_nea, 'left', chr(PADDING_VALUE)).to_numpy().astype(f'<U{max_len_nea}')
-     rev_nea = rev_nea.view('<u4').reshape(-1, max_len_nea).astype(np.uint8)
-     rev_nea = rev_nea[:, ::-1]
-
-     # Let's do everything again for EA
-     ea = _ea.str.translate(TRANSLATE_TABLE).to_numpy().astype(f'<U{max_len_ea}')
-     ea = ea.view('<u4').reshape(-1, max_len_ea).astype(np.uint8)
-     ea[ea == 0] = PADDING_VALUE  # padding value
-     ###########################################
-
-     ###########################################
-     mask_ea = ea != PADDING_VALUE
-
-     rev_ea = _ea.str.translate(TRANSLATE_TABLE_COMPL).str.pad(max_len_ea, 'left', chr(PADDING_VALUE)).to_numpy().astype(f'<U{max_len_ea}')
-     rev_ea = rev_ea.view('<u4').reshape(-1, max_len_ea).astype(np.uint8)
-     rev_ea = rev_ea[:, ::-1]
-
-     # Convert the statuses (integers represented as strings) to a numpy array of integers.
-     # Again, use the same concept as before to do this in a very fast way.
-     # e.g. ["9999999", "9939999", "9929999"] -> [[9, 9, 9, 9, 9, 9, 9], [9, 9, 3, 9, 9, 9, 9], [9, 9, 2, 9, 9, 9, 9]]
-     assert _status.str.len().nunique() == 1  # all the status strings should have the same length, let's be sure of that
-     status_len = len(_status.iloc[0])
-     mapping_status = {str(v): chr(v) for v in range(10)}
-     table_stats = _maketrans(mapping_status)
-     status = _status.str.translate(table_stats).to_numpy().astype(f'<U{status_len}')
-     status = status.view('<u4').reshape(-1, status_len).astype(np.uint8)
-
-     # Expand the positions to a 2D array and subtract 1 to convert to 0-based indexing
-     # e.g. [2, 21, 46] -> [[1], [20], [45]]
-     pos = np.expand_dims(pos, axis=-1) - 1
-
-     # Create a modified indices array specifying the starting position of each chromosome in the concatenated record array
-     modified_indices = starting_positions[chrom]
-     modified_indices = modified_indices[:, np.newaxis]  # add a new axis to align with the dimensions of pos
-
-     # Create the range of indices: [0, ..., max_len_nea-1]
-     indices_range = np.arange(max_len_nea)
-
-     # Add the range of indices to the starting indices
-     # e.g. pos = [[1], [20], [45]], indices_range = [0, 1, 2] -> indices = [[1, 2, 3], [20, 21, 22], [45, 46, 47]]
-     indices = pos + indices_range
-
-     # Shift the indices to select the correct absolute positions in the concatenated record array
-     indices = indices + modified_indices
-
-     # Pad the fasta records array: if there is a (pos, chrom) for which pos + starting_positions[chrom] + max_len_nea > len(record),
-     # we would get an out-of-bounds error. This basically happens if there is a pos for the last chromosome for which
-     # pos + max_len_nea > len(record for that chrom). This is very unlikely to happen, but we should handle this case.
-     record = np.pad(record, (0, max_len_nea), constant_values=PADDING_VALUE)
-
-     # Index the record array using the computed indices.
-     # Since we use np.take, all index rows must have the same length, and this is why we added the padding to NEA
-     # and we create the indices using max_len_nea (long story short, we can't obtain a scattered/ragged array).
-     output_nea = np.take(record, indices, mode="clip")
-     ##################################################################
-     output_nea[mask_outlier] = PADDING_VALUE
-     ##################################################################
-
-     # Check if the NEA is equal to the reference sequence at the given position.
-     # In a non-matrix way, this is equivalent (for one single element) to:
-     # nea == record[pos-1: pos+len(nea)-1]
-     # where for example:
-     # a) nea = "AC", record = "ACTG", pos = 1 -> True
-     # b) nea = "T", record = "ACTG", pos = 3 -> True
-     # c) nea = "AG", record = "ACTG", pos = 1 -> False
-     # Since we want to do everything in a vectorized way, we compare the padded NEA with the output
-     # and then use the mask to focus only on the non-padded elements.
-     # Pseudo example (X represents the padding value):
-     # nea = ['AC', 'T'], record = 'ACTGAAG', pos = [1, 3]
-     # -> nea = ['AC', 'TX'], indices = [[1, 2], [3, 4]], mask = [[True, True], [True, False]], output_nea = [['A', 'C'], ['T', 'G']]
-     # -> nea == output_nea: [[True, True], [True, False]], mask: [[True, True], [True, False]]
-     # -> (nea == output_nea) + ~mask: [[True, True], [True, True]]
-     # -> np.all((nea == output_nea) + ~mask, 1): [True, True]
-
-     nea_eq_ref = np.all((nea == output_nea) + ~mask_nea, 1)
-     rev_nea_eq_ref = np.all((rev_nea == output_nea) + ~mask_nea, 1)
-
-     # Let's do everything again for EA
-     indices_range = np.arange(max_len_ea)
-     indices = pos + indices_range
-     indices = indices + modified_indices
-     output_ea = np.take(record, indices, mode="clip")
-     ##################################################################
-     output_ea[mask_outlier] = PADDING_VALUE
-     ##################################################################
-
-     ea_eq_ref = np.all((ea == output_ea) + ~mask_ea, 1)
-     rev_ea_eq_ref = np.all((rev_ea == output_ea) + ~mask_ea, 1)
-
-     masks_max_len = max(mask_nea.shape[1], mask_ea.shape[1])
-
-     # pad the masks with False so they reach the same shape, then compare them row-wise
-     len_nea_eq_len_ea = np.all(
-         np.pad(mask_nea, ((0, 0), (0, masks_max_len - mask_nea.shape[1])), constant_values=False) ==
-         np.pad(mask_ea, ((0, 0), (0, masks_max_len - mask_ea.shape[1])), constant_values=False)
-     , axis=1)
-     len_rev_nea_eq_rev_len_ea = len_nea_eq_len_ea
-
-     # The following conditions replicate the if-else statements of the original check_status function:
-     # https://github.com/Cloufield/gwaslab/blob/f6b4c4e58a26e5d67d6587141cde27acf9ce2a11/src/gwaslab/hm_harmonize_sumstats.py#L238
-
-     # nea == ref && ea == ref && len(nea) != len(ea)
-     status[nea_eq_ref * ea_eq_ref * ~len_nea_eq_len_ea, status_flip_idx] = 6
-
-     # nea == ref && ea != ref
-     status[nea_eq_ref * ~ea_eq_ref, status_flip_idx] = 0
-
-     # nea != ref && ea == ref
-     status[~nea_eq_ref * ea_eq_ref, status_flip_idx] = 3
-
-     # nea != ref && ea != ref && rev_nea == ref && rev_ea == ref && len(rev_nea) != len(rev_ea)
-     status[~nea_eq_ref * ~ea_eq_ref * rev_nea_eq_ref * rev_ea_eq_ref * ~len_rev_nea_eq_rev_len_ea, status_flip_idx] = 8
-
-     # nea != ref && ea != ref && rev_nea == ref && rev_ea != ref
-     status[~nea_eq_ref * ~ea_eq_ref * rev_nea_eq_ref * ~rev_ea_eq_ref, status_flip_idx] = 4
-
-     # nea != ref && ea != ref && rev_nea != ref && rev_ea == ref
-     status[~nea_eq_ref * ~ea_eq_ref * ~rev_nea_eq_ref * rev_ea_eq_ref, status_flip_idx] = 5
-
-     # nea != ref && ea != ref && rev_nea != ref && rev_ea != ref
-     status[~nea_eq_ref * ~ea_eq_ref * ~rev_nea_eq_ref * ~rev_ea_eq_ref, status_flip_idx] = 8
-
-     # Convert the (now modified) 2D status array back to a numpy array of strings in a very fast way.
-     # Since 'status' is a 2D array of integers ranging from 0 to 9, we can build the integer representation
-     # of each row using the efficient operation below (e.g. [1, 2, 3, 4, 5] -> [12345]).
-     # Then we convert this integer to a string using the f'<U{status.shape[1]}' dtype (e.g. 12345 -> '12345').
-     # The "naive" way would be:
-     # status_str = [''.join(map(str, l)) for l in status]
-     # status_arr = np.array(status_str)
-     status_flat = np.sum(status * 10**np.arange(status.shape[1] - 1, -1, -1), axis=1)
-     status_arr = status_flat.astype(f'<U{status.shape[1]}')
-
-     return status_arr
-
-
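
The core of the vectorized comparison above, reduced to a toy case (illustrative encoding; the real code uses the MAPPING constants and `+` instead of `|` for the boolean OR): build one index window per variant, gather from the concatenated record, and let the padding mask neutralize the unused cells.

    import numpy as np

    PAD = 100
    record = np.array([2, 4, 3, 5, 2, 2, 5], dtype=np.uint8)    # "ACTGAAG" encoded
    nea    = np.array([[2, 4], [3, PAD]], dtype=np.uint8)       # ["AC", "T"] right-padded
    pos0   = np.array([[0], [2]])                               # 0-based start positions
    windows = np.take(record, pos0 + np.arange(2), mode="clip") # [[2, 4], [3, 5]]
    mask = nea != PAD
    matches = np.all((nea == windows) | ~mask, axis=1)          # [ True,  True]
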
- def check_status(sumstats: pd.DataFrame, fasta_records_dict, log=Log(), verbose=True):
-
-     chrom, pos, ea, nea, status = sumstats.columns
-
-     # First, convert the fasta records to a single numpy array of integers
-     record, starting_positions_dict, records_len_dict = build_fasta_records(fasta_records_dict, pos_as_dict=True, log=log, verbose=verbose)
-
-     # In _fast_check_status(), several 2D numpy arrays are created, and they are padded to have shape[1] == max_len_nea or max_len_ea.
-     # Since most of the NEA and EA strings are short, we first perform the check on the records having short NEA and EA strings,
-     # and then on the records having long NEA and EA strings. In this way we can speed up the process (since the
-     # arrays are smaller) and save memory.
-     max_len = 4  # this is a chosen value; we could compute it from some statistics about the length and count of the NEA and EA strings
-     condition = (sumstats[nea].str.len() <= max_len) & (sumstats[ea].str.len() <= max_len)
-
-     log.write(f" -Checking records for ( len(NEA) <= {max_len} and len(EA) <= {max_len} )", verbose=verbose)
-     sumstats_cond = sumstats[condition]
-     unique_chrom_cond = sumstats_cond[chrom].unique()
-     starting_pos_cond = np.array([starting_positions_dict[k] for k in unique_chrom_cond])
-     records_len_cond = np.array([records_len_dict[k] for k in unique_chrom_cond])
-
-     sumstats.loc[condition, status] = _fast_check_status(sumstats_cond, record=record, starting_positions=starting_pos_cond, records_len=records_len_cond)
-
-     log.write(f" -Checking records for ( len(NEA) > {max_len} or len(EA) > {max_len} )", verbose=verbose)
-     sumstats_not_cond = sumstats[~condition]
-     unique_chrom_not_cond = sumstats_not_cond[chrom].unique()
-     starting_pos_not_cond = np.array([starting_positions_dict[k] for k in unique_chrom_not_cond])
-     records_len_not_cond = np.array([records_len_dict[k] for k in unique_chrom_not_cond])
-     sumstats.loc[~condition, status] = _fast_check_status(sumstats_not_cond, record=record, starting_positions=starting_pos_not_cond, records_len=records_len_not_cond)
-
-     return sumstats[status].values
-
-
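
And the digit round-trip at the end of _fast_check_status, in isolation (note that a leading zero digit would be dropped by the integer representation; the status strings used here start with non-zero build digits, so in practice this does not occur):

    import numpy as np

    status = np.array([[9, 9, 9, 9, 9, 0, 9],
                       [9, 9, 9, 9, 9, 3, 9]], dtype=np.uint8)
    flat = np.sum(status * 10 ** np.arange(status.shape[1] - 1, -1, -1), axis=1)
    as_str = flat.astype("<U7")   # ['9999909' '9999939']
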
- def checkref(sumstats, ref_seq, chrom="CHR", pos="POS", ea="EA", nea="NEA", status="STATUS", chr_dict=get_chr_to_number(), remove=False, verbose=True, log=Log()):
-     ##start function with col checking##########################################################
-     _start_line = "check if NEA is aligned with reference sequence"
-     _end_line = "checking if NEA is aligned with reference sequence"
-     _start_cols = [chrom, pos, ea, nea, status]
-     _start_function = ".check_ref()"
-     _must_args = {}
-
-     is_enough_info = start_to(sumstats=sumstats,
-                               log=log,
-                               verbose=verbose,
-                               start_line=_start_line,
-                               end_line=_end_line,
-                               start_cols=_start_cols,
-                               start_function=_start_function,
-                               **_must_args)
-     if is_enough_info == False: return sumstats
-     ############################################################################################
-     log.write(" -Reference genome FASTA file: " + ref_seq, verbose=verbose)
-     log.write(" -Loading fasta records:", end="", verbose=verbose)
-     chromlist = get_chr_list(add_number=True)
-     records = SeqIO.parse(ref_seq, "fasta")
-
-     sumstats = sortcoordinate(sumstats, verbose=False)
-
-     all_records_dict = {}
-     chroms_in_sumstats = sumstats[chrom].unique()  # load records from the FASTA file only for the chromosomes present in the sumstats
-     for record in records:
-         if record is not None:
-             record_chr = str(record.id).strip("chrCHR").upper()
-             if record_chr in chr_dict.keys():
-                 i = chr_dict[record_chr]
-             else:
-                 i = record_chr
-             if (i in chromlist) and (i in chroms_in_sumstats):
-                 log.write(record_chr, " ", end="", show_time=False, verbose=verbose)
-                 all_records_dict.update({i: record})
-     log.write("", show_time=False, verbose=verbose)
-
-     if len(all_records_dict) > 0:
-         log.write(" -Checking records", verbose=verbose)
-         all_records_dict = dict(sorted(all_records_dict.items()))  # sort by key in case the fasta records are not already ordered by chromosome
-         to_check_ref = (sumstats[chrom].isin(list(all_records_dict.keys()))) & (~sumstats[pos].isna()) & (~sumstats[nea].isna()) & (~sumstats[ea].isna())
-         sumstats_to_check = sumstats.loc[to_check_ref, [chrom, pos, ea, nea, status]]
-         sumstats.loc[to_check_ref, status] = check_status(sumstats_to_check, all_records_dict, log=log, verbose=verbose)
-         log.write(" -Finished checking records", verbose=verbose)
-
-     sumstats[status] = pd.Categorical(sumstats[status], categories=STATUS_CATEGORIES)
-
-     available_to_check = sum((~sumstats[pos].isna()) & (~sumstats[nea].isna()) & (~sumstats[ea].isna()))
-     status_0 = sum(sumstats[status].str.match(r"\w\w\w\w\w[0]\w", case=False, flags=0, na=False))
-     status_3 = sum(sumstats[status].str.match(r"\w\w\w\w\w[3]\w", case=False, flags=0, na=False))
-     status_4 = sum(sumstats[status].str.match(r"\w\w\w\w\w[4]\w", case=False, flags=0, na=False))
-     status_5 = sum(sumstats[status].str.match(r"\w\w\w\w\w[5]\w", case=False, flags=0, na=False))
-     status_6 = sum(sumstats[status].str.match(r"\w\w\w\w\w[6]\w", case=False, flags=0, na=False))
-     #status_7 = sum(sumstats[status].str.match(r"\w\w\w\w\w[7]\w", case=False, flags=0, na=False))
-     status_8 = sum(sumstats[status].str.match(r"\w\w\w\w\w[8]\w", case=False, flags=0, na=False))
-
-     log.write(" -Variants with alleles on the given reference sequence : ", status_0, verbose=verbose)
-     log.write(" -Variants flipped : ", status_3, verbose=verbose)
-     raw_matching_rate = (status_3 + status_0) / available_to_check
-     flip_rate = status_3 / available_to_check
-     log.write(" -Raw matching rate : ", "{:.2f}%".format(raw_matching_rate * 100), verbose=verbose)
-     if raw_matching_rate < 0.8:
-         log.warning("Matching rate is low, please check if the right reference genome is used.")
-     if flip_rate > 0.85:
-         log.write(" -Flipped variants rate > 0.85, it is likely that the EA is aligned with REF in the original dataset.", verbose=verbose)
-
-     log.write(" -Variants inferred reverse_complement : ", status_4, verbose=verbose)
-     log.write(" -Variants inferred reverse_complement_flipped : ", status_5, verbose=verbose)
-     log.write(" -Both alleles on genome + unable to distinguish : ", status_6, verbose=verbose)
-     #log.write(" -Reverse_complementary + both alleles on genome + unable to distinguish: ", status_7)
-     log.write(" -Variants not on the given reference sequence : ", status_8, verbose=verbose)
-
-     if remove is True:
-         sumstats = sumstats.loc[~sumstats[status].str.match(r"\w\w\w\w\w[8]\w"), :]
-         log.write(" -Variants not on the given reference sequence were removed.", verbose=verbose)
-
-
-     finished(log, verbose, _end_line)
-     return sumstats
-
- def build_fasta_records(fasta_records_dict, pos_as_dict=True, log=Log(), verbose=True):
-     log.write(" -Building numpy fasta records from dict", verbose=verbose)
-
-     # Let's do some magic to convert each fasta record to a numpy array of integers in a very fast way.
-     # fasta_record.seq._data is a byte-string, so we can use bytes.maketrans to apply a translation.
-     # Here we map the bytes to the unicode character representing the desired integer as defined in the mapping dict
-     # (i.e. b'A' -> '\x02', b'T' -> '\x03', b'C' -> '\x04', b'G' -> '\x05', b'N' -> '\x06').
-     # Then, using np.array(..., dtype='<U...') we convert the string to a numpy array of unicode characters.
-     # Then, we use the view('<u4') trick to convert the unicode characters to 4-byte integers, so we obtain the actual integer representation of the characters.
-     # Lastly, we cast the array to np.uint8 to convert the 4-byte integers to 1-byte integers to save memory.
-     # Full example:
-     # fasta_record.seq._data = b'ACTGN' -> b'\x02\x04\x03\x05\x06' -> np.array(['\x02\x04\x03\x05\x06'], dtype='<U5') -> np.array([2, 4, 3, 5, 6], dtype=uint32) -> np.array([2, 4, 3, 5, 6], dtype=uint8)
-     all_r = []
-     for r in fasta_records_dict.values():
-         r = r.seq._data.translate(TRANSLATE_TABLE)
-         r = np.array([r], dtype=f'<U{len(r)}').view('<u4').astype(np.uint8)
-         all_r.append(r)
-
-     # We've just created a list of numpy arrays, so we can concatenate them to obtain a single numpy array.
-     # We also keep track of the starting position of each record in the concatenated array. This will be useful later
-     # to index the record array depending on the position of the variant and the chromosome.
-     records_len = np.array([len(r) for r in all_r])
-
-     starting_positions = np.cumsum(records_len) - records_len
-
-     # convert to dicts keyed by chromosome only when requested, so the
-     # pos_as_dict=False path also returns defined values
-     if pos_as_dict:
-         starting_positions = dict(zip(fasta_records_dict.keys(), starting_positions))
-         records_len = dict(zip(fasta_records_dict.keys(), records_len))
-
-     record = np.concatenate(all_r)
-     del all_r  # free memory
-
-     return record, starting_positions, records_len
-
- #######################################################################################################################################
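
The translate/view trick itself can be demonstrated without Biopython (a sketch; assumes a little-endian machine, where '<u4' matches the UCS-4 layout of numpy unicode arrays):

    import numpy as np

    MAPPING = {"A": chr(2), "T": chr(3), "C": chr(4), "G": chr(5), "N": chr(6)}
    seq = "ACTGN".translate(str.maketrans(MAPPING))   # '\x02\x04\x03\x05\x06'
    arr = np.array([seq], dtype=f"<U{len(seq)}")      # one 5-character unicode string
    codes = arr.view("<u4").astype(np.uint8)          # array([2, 4, 3, 5, 6], dtype=uint8)
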
-
- #20220721
- def chrposref_rsid(chr, end, ref, alt, vcf_reader, chr_dict=get_number_to_chr()):
-     ## single record assignment
-     start = end - 1
-     if chr_dict is not None: chr = chr_dict[chr]
-
-     try:
-         chr_seq = vcf_reader.fetch(chr, start, end)
-     except Exception:
-         # contig not present in the VCF (or region not valid)
-         return pd.NA
-
-     for record in chr_seq:
-         if record.pos == end:
-             if record.alts is None:
-                 return pd.NA
-             if record.ref == ref and (alt in record.alts):
-                 return record.id
-             elif (ref in record.alts) and record.ref == alt:
-                 return record.id
-     return pd.NA
-
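
pysam's fetch() takes a 0-based, half-open interval, which is why a 1-based GWAS position `end` is queried as fetch(chrom, end - 1, end) above. Usage sketch (the VCF path is hypothetical, and the file must be bgzipped and indexed for region queries):

    from pysam import VariantFile

    vcf = VariantFile("dbsnp.vcf.gz")            # hypothetical indexed VCF
    for rec in vcf.fetch("1", 12344, 12345):     # records overlapping 1-based POS 12345
        if rec.pos == 12345 and rec.alts is not None:
            print(rec.id, rec.ref, rec.alts)
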
- def assign_rsid_single(sumstats, path, rsid="rsID", chr="CHR", pos="POS", ref="NEA", alt="EA", chr_dict=get_number_to_chr()):
-     ## single df assignment
-     vcf_reader = VariantFile(path)
-     def rsid_helper(x, vcf_reader, chr_dict):
-         return chrposref_rsid(x.iloc[0], x.iloc[1], x.iloc[2], x.iloc[3], vcf_reader, chr_dict)
-     map_func = partial(rsid_helper, vcf_reader=vcf_reader, chr_dict=chr_dict)
-     rsID = sumstats.apply(map_func, axis=1)
-     return rsID
-
- def parallelizeassignrsid(sumstats, path, ref_mode="vcf", snpid="SNPID", rsid="rsID", chr="CHR", pos="POS", ref="NEA", alt="EA", status="STATUS",
-                           n_cores=1, chunksize=5000000, ref_snpid="SNPID", ref_rsid="rsID",
-                           overwrite="empty", verbose=True, log=Log(), chr_dict=None):
-     '''
-     overwrite mode:
-         all     : overwrite rsID for all variants with an available rsID in the reference
-         invalid : only assign rsID for variants with an invalid rsID
-         empty   : only assign rsID for variants with an NA rsID
-     '''
-
-     if ref_mode == "vcf":
-         ###################################################################################################################
-         ##start function with col checking##########################################################
-         _start_line = "assign rsID using reference VCF"
-         _end_line = "assign rsID using reference file"
-         _start_cols = [chr, pos, ref, alt, status]
-         _start_function = ".assign_rsid()"
-         _must_args = {}
-
-         is_enough_info = start_to(sumstats=sumstats,
-                                   log=log,
-                                   verbose=verbose,
-                                   start_line=_start_line,
-                                   end_line=_end_line,
-                                   start_cols=_start_cols,
-                                   start_function=_start_function,
-                                   n_cores=n_cores,
-                                   ref_vcf=path,
-                                   **_must_args)
-         if is_enough_info == False: return sumstats
-         ############################################################################################
-         chr_dict = auto_check_vcf_chr_dict(path, chr_dict, verbose, log)
-         log.write(" -Assigning rsID based on CHR:POS and REF:ALT/ALT:REF...", verbose=verbose)
-         ##############################################
-         if rsid not in sumstats.columns:
-             sumstats[rsid] = pd.Series(dtype="string")
-
-         ###############################################
-         total_number = len(sumstats)
-         pre_number = sum(~sumstats[rsid].isna())
-
-         ##################################################################################################################
-         standardized_normalized = sumstats["STATUS"].str.match(r"\w\w\w[0][01234]\w\w", case=False, flags=0, na=False)
-         if overwrite == "all":
-             to_assign = standardized_normalized
-         if overwrite == "invalid":
-             to_assign = (~sumstats[rsid].str.match(r'rs([0-9]+)', case=False, flags=0, na=False)) & standardized_normalized
-         if overwrite == "empty":
-             to_assign = sumstats[rsid].isna() & standardized_normalized
-         ##################################################################################################################
-         # multicore arrangement
-
-         if sum(to_assign) > 0:
-             if sum(to_assign) < 10000: n_cores = 1
-             #df_split = np.array_split(sumstats.loc[to_assign, [chr,pos,ref,alt]], n_cores)
-             df_split = _df_split(sumstats.loc[to_assign, [chr, pos, ref, alt]], n_cores)
-             pool = Pool(n_cores)
-             map_func = partial(assign_rsid_single, path=path, chr=chr, pos=pos, ref=ref, alt=alt, chr_dict=chr_dict)
-             assigned_rsid = pd.concat(pool.map(map_func, df_split))
-             sumstats.loc[to_assign, rsid] = assigned_rsid.values
-             pool.close()
-             pool.join()
-             gc.collect()
-         ##################################################################################################################
-
-         after_number = sum(~sumstats[rsid].isna())
-         log.write(" -rsID annotation for " + str(total_number - after_number) + " variants still needs to be fixed!", verbose=verbose)
-         log.write(" -Annotated " + str(after_number - pre_number) + " rsIDs successfully!", verbose=verbose)
-
-         ##################################################################################################################
-     elif ref_mode == "tsv":
-         '''
-         assign rsID based on CHR:POS
-         '''
-         ##start function with col checking##########################################################
-         _start_line = "assign rsID by matching SNPID with CHR:POS:REF:ALT in the reference TSV"
-         _end_line = "assign rsID using reference file"
-         _start_cols = [snpid, status]
-         _start_function = ".assign_rsid()"
-         _must_args = {}
-
-         is_enough_info = start_to(sumstats=sumstats,
-                                   log=log,
-                                   verbose=verbose,
-                                   start_line=_start_line,
-                                   end_line=_end_line,
-                                   start_cols=_start_cols,
-                                   start_function=_start_function,
-                                   n_cores=n_cores,
-                                   ref_tsv=path,
-                                   **_must_args)
-         if is_enough_info == False: return sumstats
-         ############################################################################################
-
-         #standardized_normalized = sumstats["STATUS"].str.match(r"\w\w\w[0][01234]\w\w", case=False, flags=0, na=False)
-         standardized_normalized = sumstats["STATUS"] == sumstats["STATUS"]  # True for every row with a non-missing STATUS
-
-         if rsid not in sumstats.columns:
-             sumstats[rsid] = pd.Series(dtype="string")
-
-         if overwrite == "empty":
-             to_assign = sumstats[rsid].isna() & standardized_normalized
-         if overwrite == "all":
-             to_assign = standardized_normalized
-         if overwrite == "invalid":
-             to_assign = (~sumstats[rsid].str.match(r'rs([0-9]+)', case=False, flags=0, na=False)) & standardized_normalized
-
-         total_number = len(sumstats)
-         pre_number = sum(~sumstats[rsid].isna())
-         log.write(" -" + str(sum(to_assign)) + " rsIDs could possibly be fixed...", verbose=verbose)
-         if sum(to_assign) > 0:
-             sumstats = sumstats.set_index(snpid)
-             dic_chunks = pd.read_csv(path, sep="\t", usecols=[ref_snpid, ref_rsid],
-                                      chunksize=chunksize, index_col=ref_snpid,
-                                      dtype={ref_snpid: "string", ref_rsid: "string"})
-
-             log.write(" -Setting block size: ", chunksize, verbose=verbose)
-             log.write(" -Loading block: ", end="", verbose=verbose)
-             for i, dic in enumerate(dic_chunks):
-                 gc.collect()
-                 log.write(i, " ", end=" ", show_time=False)
-                 dic = dic.rename(index={ref_snpid: snpid})
-                 dic = dic.rename(columns={ref_rsid: rsid})
-                 dic = dic.loc[~dic.index.duplicated(keep=False), :]  # drop SNPIDs that are not unique in the reference
-                 sumstats.update(dic, overwrite=True)
-
-             log.write("\n", end="", show_time=False, verbose=verbose)
-             sumstats = sumstats.reset_index()
-             sumstats = sumstats.rename(columns={'index': snpid})
-
-             after_number = sum(~sumstats[rsid].isna())
-             log.write(" -rsID annotation for " + str(total_number - after_number) + " variants still needs to be fixed!", verbose=verbose)
-             log.write(" -Annotated " + str(after_number - pre_number) + " rsIDs successfully!", verbose=verbose)
-         else:
-             log.write(" -No rsID can be fixed...skipping...", verbose=verbose)
-         ################################################################################################################
-
-     finished(log, verbose, _end_line)
-     return sumstats
- #################################################################################################################################################
- #single record assignment
-
- def check_strand_status(chr, start, end, ref, alt, eaf, vcf_reader, alt_freq, status, chr_dict=get_number_to_chr()):
-     ### 0 : not palindromic
-     ### 1 : palindromic, + strand
-     ### 2 : palindromic, - strand -> need to flip -> flipped
-     ### 5 : palindromic, - strand -> need to flip
-     ### 8 : no ref data
-     if chr_dict is not None: chr = chr_dict[chr]
-     status_pre = status[:6]
-     status_end = ""
-     try:
-         chr_seq = vcf_reader.fetch(chr, start, end)
-     except Exception:
-         # contig not present in the VCF (or region not valid)
-         return status_pre + "8" + status_end
-
-     for record in chr_seq:
-         if record.pos == end and record.ref == ref and (alt in record.alts):
-             if (record.info[alt_freq][0] < 0.5) and (eaf < 0.5):
-                 return status_pre + "1" + status_end
-             elif (record.info[alt_freq][0] > 0.5) and (eaf > 0.5):
-                 return status_pre + "1" + status_end
-             else:
-                 return status_pre + "5" + status_end
-     return status_pre + "8" + status_end
-
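
The decision rule above, stated on its own: for a palindromic SNP, if the panel ALT frequency and the sumstats EAF fall on the same side of 0.5, the strands agree (digit 1); otherwise the variant needs flipping (digit 5). A sketch (boundary handling at exactly 0.5 differs slightly from the code above, which assigns 5 in that case):

    def infer_palindromic_strand(panel_alt_freq, eaf):
        if panel_alt_freq is None:
            return "8"                        # no reference data
        same_side = (panel_alt_freq < 0.5) == (eaf < 0.5)
        return "1" if same_side else "5"

    assert infer_palindromic_strand(0.12, 0.15) == "1"
    assert infer_palindromic_strand(0.12, 0.88) == "5"
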
- def check_strand_status_cache(data, cache, ref_infer=None, ref_alt_freq=None, chr_dict=get_number_to_chr(), trust_cache=True, log=Log(), verbose=True):
-     if not trust_cache:
-         assert ref_infer is not None, "If trust_cache is False, ref_infer must be provided"
-         log.warning("You are not trusting the cache; this will slow down the process. Please consider building a complete cache.")
-
-     if ref_infer is not None and not trust_cache:
-         vcf_reader = VariantFile(ref_infer)
-
-     if isinstance(data, pd.DataFrame):
-         data = data.values
-
-     in_cache = 0
-     new_statuses = []
-
-     for i in range(data.shape[0]):
-         _chrom, pos, ref, alt, eaf, status = data[i]
-         chrom = _chrom
-         start = pos - 1
-         end = pos
-
-         if chr_dict is not None: chrom = chr_dict[chrom]
-
-         status_pre = status[:6]
-         status_end = ""
-
-         new_status = status_pre + "8" + status_end  # default value
-
-         cache_key = f"{chrom}:{pos}:{ref}:{alt}"
-         if cache_key in cache:
-             in_cache += 1
-             record = cache[cache_key]
-             if record is None:
-                 new_status = status_pre + "8" + status_end
-             else:
-                 if (record < 0.5) and (eaf < 0.5):
-                     new_status = status_pre + "1" + status_end
-                 elif (record > 0.5) and (eaf > 0.5):
-                     new_status = status_pre + "1" + status_end
-                 else:
-                     new_status = status_pre + "5" + status_end
-         else:
-             if not trust_cache:
-                 # If we don't trust the cache (it may be incomplete), perform the check by reading from the VCF file
-                 new_status = check_strand_status(_chrom, start, end, ref, alt, eaf, vcf_reader, ref_alt_freq, status, chr_dict)
-
-         new_statuses.append(new_status)
-
-     log.write(f" -Elements in cache: {in_cache}", verbose=verbose)
-     return new_statuses
-
-
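
The cache layout assumed by these lookups (inferred from the code above): a flat mapping from "CHR:POS:REF:ALT" keys to the panel ALT allele frequency, with None marking variants known to be absent.

    cache = {"1:12345:A:G": 0.12, "2:500:C:T": None}
    chrom, pos, ref, alt = "1", 12345, "A", "G"
    alt_freq = cache.get(f"{chrom}:{pos}:{ref}:{alt}")   # 0.12
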
- def check_unkonwn_indel(chr, start, end, ref, alt, eaf, vcf_reader, alt_freq, status, chr_dict=get_number_to_chr(), daf_tolerance=0.2):
-     ### input: unknown indel, both alleles on genome (xx1[45]x)
-     ### 3 no flip
-     ### 4 unknown indel, fixed (6->5)
-     ### 6 flip
-
-     if chr_dict is not None: chr = chr_dict[chr]
-     status_pre = status[:6]
-     status_end = ""
-
-     try:
-         chr_seq = vcf_reader.fetch(chr, start, end)
-     except Exception:
-         # contig not present in the VCF (or region not valid)
-         return status_pre + "8" + status_end
-
-     for record in chr_seq:
-         if record.pos == end and record.ref == ref and (alt in record.alts):
-             if abs(record.info[alt_freq][0] - eaf) < daf_tolerance:
-                 return status_pre + "3" + status_end
-
-         elif record.pos == end and record.ref == alt and (ref in record.alts):
-             if abs(record.info[alt_freq][0] - (1 - eaf)) < daf_tolerance:
-                 return status_pre + "6" + status_end
-
-     return status_pre + "8" + status_end
-
-
- def check_unkonwn_indel_cache(data, cache, ref_infer=None, ref_alt_freq=None, chr_dict=get_number_to_chr(), daf_tolerance=0.2, trust_cache=True, log=Log(), verbose=True):
-     if not trust_cache:
-         assert ref_infer is not None, "If trust_cache is False, ref_infer must be provided"
-         log.warning("You are not trusting the cache; this will slow down the process. Please consider building a complete cache.")
-
-     # only open the VCF when we may need to fall back to it
-     if ref_infer is not None and not trust_cache:
-         vcf_reader = VariantFile(ref_infer)
-
-     if isinstance(data, pd.DataFrame):
-         data = data.values
-
-     in_cache = 0
-     new_statuses = []
-
-     for i in range(data.shape[0]):
-         _chrom, pos, ref, alt, eaf, status = data[i]
-         chrom = _chrom
-
-         if chr_dict is not None: chrom = chr_dict[chrom]
-         start = pos - 1
-         end = pos
-
-         status_pre = status[:6]
-         status_end = ""
-
-         new_status = status_pre + "8" + status_end  # default value
-
-         cache_key_ref_alt = f"{chrom}:{pos}:{ref}:{alt}"
-         cache_key_alt_ref = f"{chrom}:{pos}:{alt}:{ref}"
-
-         if cache_key_ref_alt in cache:
-             in_cache += 1
-             record = cache[cache_key_ref_alt]
-             if record is None:
-                 new_status = status_pre + "8" + status_end
-             else:
-                 if abs(record - eaf) < daf_tolerance:
-                     new_status = status_pre + "3" + status_end
-
-         elif cache_key_alt_ref in cache:
-             in_cache += 1
-             record = cache[cache_key_alt_ref]
-             if record is None:
-                 new_status = status_pre + "8" + status_end
-             else:
-                 if abs(record - (1 - eaf)) < daf_tolerance:
-                     new_status = status_pre + "6" + status_end
-
-         else:
-             if not trust_cache:
-                 # If we don't trust the cache (it may be incomplete), perform the check by reading from the VCF file
-                 new_status = check_unkonwn_indel(_chrom, start, end, ref, alt, eaf, vcf_reader, ref_alt_freq, status, chr_dict, daf_tolerance)
-
-         new_statuses.append(new_status)
-
-     log.write(f" -Elements in cache: {in_cache}", verbose=verbose)
-     return new_statuses
-
-
- def get_reverse_complementary_allele(a):
-     dic = str.maketrans({
-         "A": "T",
-         "T": "A",
-         "C": "G",
-         "G": "C"})
-     return a[::-1].translate(dic)
-
- def is_palindromic(sumstats, a1="EA", a2="NEA"):
-     gc = (sumstats[a1] == "G") & (sumstats[a2] == "C")
-     cg = (sumstats[a1] == "C") & (sumstats[a2] == "G")
-     at = (sumstats[a1] == "A") & (sumstats[a2] == "T")
-     ta = (sumstats[a1] == "T") & (sumstats[a2] == "A")
-     palindromic = gc | cg | at | ta
-     return palindromic
- ##################################################################################################################################################
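
Example behaviour of the helper above (illustrative frame): A/T and C/G pairs are palindromic, everything else is not.

    import pandas as pd

    df = pd.DataFrame({"EA": ["A", "C", "A"], "NEA": ["T", "G", "G"]})
    print(is_palindromic(df).tolist())   # [True, True, False]
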
1104
- #single df assignment
1105
-
1106
- def check_strand(sumstats,ref_infer,ref_alt_freq=None,chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",chr_dict=get_number_to_chr(),status="STATUS"):
1107
- vcf_reader = VariantFile(ref_infer)
1108
- status_part = sumstats.apply(lambda x:check_strand_status(x.iloc[0],x.iloc[1]-1,x.iloc[1],x.iloc[2],x.iloc[3],x.iloc[4],vcf_reader,ref_alt_freq,x.iloc[5],chr_dict),axis=1)
1109
- return status_part
1110
-
1111
- def check_strand_cache(sumstats,cache,ref_infer,ref_alt_freq=None,chr_dict=get_number_to_chr(),trust_cache=True,log=Log(),verbose=True):
1112
- assert cache is not None, "Cache must be provided"
1113
- status_part = check_strand_status_cache(sumstats,cache,ref_infer,ref_alt_freq,chr_dict,trust_cache,log,verbose)
1114
- return status_part
1115
-
1116
- def check_indel(sumstats,ref_infer,ref_alt_freq=None,chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",chr_dict=get_number_to_chr(),status="STATUS",daf_tolerance=0.2):
1117
- vcf_reader = VariantFile(ref_infer)
1118
- status_part = sumstats.apply(lambda x:check_unkonwn_indel(x.iloc[0],x.iloc[1]-1,x.iloc[1],x.iloc[2],x.iloc[3],x.iloc[4],vcf_reader,ref_alt_freq,x.iloc[5],chr_dict,daf_tolerance),axis=1)
1119
- return status_part
1120
-
1121
- def check_indel_cache(sumstats,cache,ref_infer,ref_alt_freq=None,chr_dict=get_number_to_chr(),daf_tolerance=0.2,trust_cache=True,log=Log(),verbose=True):
1122
- assert cache is not None, "Cache must be provided"
1123
- status_part = check_unkonwn_indel_cache(sumstats,cache,ref_infer,ref_alt_freq,chr_dict,daf_tolerance,trust_cache,log,verbose)
1124
- return status_part
1125
-
1126
- ##################################################################################################################################################
1127
-
1128
- def parallelinferstrand(sumstats,ref_infer,ref_alt_freq=None,maf_threshold=0.40,daf_tolerance=0.20,remove_snp="",mode="pi",n_cores=1,remove_indel="",
1129
- chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",status="STATUS",
1130
- chr_dict=None,cache_options={},verbose=True,log=Log()):
1131
- '''
1132
- Args:
1133
- cache_options : A dictionary with the following keys:
1134
- - cache_manager: CacheManager object or None. If any between cache_loader and cache_process is not None, or use_cache is True, a CacheManager object will be created automatically.
1135
- - trust_cache: bool (optional, default: True). Whether to completely trust the cache or not. Trusting the cache means that any key not found inside the cache will be considered as a missing value even in the VCF file.
1136
- - cache_loader: Object with a get_cache() method or None.
1137
- - cache_process: Object with an apply_fn() method or None.
1138
- - use_cache: bool (optional, default: False). If any of the cache_manager, cache_loader or cache_process is not None, this will be set to True automatically.
1139
- If set to True while cache_manager, cache_loader and cache_process are all None, the cache will be loaded (or built) on the spot.
1140
-
1141
- cache_loader and cache_process exist so that a custom object which already holds a loaded cache can be passed in. This is useful when the cache is loaded in the background in another thread/process while other operations are performed.
1142
- The cache_manager is a CacheManager object that exposes the API for interacting with the cache.
1143
- '''
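- # Usage sketch (illustrative; the VCF path and INFO field name below are
- # placeholder examples, not defaults):
- #   sumstats = parallelinferstrand(sumstats, ref_infer="ref.vcf.gz",
- #                                  ref_alt_freq="AF", mode="pi",
- #                                  cache_options={"use_cache": True})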
1144
-
1145
- ##start function with col checking##########################################################
1146
- _start_line = "infer strand for palindromic SNPs/align indistinguishable indels"
1147
- _end_line = "inferring strand for palindromic SNPs/align indistinguishable indels"
1148
- _start_cols = [chr,pos,ref,alt,eaf,status]
1149
- _start_function = ".infer_strand()"
1150
- _must_args ={"ref_alt_freq":ref_alt_freq}
1151
-
1152
- is_enough_info = start_to(sumstats=sumstats,
1153
- log=log,
1154
- verbose=verbose,
1155
- start_line=_start_line,
1156
- end_line=_end_line,
1157
- start_cols=_start_cols,
1158
- start_function=_start_function,
1159
- n_cores=n_cores,
1160
- ref_vcf=ref_infer,
1161
- **_must_args)
1162
- if is_enough_info == False: return sumstats
1163
- ############################################################################################
1164
-
1165
- chr_dict = auto_check_vcf_chr_dict(ref_infer, chr_dict, verbose, log)
1166
-
1167
- # Setup cache variables
1168
- cache_manager = cache_options.get("cache_manager", None)
1169
- if cache_manager is not None:
1170
- assert isinstance(cache_manager, CacheManager), "cache_manager must be a CacheManager object"
1171
- trust_cache = cache_options.get("trust_cache", True)
1172
- cache_loader = cache_options.get("cache_loader", None)
1173
- cache_process = cache_options.get("cache_process", None)
1174
- use_cache = any(c is not None for c in [cache_manager, cache_loader, cache_process]) or cache_options.get('use_cache', False)
1175
- _n_cores = n_cores # backup n_cores
1176
-
1177
- log.write(" -Field for alternative allele frequency in VCF INFO: {}".format(ref_alt_freq), verbose=verbose)
1178
-
1179
- if "p" in mode:
1180
- ## status digits 4-5 == 00 (r'\w\w\w[0][0]\w\w') -> standardized and normalized SNP
1181
- good_chrpos = sumstats[status].str.match(r'\w\w\w[0][0]\w\w', case=False, flags=0, na=False)
1182
- palindromic = good_chrpos & is_palindromic(sumstats[[ref,alt]],a1=ref,a2=alt)
1183
- not_palindromic_snp = good_chrpos & (~palindromic)
1184
-
1185
- ##not palindromic : change status
1186
- sumstats.loc[not_palindromic_snp,status] = vchange_status(sumstats.loc[not_palindromic_snp,status], 7 ,"9","0")
1187
- log.write(" -Identified ", sum(palindromic)," palindromic SNPs...",verbose=verbose)
1188
-
1189
- #palindromic but can not infer
1190
- maf_can_infer = (sumstats[eaf] < maf_threshold) | (sumstats[eaf] > 1 - maf_threshold)
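- # e.g. with maf_threshold=0.40, EAF=0.10 (MAF=0.10) is informative, while
- # EAF=0.45 (MAF=0.45) is too close to 0.5 to resolve strand from frequency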
1191
-
1192
- sumstats.loc[palindromic&(~maf_can_infer),status] = vchange_status(sumstats.loc[palindromic&(~maf_can_infer),status],7,"9","7")
1193
-
1194
- #palindromic with unknown or unchecked status
1195
- unknow_palindromic = sumstats[status].str.match(r'\w\w\w\w\w[012][89]', case=False, flags=0, na=False)
1196
-
1197
- unknow_palindromic_to_check = palindromic & maf_can_infer & unknow_palindromic
1198
-
1199
- log.write(" -After filtering by MAF< {} , {} palindromic SNPs with unknown strand will be inferred...".format(maf_threshold, sum(unknow_palindromic_to_check)),verbose=verbose)
1200
-
1201
- #########################################################################################
1202
- if sum(unknow_palindromic_to_check)>0:
1203
- if sum(unknow_palindromic_to_check)<10000:
1204
- n_cores=1
1205
-
1206
- if use_cache and cache_manager is None:
1207
- cache_manager = CacheManager(base_path=ref_infer, cache_loader=cache_loader, cache_process=cache_process,
1208
- ref_alt_freq=ref_alt_freq, category=PALINDROMIC_INDEL,
1209
- n_cores=_n_cores, log=log, verbose=verbose)
1210
-
1211
- log.write(" -Starting strand inference for palindromic SNPs...",verbose=verbose)
1212
- df_to_check = sumstats.loc[unknow_palindromic_to_check,[chr,pos,ref,alt,eaf,status]]
1213
-
1214
- if use_cache and cache_manager.cache_len > 0:
1215
- log.write(" -Using cache for strand inference",verbose=verbose)
1216
- status_inferred = cache_manager.apply_fn(check_strand_cache, sumstats=df_to_check, ref_infer=ref_infer, ref_alt_freq=ref_alt_freq, chr_dict=chr_dict, trust_cache=trust_cache, log=log, verbose=verbose)
1217
- sumstats.loc[unknow_palindromic_to_check,status] = status_inferred
1218
- else:
1219
- #df_split = np.array_split(df_to_check, n_cores)
1220
- df_split = _df_split(df_to_check, n_cores)
1221
- pool = Pool(n_cores)
1222
- map_func = partial(check_strand,chr=chr,pos=pos,ref=ref,alt=alt,eaf=eaf,status=status,ref_infer=ref_infer,ref_alt_freq=ref_alt_freq,chr_dict=chr_dict)
1223
- status_inferred = pd.concat(pool.map(map_func,df_split))
1224
- sumstats.loc[unknow_palindromic_to_check,status] = status_inferred.values
1225
- pool.close()
1226
- pool.join()
1227
- log.write(" -Finished strand inference.",verbose=verbose)
1228
- else:
1229
- log.warning("No palindromic variants available for checking.")
1230
- #########################################################################################
1231
- #0 Not palindromic SNPs
1232
- #1 Palindromic +strand -> no need to flip
1233
- #2 palindromic -strand -> need to flip -> fixed
1234
- #3 Indel no need flip
1235
- #4 Unknown Indel -> fixed
1236
- #5 Palindromic -strand -> need to flip
1237
- #6 Indel need flip
1238
- #7 indistinguishable
1239
- #8 Not matching or No information
1240
- #9 Unchecked
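- # Example: a STATUS whose 7th digit is "1" (matched by r'\w\w\w\w\w\w[1]')
- # marks a palindromic SNP inferred to be on the + strand, i.e. no flip needed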
1241
-
1242
- status0 = sumstats[status].str.match(r'\w\w\w\w\w\w[0]', case=False, flags=0, na=False)
1243
- status1 = sumstats[status].str.match(r'\w\w\w\w\w\w[1]', case=False, flags=0, na=False)
1244
- status5 = sumstats[status].str.match(r'\w\w\w\w\w\w[5]', case=False, flags=0, na=False)
1245
- status7 = sumstats[status].str.match(r'\w\w\w\w\w\w[7]', case=False, flags=0, na=False)
1246
- status8 = sumstats[status].str.match(r'\w\w\w\w\w[123][8]', case=False, flags=0, na=False)
1247
-
1248
- log.write(" -Non-palindromic : ",sum(status0),verbose=verbose)
1249
- log.write(" -Palindromic SNPs on + strand: ",sum(status1),verbose=verbose)
1250
- log.write(" -Palindromic SNPs on - strand and needed to be flipped:",sum(status5),verbose=verbose)
1251
- log.write(" -Palindromic SNPs with MAF not available to infer : ",sum(status7),verbose=verbose)
1252
- log.write(" -Palindromic SNPs with no macthes or no information : ",sum(status8),verbose=verbose)
1253
-
1254
- if ("7" in remove_snp) and ("8" in remove_snp) :
1255
- log.write(" -Palindromic SNPs with MAF not available to infer and with no macthes or no information will will be removed",verbose=verbose)
1256
- sumstats = sumstats.loc[~(status7 | status8),:].copy()
1257
- elif "8" in remove_snp:
1258
- log.write(" -Palindromic SNPs with no macthes or no information will be removed",verbose=verbose)
1259
- sumstats = sumstats.loc[~status8,:].copy()
1260
- elif "7" in remove_snp:
1261
- log.write(" -Palindromic SNPs with MAF not available to infer will be removed",verbose=verbose)
1262
- sumstats = sumstats.loc[~status7,:].copy()
1263
-
1264
- ### unknow_indel
1265
- if "i" in mode:
1266
- unknow_indel = sumstats[status].str.match(r'\w\w\w\w\w[6][89]', case=False, flags=0, na=False)
1267
- log.write(" -Identified ", sum(unknow_indel)," indistinguishable Indels...",verbose=verbose)
1268
- if sum(unknow_indel)>0:
1269
- log.write(" -Indistinguishable indels will be inferred from reference vcf REF and ALT...",verbose=verbose)
1270
- #########################################################################################
1271
- #with maf can not infer
1272
- #maf_can_infer = (sumstats[eaf] < maf_threshold) | (sumstats[eaf] > 1 - maf_threshold)
1273
- #sumstats.loc[unknow_indel&(~maf_can_infer),status] = vchange_status(sumstats.loc[unknow_indel&(~maf_can_infer),status],7,"9","8")
1274
- log.write(" -Difference in allele frequency (DAF) tolerance: {}".format(daf_tolerance),verbose=verbose)
1275
-
1276
- if sum(unknow_indel)>0:
1277
- if sum(unknow_indel)<10000:
1278
- n_cores=1
1279
-
1280
- if use_cache and cache_manager is None:
1281
- cache_manager = CacheManager(base_path=ref_infer, cache_loader=cache_loader, cache_process=cache_process,
1282
- ref_alt_freq=ref_alt_freq, category=PALINDROMIC_INDEL,
1283
- n_cores=_n_cores, log=log, verbose=verbose)
1284
-
1285
- log.write(" -Starting indistinguishable indel inference...",verbose=verbose)
1286
- df_to_check = sumstats.loc[unknow_indel,[chr,pos,ref,alt,eaf,status]]
1287
-
1288
- if use_cache and cache_manager.cache_len > 0:
1289
- log.write(" -Using cache for indel inference",verbose=verbose)
1290
- status_inferred = cache_manager.apply_fn(check_indel_cache, sumstats=df_to_check, ref_infer=ref_infer, ref_alt_freq=ref_alt_freq, chr_dict=chr_dict, daf_tolerance=daf_tolerance, trust_cache=trust_cache, log=log, verbose=verbose)
1291
- sumstats.loc[unknow_indel,status] = status_inferred
1292
- else:
1293
- #df_split = np.array_split(sumstats.loc[unknow_indel, [chr,pos,ref,alt,eaf,status]], n_cores)
1294
- df_split = _df_split(sumstats.loc[unknow_indel, [chr,pos,ref,alt,eaf,status]], n_cores)
1295
- pool = Pool(n_cores)
1296
- map_func = partial(check_indel,chr=chr,pos=pos,ref=ref,alt=alt,eaf=eaf,status=status,ref_infer=ref_infer,ref_alt_freq=ref_alt_freq,chr_dict=chr_dict,daf_tolerance=daf_tolerance)
1297
- status_inferred = pd.concat(pool.map(map_func,df_split))
1298
- sumstats.loc[unknow_indel,status] = status_inferred.values
1299
- pool.close()
1300
- pool.join()
1301
- log.write(" -Finished indistinguishable indel inference.",verbose=verbose)
1302
-
1303
- #########################################################################################
1304
-
1305
- status3 = sumstats[status].str.match(r'\w\w\w\w\w\w[3]', case=False, flags=0, na=False)
1306
- status6 = sumstats[status].str.match(r'\w\w\w\w\w\w[6]', case=False, flags=0, na=False)
1307
- status8 = sumstats[status].str.match(r'\w\w\w\w\w[6][8]', case=False, flags=0, na=False)
1308
-
1309
- log.write(" -Indels ea/nea match reference : ",sum(status3),verbose=verbose)
1310
- log.write(" -Indels ea/nea need to be flipped : ",sum(status6),verbose=verbose)
1311
- log.write(" -Indels with no macthes or no information : ",sum(status8),verbose=verbose)
1312
- if "8" in remove_indel:
1313
- log.write(" -Indels with no macthes or no information will be removed",verbose=verbose)
1314
- sumstats = sumstats.loc[~status8,:].copy()
1315
- else:
1316
- log.warning("No indistinguishable indels available for checking.")
1317
-
1318
- finished(log,verbose,_end_line)
1319
- return sumstats
1320
-
1340
- ################################################################################################################
1341
- def parallelecheckaf(sumstats,ref_infer,ref_alt_freq=None,maf_threshold=0.4,column_name="DAF",suffix="",n_cores=1, chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",status="STATUS",chr_dict=None,force=False, verbose=True,log=Log()):
1342
- ##start function with col checking##########################################################
1343
- _start_line = "check the difference between EAF (sumstats) and ALT frequency (reference VCF)"
1344
- _end_line = "checking the difference between EAF (sumstats) and ALT frequency (reference VCF)"
1345
- _start_cols = [chr,pos,ref,alt,eaf,status]
1346
- _start_function = ".check_daf()"
1347
- _must_args ={"ref_alt_freq":ref_alt_freq}
1348
-
1349
- is_enough_info = start_to(sumstats=sumstats,
1350
- log=log,
1351
- verbose=verbose,
1352
- start_line=_start_line,
1353
- end_line=_end_line,
1354
- start_cols=_start_cols,
1355
- start_function=_start_function,
1356
- n_cores=n_cores,
1357
- ref_vcf=ref_infer,
1358
- **_must_args)
1359
- if is_enough_info == False: return sumstats
1360
- ############################################################################################
1361
-
1362
- chr_dict = auto_check_vcf_chr_dict(ref_infer, chr_dict, verbose, log)
1363
-
1364
- column_name = column_name + suffix
1365
-
1366
-
1367
-
1368
- # ref_alt_freq INFO in vcf was provided
1369
- if ref_alt_freq is not None:
1370
- log.write(" -Field for alternative allele frequency in VCF INFO: {}".format(ref_alt_freq), verbose=verbose)
1371
- if not force:
1372
- good_chrpos = sumstats[status].str.match(r'\w\w\w[0]\w\w\w', case=False, flags=0, na=False)
1373
- log.write(" -Checking variants:", sum(good_chrpos),verbose=verbose)
- else:
- good_chrpos = pd.Series(True, index=sumstats.index) # check all variants when force=True
1374
- sumstats[column_name]=np.nan
1375
-
1376
- ########################
1377
- if sum(~sumstats[eaf].isna())<10000:
1378
- n_cores=1
1379
- #df_split = np.array_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt,eaf]], n_cores)
1380
- df_split = _df_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt,eaf]], n_cores)
1381
- pool = Pool(n_cores)
1382
- if sum(~sumstats[eaf].isna())>0:
1383
- map_func = partial(checkaf,chr=chr,pos=pos,ref=ref,alt=alt,eaf=eaf,ref_infer=ref_infer,ref_alt_freq=ref_alt_freq,column_name=column_name,chr_dict=chr_dict)
1384
- sumstats.loc[good_chrpos,[column_name]] = pd.concat(pool.map(map_func,df_split))
1385
- pool.close()
1386
- pool.join()
1387
- ###########################
1388
- #status_inferred = sumstats.loc[good_chrpos,[chr,pos,ref,alt,eaf]].apply(lambda x:check_daf(x[0],x[1]-1,x[1],x[2],x[3],x[4],vcf_reader,ref_alt_freq,chr_dict),axis=1)
1389
- log.write(" -Difference in allele frequency (DAF) = EAF (sumstats) - ALT_AF (reference VCF)", verbose=verbose)
1390
- log.write(" -Note: this DAF is not the derived allele frequency.", verbose=verbose)
1391
- #sumstats.loc[good_chrpos,"DAF"] = status_inferred.values
1392
- #sumstats["DAF"]=sumstats["DAF"].astype("float")
1393
- log.write(" - {} max:".format(column_name), np.nanmax(sumstats[column_name]),verbose=verbose)
1394
- log.write(" - {} min:".format(column_name), np.nanmin(sumstats[column_name]),verbose=verbose)
1395
- log.write(" - {} sd:".format(column_name), np.nanstd(sumstats[column_name]),verbose=verbose)
1396
- log.write(" - abs({}) min:".format(column_name), np.nanmin(np.abs(sumstats[column_name])),verbose=verbose)
1397
- log.write(" - abs({}) max:".format(column_name), np.nanmax(np.abs(sumstats[column_name])),verbose=verbose)
1398
- log.write(" - abs({}) sd:".format(column_name), np.nanstd(np.abs(sumstats[column_name])),verbose=verbose)
1399
- log.write("Finished allele frequency checking!")
1400
- return sumstats
1401
-
1402
- def checkaf(sumstats,ref_infer,ref_alt_freq=None,column_name="DAF",chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",chr_dict=None):
1403
- #vcf_reader = vcf.Reader(open(ref_infer, 'rb'))
1404
- vcf_reader = VariantFile(ref_infer)
1405
- def afapply(x,vcf,alt_freq,chr_dict):
1406
- return check_daf(x.iloc[0],x.iloc[1]-1,x.iloc[1],x.iloc[2],x.iloc[3],x.iloc[4],vcf,alt_freq,chr_dict)
1407
- map_func = partial(afapply,vcf=vcf_reader,alt_freq=ref_alt_freq,chr_dict=chr_dict)
1408
- status_inferred = sumstats.apply(map_func,axis=1)
1409
- sumstats[column_name] = status_inferred.values
1410
- sumstats[column_name]=sumstats[column_name].astype("float")
1411
- return sumstats
1412
-
1413
- def check_daf(chr,start,end,ref,alt,eaf,vcf_reader,alt_freq,chr_dict=None):
1414
- if chr_dict is not None: chr=chr_dict[chr]
1415
- chr_seq = vcf_reader.fetch(chr,start,end)
1416
-
1417
- for record in chr_seq:
1418
- if record.pos==end:
1419
- if record.ref==ref and (alt in record.alts):
1420
- return eaf - record.info[alt_freq][0]
1421
- return np.nan
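- # Worked example (illustrative numbers): with sumstats EAF=0.30 and reference
- # ALT_AF=0.25 at a matching REF/ALT record, check_daf returns 0.30-0.25=0.05;
- # np.nan is returned when no record at that position matches REF and ALT.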
1422
- ################################################################################################################
1423
-
1424
- def paralleleinferaf(sumstats,ref_infer,ref_alt_freq=None,n_cores=1, chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",status="STATUS",chr_dict=None,force=False, verbose=True,log=Log()):
1425
- ##start function with col checking##########################################################
1426
- _start_line = "infer sumstats EAF using reference VCF ALT frequency"
1427
- _end_line = "inferring sumstats EAF using reference VCF ALT frequency"
1428
- _start_cols = [chr,pos,ref,alt,status]
1429
- _start_function = ".infer_af()"
1430
- _must_args ={"ref_alt_freq":ref_alt_freq}
1431
-
1432
- is_enough_info = start_to(sumstats=sumstats,
1433
- log=log,
1434
- verbose=verbose,
1435
- start_line=_start_line,
1436
- end_line=_end_line,
1437
- start_cols=_start_cols,
1438
- start_function=_start_function,
1439
- n_cores=n_cores,
1440
- ref_vcf=ref_infer,
1441
- **_must_args)
1442
- if is_enough_info == False: return sumstats
1443
- ############################################################################################
1444
- chr_dict = auto_check_vcf_chr_dict(ref_infer, chr_dict, verbose, log)
1445
-
1446
- if eaf not in sumstats.columns:
1447
- sumstats[eaf]=np.nan
1448
-
1449
- prenumber = sum(sumstats[eaf].isna())
1450
-
1451
- # ref_alt_freq INFO in vcf was provided
1452
- if ref_alt_freq is not None:
1453
- log.write(" -Field for alternative allele frequency in VCF INFO: {}".format(ref_alt_freq), verbose=verbose)
1454
- if not force:
1455
- good_chrpos = sumstats[status].str.match(r'\w\w\w[0]\w\w\w', case=False, flags=0, na=False)
1456
- log.write(" -Checking variants:", sum(good_chrpos),verbose=verbose)
- else:
- good_chrpos = pd.Series(True, index=sumstats.index) # check all variants when force=True
1457
-
1458
- ########################
1459
- if sum(sumstats[eaf].isna())<10000:
1460
- n_cores=1
1461
- #df_split = np.array_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt]], n_cores)
1462
- df_split = _df_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt]], n_cores)
1463
- pool = Pool(n_cores)
1464
- map_func = partial(inferaf,chr=chr,pos=pos,ref=ref,alt=alt,eaf=eaf,ref_infer=ref_infer,ref_alt_freq=ref_alt_freq,chr_dict=chr_dict)
1465
- sumstats.loc[good_chrpos,[eaf]] = pd.concat(pool.map(map_func,df_split))
1466
- pool.close()
1467
- pool.join()
1468
- ###########################
1469
-
1470
- afternumber = sum(sumstats[eaf].isna())
1471
- log.write(" -Inferred EAF for {} variants.".format(prenumber - afternumber),verbose=verbose)
1472
- log.write(" -EAF is still missing for {} variants.".format(afternumber),verbose=verbose)
1473
-
1474
- finished(log,verbose,_end_line)
1475
- return sumstats
1476
-
1477
- def inferaf(sumstats,ref_infer,ref_alt_freq=None,chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",chr_dict=None):
1478
- #vcf_reader = vcf.Reader(open(ref_infer, 'rb'))
1479
- vcf_reader = VariantFile(ref_infer)
1480
- def afapply(x,vcf,alt_freq,chr_dict):
1481
- return infer_af(x.iloc[0],x.iloc[1]-1,x.iloc[1],x.iloc[2],x.iloc[3],vcf,alt_freq,chr_dict)
1482
- map_func = partial(afapply,vcf=vcf_reader,alt_freq=ref_alt_freq,chr_dict=chr_dict)
1483
- status_inferred = sumstats.apply(map_func,axis=1)
1484
- sumstats[eaf] = status_inferred.values
1485
- sumstats[eaf]=sumstats[eaf].astype("float")
1486
- return sumstats
1487
-
1488
- def infer_af(chr,start,end,ref,alt,vcf_reader,alt_freq,chr_dict=None):
1489
- if chr_dict is not None: chr=chr_dict[chr]
1490
- chr_seq = vcf_reader.fetch(chr,start,end)
1491
-
1492
- for record in chr_seq:
1493
- if record.pos==end:
1494
- if record.ref==ref and (alt in record.alts):
1495
- return record.info[alt_freq][0]
1496
- elif record.ref==alt and (ref in record.alts):
1497
- return 1 - record.info[alt_freq][0]
1498
- return np.nan
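- # Worked example (illustrative): when the sumstats alleles are swapped relative
- # to the VCF (record.ref==alt and ref in record.alts) and ALT_AF=0.25, the
- # inferred EAF is 1 - 0.25 = 0.75.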
1499
- ##############################################################################################################################################################################################
1500
-
1501
- ################################################################################################################
1502
-
1503
- def _paralleleinferafwithmaf(sumstats,ref_infer,ref_alt_freq=None,n_cores=1, chr="CHR",pos="POS",ref="NEA",alt="EA",
1504
- eaf="EAF",maf="MAF",ref_eaf="_REF_EAF",status="STATUS",chr_dict=None,force=False, verbose=True,log=Log()):
1505
- ##start function with col checking##########################################################
1506
- _start_line = "infer sumstats EAF from sumstats MAF using reference VCF ALT frequency"
1507
- _end_line = "inferring sumstats EAF from sumstats MAF using reference VCF ALT frequency"
1508
- _start_cols = [chr,pos,ref,alt,status]
1509
- _start_function = ".infer_af()"
1510
- _must_args ={"ref_alt_freq":ref_alt_freq}
1511
-
1512
- is_enough_info = start_to(sumstats=sumstats,
1513
- log=log,
1514
- verbose=verbose,
1515
- start_line=_start_line,
1516
- end_line=_end_line,
1517
- start_cols=_start_cols,
1518
- start_function=_start_function,
1519
- n_cores=n_cores,
1520
- ref_vcf=ref_infer,
1521
- **_must_args)
1522
- if is_enough_info == False: return sumstats
1523
- ############################################################################################
1524
- chr_dict = auto_check_vcf_chr_dict(ref_infer, chr_dict, verbose, log)
1525
-
1526
- if eaf not in sumstats.columns:
1527
- sumstats[eaf]=np.nan
1528
- if ref_eaf not in sumstats.columns:
1529
- sumstats[ref_eaf]=np.nan
1530
-
1531
- prenumber = sum(sumstats[eaf].isna())
1532
-
1533
- # ref_alt_freq INFO in vcf was provided
1534
- if ref_alt_freq is not None:
1535
- log.write(" -Field for alternative allele frequency in VCF INFO: {}".format(ref_alt_freq), verbose=verbose)
1536
- if not force:
1537
- good_chrpos = sumstats[status].str.match(r'\w\w\w[0]\w\w\w', case=False, flags=0, na=False)
1538
- log.write(" -Checking variants:", sum(good_chrpos),verbose=verbose)
- else:
- good_chrpos = pd.Series(True, index=sumstats.index) # check all variants when force=True
1539
-
1540
- ########################
1541
- #extract ref af
1542
- if sum(sumstats[eaf].isna())<10000:
1543
- n_cores=1
1544
- #df_split = np.array_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt]], n_cores)
1545
- df_split = _df_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt]], n_cores)
1546
- pool = Pool(n_cores)
1547
- map_func = partial(inferaf,chr=chr,pos=pos,ref=ref,alt=alt,eaf=ref_eaf,ref_infer=ref_infer,ref_alt_freq=ref_alt_freq,chr_dict=chr_dict)
1548
- sumstats.loc[good_chrpos,[ref_eaf]] = pd.concat(pool.map(map_func,df_split))
1549
- pool.close()
1550
- pool.join()
1551
-
1552
- ###########################
1553
- # infer sumstats EAF
1554
- # based on sumstats MAF and reference EAF
1555
- is_flipped = ((sumstats[ref_eaf]>=0.5)&(sumstats[maf]<=0.5)) | ((sumstats[ref_eaf]<0.5)&(sumstats[maf]>0.5))
1556
- sumstats[eaf] = sumstats[maf]
1557
- log.write(" -Flipping MAF to obtain EAF for {} variants".format(sum(is_flipped)),verbose=verbose)
1558
- sumstats.loc[is_flipped,eaf] = 1 - sumstats.loc[is_flipped,maf]
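- # Worked example (illustrative): MAF=0.10 with reference EAF (_REF_EAF)=0.85
- # implies the effect allele is the major allele, so EAF = 1 - 0.10 = 0.90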
1559
-
1560
- ###########################
1561
- afternumber = sum(sumstats[eaf].isna())
1562
- log.write(" -Inferred EAF for {} variants.".format(prenumber - afternumber),verbose=verbose)
1563
- log.write(" -EAF is still missing for {} variants.".format(afternumber),verbose=verbose)
1564
- sumstats = sumstats.drop(columns=[ref_eaf])
1565
-
1566
- finished(log,verbose,_end_line)
1567
- return sumstats
1568
-
1592
- ##############################################################################################################################################################################################
1593
- def auto_check_vcf_chr_dict(vcf_path, vcf_chr_dict, verbose, log):
1594
- if vcf_path is not None:
1595
- if vcf_chr_dict is None:
1596
- log.write(" -Checking chromosome notations in VCF/BCF files..." ,verbose=verbose)
1597
- vcf_chr_dict = check_vcf_chr_NC(vcf_path, log, verbose)
1598
- if vcf_chr_dict is not None:
1599
- return vcf_chr_dict
1600
- log.write(" -Checking prefix for chromosomes in VCF/BCF files..." ,verbose=verbose)
1601
- prefix = check_vcf_chr_prefix(vcf_path, log,verbose)
1602
- if prefix is not None:
1603
- log.write(" -Prefix for chromosomes: ",prefix)
1604
- vcf_chr_dict = get_number_to_chr(prefix=prefix)
1605
- else:
1606
- log.write(" -No prefix for chromosomes in the VCF/BCF files." ,verbose=verbose)
1607
- vcf_chr_dict = get_number_to_chr()
1608
- return vcf_chr_dict
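- # Example: for contigs named "chr1".."chrX", the detected prefix is "chr" and
- # get_number_to_chr(prefix="chr") maps 1 -> "chr1"; for RefSeq-style contigs
- # such as "NC_000001.10" (hg19 chr1), the get_number_to_NC mapping is returned.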
1609
-
1610
- def check_vcf_chr_prefix(vcf_bcf_path,log,verbose):
1611
- vcf_bcf = VariantFile(vcf_bcf_path)
1612
- for i in list(vcf_bcf.header.contigs):
1613
- m = re.search('(chr|Chr|CHR)([0-9xXyYmM]+)', i)
1614
- if m is not None:
1615
- return m.group(1)
1616
- return None # no chr-prefixed contig found after scanning all contigs
1618
-
1619
- def check_vcf_chr_NC(vcf_bcf_path,log,verbose):
1620
- vcf_bcf = VariantFile(vcf_bcf_path)
1621
- for i in list(vcf_bcf.header.contigs):
1622
- if i in get_number_to_NC(build="19").values():
1623
- log.write(" -RefSeq ID detected (hg19) in VCF/BCF...",verbose=verbose)
1624
- return get_number_to_NC(build="19")
1625
- elif i in get_number_to_NC(build="38").values():
1626
- log.write(" -RefSeq ID detected (hg38) in VCF/BCF...",verbose=verbose)
1627
- return get_number_to_NC(build="38")
1628
- return None # no RefSeq-style contig found after scanning all contigs
1630
-