gwaslab 3.5.4__py3-none-any.whl → 3.5.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gwaslab might be problematic.

gwaslab/g_headers.py ADDED
@@ -0,0 +1,131 @@
+ dtype_dic={
+     'SNPID'              : 'string',
+     'rsID'               : 'string',
+     'CHR'                : 'Int64',
+     'POS'                : 'Int64',
+     'EA'                 : 'category',
+     'NEA'                : 'category',
+     'STATUS'             : 'category',
+     'REF'                : 'category',
+     'ALT'                : 'category',
+     'EAF'                : 'float64',
+     'NEAF'               : 'float64',
+     'MAF'                : 'float64',
+     'INFO'               : 'float32',
+     'BETA'               : 'float64',
+     'SE'                 : 'float64',
+     'BETA_95U'           : 'float64',
+     'BETA_95L'           : 'float64',
+     'OR'                 : 'float64',
+     'OR_95U'             : 'float64',
+     'OR_95L'             : 'float64',
+     'HR'                 : 'float64',
+     'HR_95U'             : 'float64',
+     'HR_95L'             : 'float64',
+     'CHISQ'              : 'float64',
+     'Z'                  : 'float64',
+     'T'                  : 'float64',
+     'F'                  : 'float64',
+     'P'                  : 'float64',
+     'P_MANTISSA'         : 'float64',
+     'P_EXPONENT'         : 'float64',
+     'MLOG10P'            : 'float64',
+     'SNPR2'              : 'float64',
+     'DOF'                : 'Int64',
+     'P_HET'              : 'float64',
+     'I2_HET'             : 'float64',
+     'DENSITY'            : 'Int64',
+     'N'                  : 'Int64',
+     'N_CASE'             : 'Int64',
+     'N_CONTROL'          : 'Int64',
+     'GENENAME'           : 'string',
+     'CIS/TRANS'          : 'string',
+     'DISTANCE_TO_KNOWN'  : 'Int64',
+     'LOCATION_OF_KNOWN'  : 'string',
+     'KNOWN_ID'           : 'string',
+     'KNOWN_PUBMED_ID'    : 'string',
+     'KNOWN_AUTHOR'       : 'string',
+     'KNOWN_SET_VARIANT'  : 'string',
+     'KNOWN_VARIANT'      : 'string',
+     'KNOWN_SET'          : 'string',
+     'NOVEL'              : 'string',
+     'PIP'                : 'float64',
+     'CREDIBLE_SET_INDEX' : 'Int64',
+     'N_SNP'              : 'Int64',
+     'LOCUS'              : 'string',
+     'STUDY'              : 'string',
+ }
+
+
+ description_dic={
+     'SNPID'              : 'variant ID (CHR:POS:NEA:EA)',
+     'rsID'               : 'dbSNP rsID',
+     'CHR'                : 'chromosome number (X 23, Y 24, MT 25)',
+     'POS'                : 'base-pair position',
+     'EA'                 : 'effect allele',
+     'NEA'                : 'non-effect allele',
+     'STATUS'             : 'variant standardization & harmonization status',
+     'REF'                : 'reference allele in reference genome',
+     'ALT'                : 'alternative allele',
+     'EAF'                : 'effect allele frequency',
+     'NEAF'               : 'non-effect allele frequency',
+     'MAF'                : 'minor allele frequency',
+     'INFO'               : 'imputation INFO/RSQ',
+     'BETA'               : 'effect size beta',
+     'SE'                 : 'standard error of beta',
+     'BETA_95U'           : 'upper bound of beta 95% confidence interval',
+     'BETA_95L'           : 'lower bound of beta 95% confidence interval',
+     'OR'                 : 'odds ratio',
+     'OR_95U'             : 'upper bound of OR 95% confidence interval',
+     'OR_95L'             : 'lower bound of OR 95% confidence interval',
+     'HR'                 : 'hazard ratio',
+     'HR_95U'             : 'upper bound of HR 95% confidence interval',
+     'HR_95L'             : 'lower bound of HR 95% confidence interval',
+     'CHISQ'              : 'chi-square',
+     'Z'                  : 'z score',
+     'T'                  : 't statistic',
+     'F'                  : 'F statistic',
+     'P'                  : 'P value',
+     'P_MANTISSA'         : 'P mantissa',
+     'P_EXPONENT'         : 'P exponent',
+     'MLOG10P'            : '$-log_{10}(P)$',
+     'SNPR2'              : 'per-variant R2',
+     'DOF'                : 'degrees of freedom',
+     'P_HET'              : 'heterogeneity test P value',
+     'I2_HET'             : 'heterogeneity I2',
+     'DENSITY'            : 'signal density',
+     'N'                  : 'total sample size',
+     'N_CASE'             : 'number of cases',
+     'N_CONTROL'          : 'number of controls',
+     'GENENAME'           : 'nearest gene symbol',
+     'CIS/TRANS'          : 'whether the variant is in cis or trans region',
+     'DISTANCE_TO_KNOWN'  : 'distance to nearest known variants',
+     'LOCATION_OF_KNOWN'  : 'relative location to nearest known variants',
+     'KNOWN_ID'           : 'nearest known variant ID',
+     'KNOWN_PUBMED_ID'    : 'PubMed ID of the known variant',
+     'KNOWN_AUTHOR'       : 'author of the study',
+     'KNOWN_SET_VARIANT'  : 'known set and overlapping variant',
+     'KNOWN_VARIANT'      : 'known variant overlapping with the variant',
+     'KNOWN_SET'          : 'variant set of the known variant',
+     'PIP'                : 'posterior inclusion probability',
+     'CREDIBLE_SET_INDEX' : 'credible set index',
+     'N_SNP'              : 'number of variants included in this locus for fine-mapping',
+     'LOCUS'              : 'locus name, usually the lead variant of the locus',
+     'STUDY'              : 'study name'}
+
+ def _get_headers(mode="all"):
+     if mode=="info":
+         return ["SNPID","rsID","CHR","POS","EA","NEA","STATUS"]
+     elif mode=="stats":
+         return ["BETA","SE","P","MLOG10P","N","N_CASE","N_CONTROL","Z","T","F","OR","OR_95L","OR_95U","HR","HR_95L","HR_95U","MAF","EAF","BETA_95L","BETA_95U"]
+     else:
+         return description_dic.keys()
+
+ def _check_overlap_with_reserved_keys(other):
+     overlapped=[]
+     for i in other:
+         if i in _get_headers():
+             overlapped.append(i)
+     return overlapped
+
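The new `_check_overlap_with_reserved_keys` helper is wired into `preformat()` (see the io hunks below) to warn when user-supplied `other` columns collide with GWASLab's reserved headers. A minimal sketch of how it might be called; the column names here are made up:

```python
# Hypothetical illustration of the new reserved-key check; not part of the diff.
from gwaslab.g_headers import _check_overlap_with_reserved_keys

extra_cols = ["QUAL", "BETA", "MY_ANNOTATION"]          # assumed user-supplied columns
overlapped = _check_overlap_with_reserved_keys(extra_cols)
print(overlapped)  # ['BETA'] -- a reserved GWASLab header, so preformat would warn
```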
gwaslab/g_meta.py CHANGED
@@ -55,4 +55,5 @@ def _append_meta_record(old, new):
      if old == "Unknown" or old== "Unchecked":
          return new
      else:
-         return "{}, {}".format(old, new)
+         return "{}, {}".format(old, new)
+
gwaslab/g_version.py CHANGED
@@ -6,7 +6,7 @@ import numpy as np
  def _show_version(log=Log(), verbose=True):
      # show version when loading sumstats
      log.write("GWASLab v{} https://cloufield.github.io/gwaslab/".format(gwaslab_info()["version"]),verbose=verbose)
-     log.write("(C) 2022-2024, Yunye He, Kamatani Lab, MIT License, gwaslab@gmail.com",verbose=verbose)
+     log.write("(C) 2022-2025, Yunye He, Kamatani Lab, GPL-3.0 license, gwaslab@gmail.com",verbose=verbose)

  def _get_version():
      # return short version string like v3.4.33
@@ -15,8 +15,8 @@ def _get_version():
  def gwaslab_info():
      # version meta information
      dic={
-         "version":"3.5.4",
-         "release_date":"20241218"
+         "version":"3.5.6",
+         "release_date":"20250306"
      }
      return dic

@@ -1490,10 +1490,100 @@ def infer_af(chr,start,end,ref,alt,vcf_reader,alt_freq,chr_dict=None):
          elif record.ref==alt and (ref in record.alts):
              return 1 - record.info[alt_freq][0]
      return np.nan
+ ##############################################################################################################################################################################################

+ ################################################################################################################

+ def _paralleleinferafwithmaf(sumstats,ref_infer,ref_alt_freq=None,n_cores=1, chr="CHR",pos="POS",ref="NEA",alt="EA",
+                              eaf="EAF",maf="MAF",ref_eaf="_REF_EAF",status="STATUS",chr_dict=None,force=False, verbose=True,log=Log()):
+     ##start function with col checking##########################################################
+     _start_line = "infer sumstats EAF from sumstats MAF using reference VCF ALT frequency"
+     _end_line = "inferring sumstats EAF from sumstats MAF using reference VCF ALT frequency"
+     _start_cols = [chr,pos,ref,alt,status]
+     _start_function = ".infer_af()"
+     _must_args ={"ref_alt_freq":ref_alt_freq}

- ################################################################################################################
+     is_enough_info = start_to(sumstats=sumstats,
+                               log=log,
+                               verbose=verbose,
+                               start_line=_start_line,
+                               end_line=_end_line,
+                               start_cols=_start_cols,
+                               start_function=_start_function,
+                               n_cores=n_cores,
+                               ref_vcf=ref_infer,
+                               **_must_args)
+     if is_enough_info == False: return sumstats
+     ############################################################################################
+     chr_dict = auto_check_vcf_chr_dict(ref_infer, chr_dict, verbose, log)
+
+     if eaf not in sumstats.columns:
+         sumstats[eaf]=np.nan
+     if ref_eaf not in sumstats.columns:
+         sumstats[ref_eaf]=np.nan
+
+     prenumber = sum(sumstats[eaf].isna())
+
+     # ref_alt_freq INFO in vcf was provided
+     if ref_alt_freq is not None:
+         log.write(" -Field for alternative allele frequency in VCF INFO: {}".format(ref_alt_freq), verbose=verbose)
+         if not force:
+             good_chrpos = sumstats[status].str.match(r'\w\w\w[0]\w\w\w', case=False, flags=0, na=False)
+             log.write(" -Checking variants:", sum(good_chrpos),verbose=verbose)
+
+         ########################
+         #extract ref af
+         if sum(sumstats[eaf].isna())<10000:
+             n_cores=1
+         #df_split = np.array_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt]], n_cores)
+         df_split = _df_split(sumstats.loc[good_chrpos,[chr,pos,ref,alt]], n_cores)
+         pool = Pool(n_cores)
+         map_func = partial(inferaf,chr=chr,pos=pos,ref=ref,alt=alt,eaf=ref_eaf,ref_infer=ref_infer,ref_alt_freq=ref_alt_freq,chr_dict=chr_dict)
+         sumstats.loc[good_chrpos,[ref_eaf]] = pd.concat(pool.map(map_func,df_split))
+         pool.close()
+         pool.join()
+
+         ###########################
+         # infer sumstats EAF
+         # based on sumstats MAF and reference EAF
+         is_filpped = ((sumstats[ref_eaf]>=0.5)&(sumstats[maf]<=0.5)) |((sumstats[ref_eaf]<0.5)&(sumstats[maf]>0.5))
+         sumstats[eaf] = sumstats[maf]
+         log.write(" -Flipping MAF to obtain EAF for {} variants".format(sum(is_filpped)),verbose=verbose)
+         sumstats.loc[is_filpped,eaf] = 1 - sumstats.loc[is_filpped,maf]
+
+         ###########################
+         afternumber = sum(sumstats[eaf].isna())
+         log.write(" -Inferred EAF for {} variants.".format(prenumber - afternumber),verbose=verbose)
+         log.write(" -EAF is still missing for {} variants.".format(afternumber),verbose=verbose)
+         sumstats = sumstats.drop(columns=[ref_eaf])
+
+     finished(log,verbose,_end_line)
+     return sumstats
+
+ def inferaf(sumstats,ref_infer,ref_alt_freq=None,chr="CHR",pos="POS",ref="NEA",alt="EA",eaf="EAF",chr_dict=None):
+     #vcf_reader = vcf.Reader(open(ref_infer, 'rb'))
+     vcf_reader = VariantFile(ref_infer)
+     def afapply(x,vcf,alt_freq,chr_dict):
+         return infer_af(x.iloc[0],x.iloc[1]-1,x.iloc[1],x.iloc[2],x.iloc[3],vcf_reader,ref_alt_freq,chr_dict)
+     map_func = partial(afapply,vcf=vcf_reader,alt_freq=ref_alt_freq,chr_dict=chr_dict)
+     status_inferred = sumstats.apply(map_func,axis=1)
+     sumstats[eaf] = status_inferred.values
+     sumstats[eaf]=sumstats[eaf].astype("float")
+     return sumstats
+
+ def infer_af(chr,start,end,ref,alt,vcf_reader,alt_freq,chr_dict=None):
+     if chr_dict is not None: chr=chr_dict[chr]
+     chr_seq = vcf_reader.fetch(chr,start,end)
+
+     for record in chr_seq:
+         if record.pos==end:
+             if record.ref==ref and (alt in record.alts):
+                 return record.info[alt_freq][0]
+             elif record.ref==alt and (ref in record.alts):
+                 return 1 - record.info[alt_freq][0]
+     return np.nan
+
+ ##############################################################################################################################################################################################
  def auto_check_vcf_chr_dict(vcf_path, vcf_chr_dict, verbose, log):
      if vcf_path is not None:
          if vcf_chr_dict is None:
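The core inference rule in `_paralleleinferafwithmaf` is simple: keep MAF as EAF when the reference ALT frequency and the sumstats MAF fall on the same side of 0.5, and flip it (1 - MAF) otherwise. A toy sketch of that rule on hand-made inputs:

```python
# Toy illustration of the MAF -> EAF flip rule used above; data are made up.
import pandas as pd

df = pd.DataFrame({"MAF": [0.10, 0.40], "_REF_EAF": [0.90, 0.30]})
# Flip when the reference EAF and the MAF sit on opposite sides of 0.5.
is_flipped = ((df["_REF_EAF"] >= 0.5) & (df["MAF"] <= 0.5)) | ((df["_REF_EAF"] < 0.5) & (df["MAF"] > 0.5))
df["EAF"] = df["MAF"]
df.loc[is_flipped, "EAF"] = 1 - df.loc[is_flipped, "MAF"]
print(df)  # first row flips to EAF=0.90; second keeps EAF=0.40
```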
@@ -8,11 +8,13 @@ from gwaslab.bd_common_data import get_format_dict
  from gwaslab.qc_fix_sumstats import sortcolumn
  from gwaslab.qc_fix_sumstats import _process_build
  from gwaslab.qc_check_datatype import check_datatype
+ from gwaslab.qc_check_datatype import quick_convert_datatype
  from gwaslab.qc_check_datatype import check_dataframe_memory_usage
-
+ from gwaslab.g_headers import _check_overlap_with_reserved_keys
  #20221030
  def preformat(sumstats,
                fmt=None,
+               tab_fmt="tsv",
                snpid=None,
                rsid=None,
                chrom=None,
@@ -66,12 +68,21 @@ def preformat(sumstats,
      rename_dictionary = {}
      usecols = []
      dtype_dictionary ={}
-
+     if readargs is None:
+         readargs={}
      #######################################################################################################################################################
      # workflow:
      # 1. formatbook
      # 2. user specified header
      # 3. usekeys
+     if tab_fmt=="parquet":
+         if type(sumstats) is str:
+             log.write("Start to load data from parquet file....",verbose=verbose)
+             log.write(" -path: {}".format(sumstats),verbose=verbose)
+             sumstats = pd.read_parquet(sumstats,**readargs)
+             log.write("Finished loading parquet file into pd.DataFrame....",verbose=verbose)
+         else:
+             raise ValueError("Please input a path for parquet file.")

      if fmt is not None:
          # loading format parameters
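With this branch in place, a parquet path can be fed straight into the loader. A hedged sketch, assuming `tab_fmt` is forwarded to `preformat()` through `gl.Sumstats` keyword arguments (not verified from this diff; the file path is hypothetical):

```python
# Hedged sketch: loading parquet sumstats; assumes gl.Sumstats forwards tab_fmt.
import gwaslab as gl

mysumstats = gl.Sumstats("mystudy.sumstats.parquet",
                         fmt="gwaslab",
                         tab_fmt="parquet")  # triggers pd.read_parquet() instead of a text reader
```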
@@ -145,9 +156,11 @@
      if key in raw_cols:
          usecols.append(key)
          if value in ["EA","NEA"]:
-             dtype_dictionary[value]="category"
-         if value in ["CHR","STATUS"]:
-             dtype_dictionary[value]="string"
+             dtype_dictionary[key]="category"
+         if value in ["STATUS"]:
+             dtype_dictionary[key]="string"
+         if value in ["CHR"]:
+             dtype_dictionary[key]="string"

  except ValueError:
      raise ValueError("Please input a path or a pd.DataFrame, and make sure the separator is correct and the columns you specified are in the file.")
@@ -276,6 +289,8 @@
      rename_dictionary[status]="STATUS"
      dtype_dictionary[status]="string"
  if other:
+     overlapped = _check_overlap_with_reserved_keys(other)
+     log.warning("Columns with headers overlapping with GWASLab reserved keywords:{}".format(overlapped),verbose=verbose)
      usecols = usecols + other
      for i in other:
          rename_dictionary[i] = i
@@ -359,8 +374,13 @@
          sumstats = sumstats[usecols].copy()
          for key,value in dtype_dictionary.items():
              if key in usecols:
-                 sumstats[key] = sumstats[key].astype(value)
-
+                 astype = value
+                 if rename_dictionary[key]=="CHR":
+                     astype ="Int64"
+                 try:
+                     sumstats[key] = sumstats[key].astype(astype)
+                 except:
+                     sumstats[key] = sumstats[key].astype("string")
      except ValueError:
          raise ValueError("Please input a path or a pd.DataFrame, and make sure it contain the columns.")

@@ -400,6 +420,8 @@

      ## reordering ###################################################################################################
      sumstats = sortcolumn(sumstats=sumstats,log=log,verbose=verbose)
+     sumstats = quick_convert_datatype(sumstats,log=log,verbose=verbose)
+
      check_datatype(sumstats,log=log,verbose=verbose)
      gc.collect()
      check_dataframe_memory_usage(sumstats,log=log,verbose=verbose)
@@ -0,0 +1,23 @@
+ import pandas as pd
+ from gwaslab.g_Log import Log
+ from gwaslab.qc_check_datatype import check_datatype
+ from gwaslab.qc_check_datatype import check_dataframe_memory_usage
+
+ def _read_pipcs(data, output_prefix, log=Log(),verbose=True):
+     log.write("Start to load PIP and CREDIBLE_SET_INDEX from file...",verbose=verbose)
+     log.write(" -File:{}.pipcs".format(output_prefix),verbose=verbose)
+
+     pipcs = pd.read_csv("{}.pipcs".format(output_prefix))
+
+     log.write(" -Merging CHR and POS from main dataframe...",verbose=verbose)
+     pipcs = _merge_chrpos(data,pipcs)
+
+     log.write(" -Current pipcs Dataframe shape :",len(pipcs)," x ", len(pipcs.columns),verbose=verbose)
+     check_datatype(pipcs,log=log,verbose=verbose)
+     check_dataframe_memory_usage(pipcs,log=log,verbose=verbose)
+     log.write("Finished loading PIP and CREDIBLE_SET_INDEX from file!",verbose=verbose)
+     return pipcs
+
+ def _merge_chrpos(data,pipcs):
+     df = pd.merge(pipcs, data,on="SNPID",how="left")
+     return df
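The `.pipcs` file is read as plain CSV and joined back to the main sumstats on SNPID. A minimal sketch of that join, with made-up data standing in for the fine-mapping output:

```python
# Made-up example of the SNPID join performed by _merge_chrpos above.
import pandas as pd

data  = pd.DataFrame({"SNPID": ["1:100:A:G", "1:200:C:T"], "CHR": [1, 1], "POS": [100, 200]})
pipcs = pd.DataFrame({"SNPID": ["1:100:A:G"], "PIP": [0.95], "CREDIBLE_SET_INDEX": [1]})

merged = pd.merge(pipcs, data, on="SNPID", how="left")  # keeps every fine-mapped variant
print(merged[["SNPID", "CHR", "POS", "PIP", "CREDIBLE_SET_INDEX"]])
```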
gwaslab/io_to_formats.py CHANGED
@@ -114,48 +114,49 @@ def _to_format(sumstats,

      #######################################################################################################
      #formatting float statistics
-     onetime_log.write(" -Formatting statistics ...",verbose=verbose)

-     formats = {
-         'EAF': '{:.4g}',
-         'MAF': '{:.4g}',
-         'BETA': '{:.4f}',
-         'SE': '{:.4f}',
-         'BETA_95U': '{:.4f}',
-         'BETA_95L': '{:.4f}',
-         'Z': '{:.4f}',
-         'CHISQ': '{:.4f}',
-         'F': '{:.4f}',
-         'OR': '{:.4f}',
-         'OR_95U': '{:.4f}',
-         'OR_95L': '{:.4f}',
-         'HR': '{:.4f}',
-         'HR_95U': '{:.4f}',
-         'HR_95L': '{:.4f}',
-         'INFO': '{:.4f}',
-         'P': '{:.4e}',
-         'MLOG10P': '{:.4f}',
-         'DAF': '{:.4f}'}
-
-     for col, f in float_formats.items():
-         if col in output.columns:
-             formats[col]=f
-
-     for col, f in formats.items():
-         if col in output.columns:
-             if str(output[col].dtype) in ["Float32","Float64","float64","float32","float16","float"]:
-                 output[col] = output[col].map(f.format)
-
-     onetime_log.write(" -Float statistics formats:",verbose=verbose)
-     keys=[]
-     values=[]
-     for key,value in formats.items():
-         if key in output.columns:
-             keys.append(key)
-             values.append(value)
-
-     onetime_log.write(" - Columns :",keys,verbose=verbose)
-     onetime_log.write(" - Output formats:",values,verbose=verbose)
+     if tab_fmt!="parquet":
+         onetime_log.write(" -Formatting statistics ...",verbose=verbose)
+         formats = {
+             'EAF': '{:.4g}',
+             'MAF': '{:.4g}',
+             'BETA': '{:.4f}',
+             'SE': '{:.4f}',
+             'BETA_95U': '{:.4f}',
+             'BETA_95L': '{:.4f}',
+             'Z': '{:.4f}',
+             'CHISQ': '{:.4f}',
+             'F': '{:.4f}',
+             'OR': '{:.4f}',
+             'OR_95U': '{:.4f}',
+             'OR_95L': '{:.4f}',
+             'HR': '{:.4f}',
+             'HR_95U': '{:.4f}',
+             'HR_95L': '{:.4f}',
+             'INFO': '{:.4f}',
+             'P': '{:.4e}',
+             'MLOG10P': '{:.4f}',
+             'DAF': '{:.4f}'}
+
+         for col, f in float_formats.items():
+             if col in output.columns:
+                 formats[col]=f
+
+         for col, f in formats.items():
+             if col in output.columns:
+                 if str(output[col].dtype) in ["Float32","Float64","float64","float32","float16","float"]:
+                     output[col] = output[col].map(f.format)
+
+         onetime_log.write(" -Float statistics formats:",verbose=verbose)
+         keys=[]
+         values=[]
+         for key,value in formats.items():
+             if key in output.columns:
+                 keys.append(key)
+                 values.append(value)
+
+         onetime_log.write(" - Columns :",keys,verbose=verbose)
+         onetime_log.write(" - Output formats:",values,verbose=verbose)

      ##########################################################################################################
      # output, mapping column names
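The change skips string formatting entirely for parquet output, which preserves full float precision in the binary file; text formats keep the per-column format map. A small illustration of what that map does, with invented values:

```python
# Invented example of the per-column float formatting applied for text output.
import pandas as pd

output = pd.DataFrame({"BETA": [0.123456], "P": [3.2e-12]})
formats = {"BETA": "{:.4f}", "P": "{:.4e}"}

for col, f in formats.items():
    if col in output.columns:
        output[col] = output[col].map(f.format)  # floats become fixed-width strings
print(output)  # BETA -> '0.1235', P -> '3.2000e-12'
```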
@@ -233,7 +234,7 @@ def tofmt(sumstats,
      if xymt_number is False and pd.api.types.is_integer_dtype(sumstats["CHR"]):
          sumstats["CHR"]= sumstats["CHR"].map(get_number_to_chr(xymt=xymt,prefix=chr_prefix))
      # add prefix to CHR
-     elif chr_prefix is not None:
+     elif len(chr_prefix)>0:
          sumstats["CHR"]= chr_prefix + sumstats["CHR"].astype("string")

      ####################################################################################################################
@@ -409,7 +410,7 @@ def _write_tabular(sumstats,rename_dictionary, path, tab_fmt, to_csvargs, to_tab
          log.write(f" -@ detected: writing each chromosome to a single file...",verbose=verbose)
          log.write(" -Chromosomes:{}...".format(list(sumstats["CHR"].unique())),verbose=verbose)
          for single_chr in list(sumstats["CHR"].unique()):
-             single_path = path.replace("@",single_chr)
+             single_path = path.replace("@","{}".format(single_chr))

              fast_to_csv(sumstats.loc[sumstats[chr_header]==single_chr,:],
                          single_path,
@@ -422,7 +423,7 @@ def _write_tabular(sumstats,rename_dictionary, path, tab_fmt, to_csvargs, to_tab
          log.write(f" -@ detected: writing each chromosome to a single file...",verbose=verbose)
          log.write(" -Chromosomes:{}...".format(list(sumstats["CHR"].unique())),verbose=verbose)
          for single_chr in list(sumstats["CHR"].unique()):
-             single_path = path.replace("@",single_chr)
+             single_path = path.replace("@","{}".format(single_chr))

              sumstats.loc[sumstats[chr_header]==single_chr,:].to_csv(path, index=None, **to_csvargs)
      else:
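`str.replace` raises `TypeError` when its replacement argument is not a string, so the old call failed whenever CHR was stored as an integer; wrapping the value in `"{}".format(...)` makes the per-chromosome path substitution type-safe. For instance:

```python
# Why the change matters: CHR values are often integers, not strings.
path = "mystudy_chr@.tsv.gz"            # hypothetical output template
single_chr = 1                          # integer chromosome from the CHR column
# path.replace("@", single_chr)         # TypeError: replace() argument 2 must be str
print(path.replace("@", "{}".format(single_chr)))  # mystudy_chr1.tsv.gz
```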
@@ -5,7 +5,54 @@ from gwaslab.g_Log import Log
  # pandas.api.types.is_int64_dtype
  # pandas.api.types.is_categorical_dtype

+ dtype_dict ={
+     "SNPID":["string","object"],
+     "rsID":["string","object"],
+     "CHR":["Int64","int64","int32","Int32","int"],
+     "POS":["int64","Int64"],
+     "EA":["category"],
+     "NEA":["category"],
+     "REF":["category"],
+     "ALT":["category"],
+     "BETA":["float64"],
+     "BETA_95L":["float64"],
+     "BETA_95U":["float64"],
+     "SE":["float64"],
+     "N":["Int64","int64","int32","Int32","int"],
+     "N_CASE":["Int64","int64","int32","Int32","int"],
+     "N_CONTROL":["Int64","int64","int32","Int32","int"],
+     "OR":["float64"],
+     "OR_95L":["float64"],
+     "OR_95U":["float64"],
+     "HR":["float64"],
+     "HR_95L":["float64"],
+     "HR_95U":["float64"],
+     "P":["float64"],
+     "MLOG10P":["float64"],
+     "Z":["float64"],
+     "F":["float64"],
+     "T":["float64"],
+     "TEST":["string","object","category"],
+     "CHISQ":["float64"],
+     "I2":["float64"],
+     "PHET":["float64"],
+     "SNPR2":["float64"],
+     "EAF":["float64","float","float32"],
+     "NEAF":["float64","float","float32"],
+     "MAF":["float64","float","float32"],
+     "INFO":["float64","float","float32"],
+     "DOF":["Int64","int64","int32","Int32","int"],
+     "STATUS":["category"],
+     "DIRECTION":["string","object"],
+     'PIP':["float64","float","float32"],
+     'CREDIBLE_SET_INDEX':["Int64","int64","int32","Int32","int"],
+     'N_SNP':["Int64","int64","int32","Int32","int"],
+     'LOCUS':["string","object","category"],
+     'STUDY':["string","object","category"]
+ }
+
  def check_datatype(sumstats, verbose=True, log=Log()):
+
      try:
          headers = []
          dtypes = []
@@ -39,47 +86,6 @@ def check_datatype(sumstats, verbose=True, log=Log()):

  def verify_datatype(header, dtype):

-     dtype_dict ={
-         "SNPID":["object","string"],
-         "rsID":["object","string"],
-         "CHR":["int32","Int32","int64","Int64"],
-         "POS":["int64","Int64"],
-         "EA":"category",
-         "NEA":"category",
-         "REF":"category",
-         "ALT":"category",
-         "BETA":"float64",
-         "BETA_95L":"float64",
-         "BETA_95U":"float64",
-         "SE":"float64",
-         "N":["int","Int32","Int64","int32","int64"],
-         "N_CASE":["int","Int32","Int64","int32","int64"],
-         "N_CONTROL":["int","Int32","Int64","int32","int64"],
-         "OR":"float64",
-         "OR_95L":"float64",
-         "OR_95U":"float64",
-         "HR":"float64",
-         "HR_95L":"float64",
-         "HR_95U":"float64",
-         "P":"float64",
-         "MLOG10P":"float64",
-         "Z":"float64",
-         "F":"float64",
-         "T":"float64",
-         "TEST":["object","string","category"],
-         "CHISQ":"float64",
-         "I2":"float64",
-         "PHET":"float64",
-         "SNPR2":"float64",
-         "EAF":["float","float32","float64"],
-         "NEAF":["float","float32","float64"],
-         "MAF":["float","float32","float64"],
-         "INFO":["float32","float64"],
-         "DOF":["int","Int32","Int64","int32","int64"],
-         "STATUS":"category",
-         "DIRECTION":["object","string"],
-     }
-
      if header in dtype_dict.keys():
          if str(dtype) in dtype_dict[header]:
              return "T"
@@ -88,6 +94,22 @@ def verify_datatype(header, dtype):
      else:
          return "NA"

+ def quick_convert_datatype(sumstats, log, verbose):
+     for col in sumstats.columns:
+         if col in dtype_dict.keys():
+             if str(sumstats[col].dtypes) not in dtype_dict[col]:
+                 datatype=dtype_dict[col][0]
+                 log.write(" -Trying to convert datatype for {}: {} -> {}...".format(col, str(sumstats[col].dtypes), datatype), end="" ,verbose=verbose)
+                 try:
+                     sumstats[col] = sumstats[col].astype(datatype)
+                     log.write("{}".format(datatype),show_time=False, verbose=verbose)
+                 except:
+                     log.write("Failed...",show_time=False,verbose=verbose)
+                     pass
+     return sumstats
+
+
+
  def check_dataframe_shape(sumstats, log, verbose):
      memory_in_mb = sumstats.memory_usage().sum()/1024/1024
      try:
@@ -100,4 +122,5 @@ def check_dataframe_memory_usage(sumstats, log, verbose):
      try:
          log.write(" -Current Dataframe memory usage: {:.2f} MB".format(memory_in_mb), verbose=verbose)
      except:
-         log.warning("Error: cannot get Memory usage...")
+         log.warning("Error: cannot get Memory usage...")
+
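`quick_convert_datatype` tries to coerce each recognized column to the first dtype listed for it in the module-level `dtype_dict`, logging failures instead of raising. A small made-up demonstration of the coercion it performs:

```python
# Made-up demonstration of the dtype coercion quick_convert_datatype performs.
import pandas as pd

dtype_dict = {"CHR": ["Int64", "int64"], "P": ["float64"]}  # toy subset of the real dict
sumstats = pd.DataFrame({"CHR": pd.array([1, 2], dtype="int32"),
                         "P": ["0.05", "1e-8"]})            # P read in as strings

for col in sumstats.columns:
    if col in dtype_dict and str(sumstats[col].dtypes) not in dtype_dict[col]:
        try:
            sumstats[col] = sumstats[col].astype(dtype_dict[col][0])  # first listed dtype
        except (TypeError, ValueError):
            pass  # the real function logs "Failed..." and keeps the original dtype
print(sumstats.dtypes)  # CHR -> Int64, P -> float64
```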
@@ -1178,7 +1178,7 @@ def sanitycheckstats(sumstats,
      t=(-99999,99999),
      f=(0,float("Inf")),
      p=(0,1),
-     mlog10p=(0,9999),
+     mlog10p=(0,99999),
      beta=(-100,100),
      se=(0,float("Inf")),
      OR=(-100,100),
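Raising the MLOG10P sanity bound from 9,999 to 99,999 matters because MLOG10P can legitimately exceed the old cap for extremely significant variants whose raw P value would underflow float64 (roughly below 1e-308). A quick illustration of computing MLOG10P from a mantissa/exponent pair, which sidesteps that underflow:

```python
# Illustration: MLOG10P from a mantissa/exponent pair avoids float64 underflow.
import numpy as np

# P = 3.1e-12345 cannot be stored as a float64 (it underflows to 0.0),
# but its -log10 is perfectly representable:
mantissa, exponent = 3.1, -12345
mlog10p = -(np.log10(mantissa) + exponent)
print(mlog10p)  # ~12344.51 -- within the new (0, 99999) sanity range
```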