py2ls 0.2.4.6__py3-none-any.whl → 0.2.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py2ls/bio.py
CHANGED
@@ -166,9 +166,23 @@ def get_probe(
     if platform_id is None:
         df_meta = get_meta(geo=geo, dataset=dataset, verbose=False)
         platform_id = df_meta["platform_id"].unique().tolist()
-        platform_id = platform_id[0] if len(platform_id) == 1 else platform_id
         print(f"Platform: {platform_id}")
-
+    if len(platform_id) > 1:
+        df_probe= geo[dataset].gpls[platform_id[0]].table
+        # df_probe=pd.DataFrame()
+        # # Iterate over each platform ID and collect the probe tables
+        # for platform_id_ in platform_id:
+        #     if platform_id_ in geo[dataset].gpls:
+        #         df_probe_ = geo[dataset].gpls[platform_id_].table
+        #         if not df_probe_.empty:
+        #             df_probe=pd.concat([df_probe, df_probe_])
+        #         else:
+        #             print(f"Warning: Probe table for platform {platform_id_} is empty.")
+        #     else:
+        #         print(f"Warning: Platform ID {platform_id_} not found in dataset {dataset}.")
+    else:
+        df_probe= geo[dataset].gpls[platform_id[0]].table
+
     if df_probe.empty:
         print(
             f"Warning: cannot find the probe info. Check whether the probe info is provided in a separate file"
@@ -215,9 +229,12 @@ def get_data(geo: dict, dataset: str = "GSE25097", verbose=False):
     df_expression = get_expression_data(geo, dataset=dataset)
     if not df_expression.select_dtypes(include=["number"]).empty:
         # if the data are all raw counts, normalize them with TMM
-        if 'counts' in get_data_type(df_expression):
-
-
+        if 'counts' in get_data_type(df_expression):
+            try:
+                df_expression=counts2expression(df_expression.T).T
+                print(f"{dataset}'s type is raw read counts, nomalized(transformed) via 'TMM'")
+            except Exception as e:
+                print("raw counts data")
     if any([df_probe.empty, df_expression.empty]):
         print(
             f"got empty values, check the probe info. It may be provided in a separate file"
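
The new branch only normalizes when the expression matrix is detected as raw counts, and keeps the raw values if TMM normalization fails. A minimal sketch of that call pattern, assuming `get_data_type` and `counts2expression` are importable from `py2ls.bio` and using a hypothetical genes-by-samples count matrix:

    import pandas as pd
    from py2ls.bio import counts2expression, get_data_type  # assumed import path

    # hypothetical raw read counts (genes x samples)
    df_expression = pd.DataFrame(
        {"sample1": [10, 0, 250], "sample2": [12, 3, 180]},
        index=["TP53", "EGFR", "GAPDH"],
    )

    if "counts" in get_data_type(df_expression):
        try:
            # mirror the transposes used in get_data above
            df_expression = counts2expression(df_expression.T).T
        except Exception:
            print("raw counts data")  # keep the raw counts if TMM normalization fails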
py2ls/ips.py
CHANGED
@@ -555,14 +555,28 @@ def shared(*args, strict=True, n_shared=2, verbose=True):
     # Get elements that appear in at least n_shared lists
     shared_elements = [item for item, count in element_count.items() if count >= n_shared]
 
-    shared_elements = flatten(shared_elements, verbose=verbose)
+    shared_elements = flatten(shared_elements, verbose=verbose)
     if verbose:
         elements2show = shared_elements if len(shared_elements)<10 else shared_elements[:5]
         print(f"{' '*2}{len(shared_elements)} elements shared: {' '*2}{elements2show}")
         print("********* checking shared elements *********")
     return shared_elements
 
-def 
+def not_shared(*args, strict=True, n_shared=2, verbose=False):
+    """
+    To find the elements in list1 that are not shared with list2 while maintaining the original order of list1
+    usage:
+        list1 = [1, 8, 3, 3, 4, 5]
+        list2 = [4, 5, 6, 7, 8]
+        not_shared(list1,list2)# output [1,3]
+    """
+    _common = shared(*args, strict=strict, n_shared=n_shared, verbose=verbose)
+    list1 = args[0]
+    _not_shared=[item for item in list1 if item not in _common]
+    return flatten(_not_shared, verbose=verbose)
+
+
+def flatten(nested: Any, unique_list=True, verbose=False):
     """
     Recursively flattens a nested structure (lists, tuples, dictionaries, sets) into a single list.
     Parameters:
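
A quick usage sketch of the new `not_shared` helper, following the example in its docstring (the `py2ls.ips` import path is assumed):

    from py2ls.ips import not_shared, shared  # assumed import path

    list1 = [1, 8, 3, 3, 4, 5]
    list2 = [4, 5, 6, 7, 8]
    print(not_shared(list1, list2))             # expected: [1, 3]
    print(shared(list1, list2, verbose=False))  # the elements common to both lists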
@@ -1560,13 +1574,26 @@ def unzip(dir_path, output_dir=None):
             tar_ref.extractall(output_dir)
         return output_dir
     # Handle .gz files
-    if dir_path.endswith(".gz"):
+    if dir_path.endswith(".gz") or dir_path.endswith(".gzip"):
         import gzip
 
         output_file = os.path.splitext(dir_path)[0] # remove the .gz extension
-
-        with open(
-
+        try:
+            with gzip.open(dir_path, "rb") as gz_file:
+                with open(output_file, "wb") as out_file:
+                    shutil.copyfileobj(gz_file, out_file)
+            print(f"unzipped '{dir_path}' to '{output_file}'")
+        except FileNotFoundError:
+            print(f"Error: The file '{dir_path}' was not found.")
+        except PermissionError:
+            print(f"Error: Permission denied when accessing '{dir_path}' or writing to '{output_file}'.")
+        except Exception as e:
+            try:
+                import tarfile
+                with tarfile.open(dir_path, 'r:gz') as tar:
+                    tar.extractall(path=output_file)
+            except Exception as final_e:
+                print(f"An final unexpected error occurred: {final_e}")
         return output_file
 
     # Handle .zip files
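
The new `.gz`/`.gzip` branch first streams the content out with `gzip` and, if that raises, retries the path as a gzipped tar archive. A standalone sketch of the same pattern, using hypothetical file names:

    import gzip
    import shutil
    import tarfile

    def gunzip_or_untar(src: str, dst: str) -> None:
        """Copy a single-file .gz to dst; on failure, extract src as a .tar.gz into dst."""
        try:
            with gzip.open(src, "rb") as gz_file, open(dst, "wb") as out_file:
                shutil.copyfileobj(gz_file, out_file)
        except Exception:
            # e.g. unreadable gzip stream: fall back to tarfile, as unzip() does above
            with tarfile.open(src, "r:gz") as tar:
                tar.extractall(path=dst)

    # gunzip_or_untar("archive.data.gz", "archive.data")  # hypothetical paths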
@@ -1648,9 +1675,12 @@ def is_df_abnormal(df: pd.DataFrame, verbose=False) -> bool:
     False: normal
 
     """
+    if not isinstance(df, pd.DataFrame):
+        return False
+    df.columns = df.columns.astype(str)# convert to str so the counts below can be computed
     # Initialize a list to hold messages about abnormalities
     messages = []
-    is_abnormal = 
+    is_abnormal = True
     # Check the shape of the DataFrame
     actual_shape = df.shape
     messages.append(f"Shape of DataFrame: {actual_shape}")
@@ -1745,10 +1775,12 @@ def fload(fpath, kind=None, **kwargs):
             content = file.read()
         return content
 
-    def load_html(fpath):
-        with open(fpath, "r") as file:
-            content = file.read()
-        return content
+    # def load_html(fpath):
+    #     with open(fpath, "r") as file:
+    #         content = file.read()
+    #     return content
+    def load_html(fpath,**kwargs):
+        return pd.read_html(fpath,**kwargs)
 
     def load_json(fpath, **kwargs):
         output=kwargs.pop("output","json")
@@ -1962,8 +1994,8 @@ def fload(fpath, kind=None, **kwargs):
                     # display(df.head(2))
                     # print(f"is_df_abnormal:{is_df_abnormal(df, verbose=0)}")
                     if not is_df_abnormal(df, verbose=0):
-                        display(df.head(2))
-                        print(f"shape: {df.shape}")
+                        display(df.head(2)) if isinstance(df, pd.DataFrame) else display("it is not a DataFrame")
+                        print(f"shape: {df.shape}") if isinstance(df, pd.DataFrame) else display("it is not a DataFrame")
                         return df
             except EmptyDataError as e:
                 continue
@@ -1987,6 +2019,42 @@ def fload(fpath, kind=None, **kwargs):
                 pass
         return df
 
+
+    def load_parquet(fpath, **kwargs):
+        """
+        Load a Parquet file into a Pandas DataFrame with advanced options.
+
+        Parameters:
+        - fpath (str): The file path to the Parquet file.
+        - engine (str): The engine to use for reading the Parquet file (default is 'pyarrow').
+        - columns (list): List of columns to load. If None, loads all columns.
+        - verbose (bool): If True, prints additional information about the loading process.
+        - filters (list): List of filter conditions for predicate pushdown.
+        - **kwargs: Additional keyword arguments for `pd.read_parquet`.
+
+        Returns:
+        - df (DataFrame): The loaded DataFrame.
+        """
+
+        engine = kwargs.get("engine", "pyarrow")
+        verbose = kwargs.pop("verbose", False)
+
+        if verbose:
+            use_pd("read_parquet", verbose=verbose)
+        try:
+            df = pd.read_parquet(fpath, engine=engine, **kwargs)
+            if verbose:
+                if 'columns' in kwargs:
+                    print(f"Loaded columns: {kwargs['columns']}")
+                else:
+                    print("Loaded all columns.")
+            print(f"shape: {df.shape}")
+        except Exception as e:
+            print(f"An error occurred while loading the Parquet file: {e}")
+            df = None
+
+        return df
+
     def load_ipynb(fpath, **kwargs):
         as_version = kwargs.get("as_version", 4)
         with open(fpath, "r") as file:
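
A minimal usage sketch of the new loader (the file path and column names are hypothetical; a Parquet engine such as `pyarrow` must be installed, and the `py2ls.ips` import path is assumed):

    import pandas as pd
    from py2ls.ips import load_parquet  # assumed import path

    # write a small demo file first
    pd.DataFrame({"gene": ["TP53", "EGFR"], "logFC": [1.2, -0.8]}).to_parquet("demo.parquet")

    df = load_parquet("demo.parquet", columns=["gene"])  # columns is forwarded to pd.read_parquet
    print(df)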
@@ -2055,51 +2123,21 @@ def fload(fpath, kind=None, **kwargs):
     kind = kind.lower()
     kind = kind.lstrip(".").lower()
     img_types = [
-        "bmp",
-        "eps",
-        "gif",
-        "icns",
-        "ico",
-        "im",
-        "jpg",
-        "jpeg",
-        "jpeg2000",
-        "msp",
-        "pcx",
-        "png",
-        "ppm",
-        "sgi",
-        "spider",
-        "tga",
-        "tiff",
-        "tif",
-        "webp",
-        "json",
+        "bmp","eps","gif","png","jpg","jpeg","jpeg2000","tiff","tif",
+        "icns","ico","im","msp","pcx","ppm","sgi","spider","tga","webp",
     ]
     doc_types = [
-        "docx",
-        "txt",
-        "md",
-        "html",
-        "json",
-        "yaml",
-        "xml",
-        "csv",
-        "xlsx",
-        "pdf",
+        "docx","pdf",
+        "txt","csv","xlsx","tsv","parquet","snappy",
+        "md","html",
+        "json","yaml","xml",
         "ipynb",
+        "mtx"
     ]
     zip_types = [
-        "gz",
-        "zip",
-        "7z",
-        "tar",
-        "tar.gz",
-        "tar.bz2",
-        "bz2",
-        "xz",
-        "rar",
-        "tgz",
+        "gz","zip","7z","rar","tgz",
+        "tar","tar.gz","tar.bz2",
+        "bz2","xz","gzip"
     ]
     other_types = ["fcs"]
     supported_types = [*doc_types, *img_types, *zip_types, *other_types]
@@ -2128,14 +2166,14 @@ def fload(fpath, kind=None, **kwargs):
     elif kind == "txt" or kind == "md":
         return load_txt_md(fpath)
     elif kind == "html":
-        return load_html(fpath)
+        return load_html(fpath, **kwargs)
     elif kind == "json":
-        return load_json(fpath)
+        return load_json(fpath, **kwargs)
     elif kind == "yaml":
         return load_yaml(fpath)
     elif kind == "xml":
         return load_xml(fpath)
-    elif kind == "csv":
+    elif kind in ["csv","tsv"]:
         content = load_csv(fpath, **kwargs)
         return content
     elif kind in ["ods", "ods", "odt"]:
@@ -2146,14 +2184,25 @@ def fload(fpath, kind=None, **kwargs):
         engine = kwargs.get("engine", "xlrd")
         kwargs.pop("engine", None)
         content = load_excel(fpath, engine=engine, **kwargs)
+        print(f"shape: {content.shape}")
         display(content.head(3))
         return content
     elif kind == "xlsx":
         content = load_excel(fpath, **kwargs)
         display(content.head(3))
+        print(f"shape: {content.shape}")
+        return content
+    elif kind=='mtx':
+        from scipy.io import mmread
+        dat_mtx=mmread(fpath)
+        content=pd.DataFrame.sparse.from_spmatrix(dat_mtx,**kwargs)
+        display(content.head(3))
+        print(f"shape: {content.shape}")
         return content
     elif kind == "ipynb":
         return load_ipynb(fpath, **kwargs)
+    elif kind in ['parquet','snappy']:
+        return load_parquet(fpath,**kwargs)
     elif kind == "pdf":
         # print('usage:load_pdf(fpath, page="all", verbose=False)')
         return load_pdf(fpath, **kwargs)
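
With the dispatch above, `fload` now routes `.mtx` files through `scipy.io.mmread` into a sparse DataFrame and `.parquet`/`.snappy` files through `load_parquet`, and forwards `**kwargs` to the HTML/JSON/CSV loaders. A usage sketch with hypothetical file names (the `py2ls.ips` import path is assumed):

    from py2ls.ips import fload  # assumed import path

    counts = fload("matrix.mtx")            # sparse DataFrame via scipy.io.mmread
    table = fload("results.parquet")        # DataFrame via load_parquet
    pages = fload("report.html", header=0)  # kwargs now reach pd.read_html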
@@ -2199,9 +2248,7 @@ def fload(fpath, kind=None, **kwargs):
         return meta, data
 
     else:
-
-        # content = load_csv(fpath, **kwargs)
-        # except:
+        print("direct reading...")
         try:
             try:
                 with open(fpath, "r", encoding="utf-8") as f:
@@ -2501,6 +2548,25 @@ def fsave(
         tree = etree.ElementTree(root)
         tree.write(fpath, pretty_print=True, xml_declaration=True, encoding="UTF-8")
 
+    def save_parquet(fpath:str, data:pd.DataFrame, **kwargs):
+        engine = kwargs.pop("engine","auto")  # "auto" tries pyarrow first, then falls back to fastparquet; {'auto', 'pyarrow', 'fastparquet'}
+        compression=kwargs.pop("compression",None)  # Use None for no compression. Supported options: 'snappy', 'gzip', 'brotli', 'lz4', 'zstd'
+        try:
+            # Attempt to save with "pyarrow" if engine is set to "auto"
+            data.to_parquet(fpath, engine=engine, compression=compression, **kwargs)
+            print(f"DataFrame successfully saved to {fpath} with engine '{engine}' and {compression} compression.")
+        except Exception as e:
+            print(f"Error using with engine '{engine}' and {compression} compression: {e}")
+            if "Sparse" in str(e):
+                try:
+                    # Handle sparse data by converting columns to dense
+                    print("Attempting to convert sparse columns to dense format...")
+                    data = data.apply(lambda x: x.sparse.to_dense() if pd.api.types.is_sparse(x) else x)
+                    save_parquet(fpath, data=data,**kwargs)
+                except Exception as last_e:
+                    print(f"After converted sparse columns to dense format, Error using with engine '{engine}' and {compression} compression: {last_e}")
+
+
     if kind is None:
         _, kind = os.path.splitext(fpath)
         kind = kind.lower()
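
The sparse fallback densifies any sparse columns before retrying the write. A standalone sketch of that conversion (the demo frame and output path are hypothetical; it checks `pd.SparseDtype` instead of the `pd.api.types.is_sparse` call above, which is deprecated in recent pandas but equivalent here):

    import pandas as pd

    df = pd.DataFrame({"a": pd.arrays.SparseArray([0, 0, 1, 0]), "b": [1, 2, 3, 4]})

    # densify only the sparse columns, as save_parquet does before retrying
    dense = df.apply(
        lambda col: col.sparse.to_dense() if isinstance(col.dtype, pd.SparseDtype) else col
    )
    dense.to_parquet("demo_dense.parquet", compression=None)  # needs pyarrow or fastparquet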
@@ -2546,6 +2612,15 @@ def fsave(
         save_yaml(fpath, content, **kwargs)
     elif kind == "ipynb":
         save_ipynb(fpath, content, **kwargs)
+    elif kind.lower() in ["parquet","pq","big","par"]:
+        compression=kwargs.pop("compression",None)  # Use None for no compression. Supported options: 'snappy', 'gzip', 'brotli', 'lz4', 'zstd'
+        # fix the fpath ends
+        if not '.parquet' in fpath:
+            fpath=fpath.replace(kind, 'parquet')
+        if compression is not None:
+            if not fpath.endswith(compression):
+                fpath=fpath+f".{compression}"
+        save_parquet(fpath=fpath, data=content,compression=compression,**kwargs)
     else:
         try:
             netfinder.downloader(url=content, dir_save=dirname(fpath), kind=kind)
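
A minimal usage sketch of writing a DataFrame through `fsave` (this assumes the call shape `fsave(fpath, content, ...)` and hypothetical output paths; with the branch above, a non-None compression is appended to the file name):

    import pandas as pd
    from py2ls.ips import fsave  # assumed import path

    df = pd.DataFrame({"gene": ["TP53", "EGFR"], "logFC": [1.2, -0.8]})
    fsave("results.parquet", df)                      # plain, uncompressed parquet
    fsave("results.parquet", df, compression="gzip")  # written as results.parquet.gzip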
@@ -5518,7 +5593,21 @@ def df_reducer(
 ) -> pd.DataFrame:
     """
     Reduces the dimensionality of the selected DataFrame using PCA or UMAP.
-
+    method:
+        1. 'umap':
+            - big dataset and global structure, often preferred in large-scale datasets for
+              visualization and dimensionality reduction, balancing speed and quality of visualization.
+            - t-SNE excels at preserving local structure (i.e., clusters), but it often loses global
+              relationships, causing clusters to appear in arbitrary proximities to each other.
+        2. 'pca':
+            - t-SNE excels at preserving local structure (i.e., clusters), but it often loses global
+              relationships, causing clusters to appear in arbitrary proximities to each other.
+            - useful as a preprocessing step and in datasets where linear relationships dominate.
+        3. 't-SNE':
+            a. t-SNE excels at preserving local structure (i.e., clusters), but it often loses global
+               relationships, causing clusters to appear in arbitrary proximities to each other.
+            b. often preferred in large-scale datasets for visualization and dimensionality
+               reduction, balancing speed and quality of visualization.
     Parameters:
     -----------
     data : pd.DataFrame
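
A minimal usage sketch of the documented methods (only `data` and `method` are visible in this hunk; the method strings follow the docstring labels, and all other parameters are left at their defaults):

    import numpy as np
    import pandas as pd
    from py2ls.ips import df_reducer  # assumed import path

    df = pd.DataFrame(np.random.rand(100, 10), columns=[f"feat_{i}" for i in range(10)])

    emb_umap = df_reducer(data=df, method="umap")  # global structure, large datasets
    emb_pca = df_reducer(data=df, method="pca")    # linear structure / preprocessing step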
py2ls/plot.py
CHANGED
@@ -2337,40 +2337,20 @@ def get_color(
         if n == 1:
             colorlist = ["#3A4453"]
         elif n == 2:
-            colorlist = ["#3A4453", "#
+            colorlist = ["#3A4453", "#FBAF63"]
         elif n == 3:
-            colorlist = ["#3A4453", "#
+            colorlist = ["#3A4453", "#FBAF63", "#299D8F"]
         elif n == 4:
-
-            colorlist = ["#81C6BD", "#FBAF63", "#F2675B", "#72A1C9"]
+            colorlist = ["#087cf7", "#FBAF63", "#3C898A","#FF2C00"]
         elif n == 5:
-            colorlist = [
-                "#3A4453",
-                "#427AB2",
-                "#F09148",
-                "#DBDB8D",
-                "#C59D94",
-                "#AFC7E8",
-            ]
+            colorlist = ["#459AA9", "#B25E9D", "#4B8C3B","#EF8632","#FF2C00"]
         elif n == 6:
-            colorlist = [
-
-
-                "#F09148",
-                "#DBDB8D",
-                "#C59D94",
-                "#E53528",
-            ]
+            colorlist = ["#459AA9", "#B25E9D", "#4B8C3B","#EF8632", "#24578E","#FF2C00"]
+        elif n==7:
+            colorlist = [ "#7F7F7F", "#459AA9", "#B25E9D", "#4B8C3B","#EF8632", "#24578E" "#FF2C00"]
         else:
-            colorlist = [
-
-                "#FF2C00",
-                "#0C5DA5",
-                "#845B97",
-                "#58BBCC",
-                "#FF9500",
-                "#D57DBE",
-            ]
+            colorlist = ['#FF7F0E','#2CA02C','#D62728','#9467BD','#E377C2','#7F7F7F','#7BB8CC','#06daf2']
+
         by = "start"
     elif any(["cub" in cmap.lower(), "sns" in cmap.lower()]):
         if kwargs:
@@ -3106,9 +3086,9 @@ def plotxy(
         palette = kws_scatter.pop(
             "palette",
             (
-                sns.color_palette("
+                sns.color_palette("tab20", data[hue].nunique())
                 if hue is not None
-                else sns.color_palette("
+                else sns.color_palette("tab20")
             ),
         )
         s = kws_scatter.pop("s", 10)
@@ -3202,7 +3182,7 @@ def volcano(
     top_genes=[5, 5], # [down-regulated, up-regulated]
     thr_x=np.log2(1.5), # default: 0.585
     thr_y=-np.log10(0.05),
-    sort_xy="x", #'y'
+    sort_xy="x", #'y', 'xy'
     colors=("#00BFFF", "#9d9a9a", "#FF3030"),
     s=20,
     fill=True, # plot filled scatter
py2ls-0.2.4.6.dist-info/RECORD → py2ls-0.2.4.7.dist-info/RECORD
CHANGED
@@ -173,7 +173,7 @@ py2ls/LICENSE,sha256=UOZ1F5fFDe3XXvG4oNnkL1-Ecun7zpHzRxjp-XsMeAo,11324
 py2ls/README.md,sha256=CwvJWAnSXnCnrVHlnEbrxxi6MbjbE_MT6DH2D53S818,11572
 py2ls/__init__.py,sha256=Nn8jTIvySX7t7DMJ8VNRVctTStgXGjHldOIdZ35PdW8,165
 py2ls/batman.py,sha256=E7gYofbDzN7S5oCmO_dd5Z1bxxhoYMJSD6s-VaF388E,11398
-py2ls/bio.py,sha256=
+py2ls/bio.py,sha256=HoI432AY1lXVcO2iB_1vpmpmDEj07brRNgOyLUazfxk,87655
 py2ls/brain_atlas.py,sha256=w1o5EelRjq89zuFJUNSz4Da8HnTCwAwDAZ4NU4a-bAY,5486
 py2ls/chat.py,sha256=Yr22GoIvoWhpV3m4fdwV_I0Mn77La346_ymSinR-ORA,3793
 py2ls/correlators.py,sha256=RbOaJIPLCHJtUm5SFi_4dCJ7VFUPWR0PErfK3K26ad4,18243
@@ -214,17 +214,17 @@ py2ls/export_requirements.py,sha256=x2WgUF0jYKz9GfA1MVKN-MdsM-oQ8yUeC6Ua8oCymio,
 py2ls/fetch_update.py,sha256=9LXj661GpCEFII2wx_99aINYctDiHni6DOruDs_fdt8,4752
 py2ls/freqanalysis.py,sha256=F4218VSPbgL5tnngh6xNCYuNnfR-F_QjECUUxrPYZss,32594
 py2ls/ich2ls.py,sha256=3E9R8oVpyYZXH5PiIQgT3CN5NxLe4Dwtm2LwaeacE6I,21381
-py2ls/ips.py,sha256=
+py2ls/ips.py,sha256=o3B1mv1Oscu5R32070z8j0GbcRT_K4gBIrVSUEyWdM8,234266
 py2ls/ml2ls.py,sha256=XSe2-sLNzUVSvVRkeRGfhrB_q8C49SDK1sekYC1Bats,50277
 py2ls/mol.py,sha256=AZnHzarIk_MjueKdChqn1V6e4tUle3X1NnHSFA6n3Nw,10645
 py2ls/netfinder.py,sha256=RJFr80tGEJiuwEx99IBOhI5-ZuXnPdWnGUYpF7XCEwI,56426
 py2ls/ocr.py,sha256=5lhUbJufIKRSOL6wAWVLEo8TqMYSjoI_Q-IO-_4u3DE,31419
-py2ls/plot.py,sha256=
+py2ls/plot.py,sha256=dVEKkHqLqgrKv8mx_lIeFDtRi4A9ThRFlYne6kr-jfk,153856
 py2ls/setuptools-70.1.0-py3-none-any.whl,sha256=2bi3cUVal8ip86s0SOvgspteEF8SKLukECi-EWmFomc,882588
 py2ls/sleep_events_detectors.py,sha256=bQA3HJqv5qnYKJJEIhCyhlDtkXQfIzqksnD0YRXso68,52145
 py2ls/stats.py,sha256=DMoJd8Z5YV9T1wB-4P52F5K5scfVK55DT8UP4Twcebo,38627
 py2ls/translator.py,sha256=zBeq4pYZeroqw3DT-5g7uHfVqKd-EQptT6LJ-Adi8JY,34244
 py2ls/wb_detector.py,sha256=7y6TmBUj9exCZeIgBAJ_9hwuhkDh1x_-yg4dvNY1_GQ,6284
-py2ls-0.2.4.
-py2ls-0.2.4.
-py2ls-0.2.4.
+py2ls-0.2.4.7.dist-info/METADATA,sha256=G3vO4DXYQEGFPw6PeRX54YXnqHyOy9pgv6jfxxqluyg,20038
+py2ls-0.2.4.7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+py2ls-0.2.4.7.dist-info/RECORD,,