py2ls 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py2ls/.DS_Store CHANGED
Binary file
@@ -0,0 +1,56 @@
1
+ {
2
+ "pd.read_pickle": "pd.read_pickle(filepath_or_buffer,compression='infer',storage_options=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_pickle.html",
3
+ "df.to_pickle": "df.to_pickle(path,*,compression='infer',protocol=5,storage_options=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_pickle.html",
4
+ "pd.read_table": "pd.read_table(filepath_or_buffer,*,sep=<no_default>,delimiter=None,header='infer',names=<no_default>,index_col=None,usecols=None,dtype=None,engine=None,converters=None,true_values=None,false_values=None,skipinitialspace=False,skiprows=None,skipfooter=0,nrows=None,na_values=None,keep_default_na=True,na_filter=True,verbose=<no_default>,skip_blank_lines=True,parse_dates=False,infer_datetime_format=<no_default>,keep_date_col=<no_default>,date_parser=<no_default>,date_format=None,dayfirst=False,cache_dates=True,iterator=False,chunksize=None,compression='infer',thousands=None,decimal='.',lineterminator=None,quotechar='\"',quoting=0,doublequote=True,escapechar=None,comment=None,encoding=None,encoding_errors='strict',dialect=None,on_bad_lines='error',delim_whitespace=<no_default>,low_memory=True,memory_map=False,float_precision=None,storage_options=None,dtype_backend=<no_default>)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_table.html",
5
+ "pd.read_csv": "pd.read_csv(filepath_or_buffer,*,sep=<no_default>,delimiter=None,header='infer',names=<no_default>,index_col=None,usecols=None,dtype=None,engine=None,converters=None,true_values=None,false_values=None,skipinitialspace=False,skiprows=None,skipfooter=0,nrows=None,na_values=None,keep_default_na=True,na_filter=True,verbose=<no_default>,skip_blank_lines=True,parse_dates=None,infer_datetime_format=<no_default>,keep_date_col=<no_default>,date_parser=<no_default>,date_format=None,dayfirst=False,cache_dates=True,iterator=False,chunksize=None,compression='infer',thousands=None,decimal='.',lineterminator=None,quotechar='\"',quoting=0,doublequote=True,escapechar=None,comment=None,encoding=None,encoding_errors='strict',dialect=None,on_bad_lines='error',delim_whitespace=<no_default>,low_memory=True,memory_map=False,float_precision=None,storage_options=None,dtype_backend=<no_default>)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_csv.html",
6
+ "df.to_csv": "df.to_csv(path_or_buf=None,*,sep=',',na_rep='',float_format=None,columns=None,header=True,index=True,index_label=None,mode='w',encoding=None,compression='infer',quoting=None,quotechar='\"',lineterminator=None,chunksize=None,date_format=None,doublequote=True,escapechar=None,decimal='.',errors='strict',storage_options=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html#pandas.DataFrame.to_csv",
7
+ "pd.read_fwf": "pd.read_fwf(filepath_or_buffer,*,colspecs='infer',widths=None,infer_nrows=100,dtype_backend=<no_default>,iterator=False,chunksize=None,**kwds)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_fwf.html",
8
+ "pd.read_clipboard": "pd.read_clipboard(sep='\\\\s+',dtype_backend=<no_default>,**kwargs)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_clipboard.html#pandas.read_clipboard",
9
+ "pd.read_excel": "pd.read_excel(io,sheet_name=0,*,header=0,names=None,index_col=None,usecols=None,dtype=None,engine=None,converters=None,true_values=None,false_values=None,skiprows=None,nrows=None,na_values=None,keep_default_na=True,na_filter=True,verbose=False,parse_dates=False,date_parser=<no_default>,date_format=None,thousands=None,decimal='.',comment=None,skipfooter=0,storage_options=None,dtype_backend=<no_default>,engine_kwargs=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_excel.html",
10
+ "df.to_excel": "df.to_excel(excel_writer,*,sheet_name='Sheet1',na_rep='',float_format=None,columns=None,header=True,index=True,index_label=None,startrow=0,startcol=0,engine=None,merge_cells=True,inf_rep='inf',freeze_panes=None,storage_options=None,engine_kwargs=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_excel.html",
11
+ "pandas.ExcelFile": "pandas.ExcelFile(path_or_buffer,engine=None,storage_options=None,engine_kwargs=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.ExcelFile.html",
12
+ "ExcelFile.book": "ExcelFile.book\nhttps://pandas.pydata.org/docs/reference/api/pandas.ExcelFile.book.html",
13
+ "ExcelFile.sheet_names": "ExcelFile.sheet_names\nhttps://pandas.pydata.org/docs/reference/api/pandas.ExcelFile.sheet_names.html",
14
+ "ExcelFile.parse": "ExcelFile.parse(sheet_name=0,header=0,names=None,index_col=None,usecols=None,converters=None,true_values=None,false_values=None,skiprows=None,nrows=None,na_values=None,parse_dates=False,date_parser=<no_default>,date_format=None,thousands=None,comment=None,skipfooter=0,dtype_backend=<no_default>,**kwds)\nhttps://pandas.pydata.org/docs/reference/api/pandas.ExcelFile.parse.html",
15
+ "Styler.to_excel": "Styler.to_excel(excel_writer,sheet_name='Sheet1',na_rep='',float_format=None,columns=None,header=True,index=True,index_label=None,startrow=0,startcol=0,engine=None,merge_cells=True,encoding=None,inf_rep='inf',verbose=True,freeze_panes=None,storage_options=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.to_excel.html",
16
+ "pandas.ExcelWriter": "pandas.ExcelWriter(path,engine=None,date_format=None,datetime_format=None,mode='w',storage_options=None,if_sheet_exists=None,engine_kwargs=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.ExcelWriter.html",
17
+ "pd.read_json": "pd.read_json(path_or_buf,*,orient=None,typ='frame',dtype=None,convert_axes=None,convert_dates=True,keep_default_dates=True,precise_float=False,date_unit=None,encoding=None,encoding_errors='strict',lines=False,chunksize=None,compression='infer',nrows=None,storage_options=None,dtype_backend=<no_default>,engine='ujson')\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_json.html",
18
+ "pd.json_normalize": "pd.json_normalize(data,record_path=None,meta=None,meta_prefix=None,record_prefix=None,errors='raise',sep='.',max_level=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.json_normalize.html",
19
+ "df.to_json": "df.to_json(path_or_buf=None,*,orient=None,date_format=None,double_precision=10,force_ascii=True,date_unit='ms',default_handler=None,lines=False,compression='infer',index=None,indent=None,storage_options=None,mode='w')\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html",
20
+ "pd.io.json.build_table_schema": "pd.io.json.build_table_schema(data,index=True,primary_key=None,version=True)\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.json.build_table_schema.html",
21
+ "pd.read_html": "pd.read_html(io,*,match='.+',flavor=None,header=None,index_col=None,skiprows=None,attrs=None,parse_dates=False,thousands=',',encoding=None,decimal='.',converters=None,na_values=None,keep_default_na=True,displayed_only=True,extract_links=None,dtype_backend=<no_default>,storage_options=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_html.html",
22
+ "df.to_html": "df.to_html(buf=None,*,columns=None,col_space=None,header=True,index=True,na_rep='NaN',formatters=None,float_format=None,sparsify=None,index_names=True,justify=None,max_rows=None,max_cols=None,show_dimensions=False,decimal='.',bold_rows=True,classes=None,escape=True,notebook=False,border=None,table_id=None,render_links=False,encoding=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_html.html",
23
+ "Styler.to_html": "Styler.to_html(buf=None,*,table_uuid=None,table_attributes=None,sparse_index=None,sparse_columns=None,bold_headers=False,caption=None,max_rows=None,max_columns=None,encoding=None,doctype_html=False,exclude_styles=False,**kwargs)\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.to_html.html",
24
+ "pd.read_xml": "pd.read_xml(path_or_buffer,*,xpath='./*',namespaces=None,elems_only=False,attrs_only=False,names=None,dtype=None,converters=None,parse_dates=None,encoding='utf-8',parser='lxml',stylesheet=None,iterparse=None,compression='infer',storage_options=None,dtype_backend=<no_default>)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_xml.html",
25
+ "df.to_xml": "df.to_xml(path_or_buffer=None,*,index=True,root_name='data',row_name='row',na_rep=None,attr_cols=None,elem_cols=None,namespaces=None,prefix=None,encoding='utf-8',xml_declaration=True,pretty_print=True,parser='lxml',stylesheet=None,compression='infer',storage_options=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_xml.html",
26
+ "df.to_latex": "df.to_latex(buf=None,*,columns=None,header=True,index=True,na_rep='NaN',formatters=None,float_format=None,sparsify=None,index_names=True,bold_rows=False,column_format=None,longtable=None,escape=None,encoding=None,decimal='.',multicolumn=None,multicolumn_format=None,multirow=None,caption=None,label=None,position=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_latex.html",
27
+ "Styler.to_latex": "Styler.to_latex(buf=None,*,column_format=None,position=None,position_float=None,hrules=None,clines=None,label=None,caption=None,sparse_index=None,sparse_columns=None,multirow_align=None,multicol_align=None,siunitx=False,environment=None,encoding=None,convert_css=False)\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.to_latex.html",
28
+ "pd.read_hdf": "pd.read_hdf(path_or_buf,key=None,mode='r',errors='strict',where=None,start=None,stop=None,columns=None,iterator=False,chunksize=None,**kwargs)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_hdf.html",
29
+ "HDFStore.put": "HDFStore.put(key,value,format=None,index=True,append=False,complib=None,complevel=None,min_itemsize=None,nan_rep=None,data_columns=None,encoding=None,errors='strict',track_times=True,dropna=False)\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.put.html",
30
+ "HDFStore.append": "HDFStore.append(key,value,format=None,axes=None,index=True,append=True,complib=None,complevel=None,columns=None,min_itemsize=None,nan_rep=None,chunksize=None,expectedrows=None,dropna=None,data_columns=None,encoding=None,errors='strict')\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.append.html",
31
+ "HDFStore.get": "HDFStore.get(key)\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.get.html",
32
+ "HDFStore.select": "HDFStore.select(key,where=None,start=None,stop=None,columns=None,iterator=False,chunksize=None,auto_close=False)\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.select.html",
33
+ "HDFStore.info": "HDFStore.info()\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.info.html",
34
+ "HDFStore.keys": "HDFStore.keys(include='pandas')\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.keys.html",
35
+ "HDFStore.groups": "HDFStore.groups()\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.groups.html",
36
+ "HDFStore.walk": "HDFStore.walk(where='/')\nhttps://pandas.pydata.org/docs/reference/api/pandas.HDFStore.walk.html",
37
+ "pd.read_feather": "pd.read_feather(path,columns=None,use_threads=True,storage_options=None,dtype_backend=<no_default>)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_feather.html",
38
+ "df.to_feather": "df.to_feather(path,**kwargs)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_feather.html",
39
+ "pd.read_parquet": "pd.read_parquet(path,engine='auto',columns=None,storage_options=None,use_nullable_dtypes=<no_default>,dtype_backend=<no_default>,filesystem=None,filters=None,**kwargs)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html",
40
+ "df.to_parquet": "df.to_parquet(path=None,*,engine='auto',compression='snappy',index=None,partition_cols=None,storage_options=None,**kwargs)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_parquet.html",
41
+ "pd.read_orc": "pd.read_orc(path,columns=None,dtype_backend=<no_default>,filesystem=None,**kwargs)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_orc.html",
42
+ "df.to_orc": "df.to_orc(path=None,*,engine='pyarrow',index=None,engine_kwargs=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_orc.html",
43
+ "pd.read_sas": "pd.read_sas(filepath_or_buffer,*,format=None,index=None,encoding=None,chunksize=None,iterator=False,compression='infer')\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_sas.html",
44
+ "pd.read_spss": "pd.read_spss(path,usecols=None,convert_categoricals=True,dtype_backend=<no_default>)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_spss.html",
45
+ "pd.read_sql_table": "pd.read_sql_table(table_name,con,schema=None,index_col=None,coerce_float=True,parse_dates=None,columns=None,chunksize=None,dtype_backend=<no_default>)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_sql_table.html",
46
+ "pd.read_sql_query": "pd.read_sql_query(sql,con,index_col=None,coerce_float=True,params=None,parse_dates=None,chunksize=None,dtype=None,dtype_backend=<no_default>)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_sql_query.html",
47
+ "pd.read_sql": "pd.read_sql(sql,con,index_col=None,coerce_float=True,params=None,parse_dates=None,columns=None,chunksize=None,dtype_backend=<no_default>,dtype=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_sql.html",
48
+ "df.to_sql": "df.to_sql(name,con,*,schema=None,if_exists='fail',index=True,index_label=None,chunksize=None,dtype=None,method=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html",
49
+ "pd.read_gbq": "pd.read_gbq(query,project_id=None,index_col=None,col_order=None,reauth=False,auth_local_webserver=True,dialect=None,location=None,configuration=None,credentials=None,use_bqstorage_api=None,max_results=None,progress_bar_type=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_gbq.html",
50
+ "pd.read_stata": "pd.read_stata(filepath_or_buffer,*,convert_dates=True,convert_categoricals=True,index_col=None,convert_missing=False,preserve_dtypes=True,columns=None,order_categoricals=True,chunksize=None,iterator=False,compression='infer',storage_options=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.read_stata.html",
51
+ "df.to_stata": "df.to_stata(path,*,convert_dates=None,write_index=True,byteorder=None,time_stamp=None,data_label=None,variable_labels=None,version=114,convert_strl=None,compression='infer',storage_options=None,value_labels=None)\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_stata.html",
52
+ "StataReader.data_label": "StataReader.data_label\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.stata.StataReader.data_label.html",
53
+ "StataReader.value_labels": "StataReader.value_labels()\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.stata.StataReader.value_labels.html",
54
+ "StataReader.variable_labels": "StataReader.variable_labels()\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.stata.StataReader.variable_labels.html",
55
+ "StataWriter.write_file": "StataWriter.write_file()\nhttps://pandas.pydata.org/docs/reference/api/pandas.io.stata.StataWriter.write_file.html"
56
+ }
@@ -1,25 +1,25 @@
1
1
  {
2
- "relplot": "seaborn.relplot(data=None,*,x=None,y=None,hue=None,size=None,style=None,units=None,weights=None,row=None,col=None,col_wrap=None,row_order=None,col_order=None,palette=None,hue_order=None,hue_norm=None,sizes=None,size_order=None,size_norm=None,markers=None,dashes=None,style_order=None,legend='auto',kind='scatter',height=5,aspect=1,facet_kws=None,**kwargs)",
3
- "scatterplot": "seaborn.scatterplot(data=None,*,x=None,y=None,hue=None,size=None,style=None,palette=None,hue_order=None,hue_norm=None,sizes=None,size_order=None,size_norm=None,markers=True,style_order=None,legend='auto',ax=None,**kwargs)",
4
- "lineplot": "seaborn.lineplot(data=None,*,x=None,y=None,hue=None,size=None,style=None,units=None,weights=None,palette=None,hue_order=None,hue_norm=None,sizes=None,size_order=None,size_norm=None,dashes=True,markers=None,style_order=None,estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,orient='x',sort=True,err_style='band',err_kws=None,legend='auto',ci='deprecated',ax=None,**kwargs)",
5
- "displot": "seaborn.displot(data=None,*,x=None,y=None,hue=None,row=None,col=None,weights=None,kind='hist',rug=False,rug_kws=None,log_scale=None,legend=True,palette=None,hue_order=None,hue_norm=None,color=None,col_wrap=None,row_order=None,col_order=None,height=5,aspect=1,facet_kws=None,**kwargs)",
6
- "histplot": "seaborn.histplot(data=None,*,x=None,y=None,hue=None,weights=None,stat='count',bins='auto',binwidth=None,binrange=None,discrete=None,cumulative=False,common_bins=True,common_norm=True,multiple='layer',element='bars',fill=True,shrink=1,kde=False,kde_kws=None,line_kws=None,thresh=0,pthresh=None,pmax=None,cbar=False,cbar_ax=None,cbar_kws=None,palette=None,hue_order=None,hue_norm=None,color=None,log_scale=None,legend=True,ax=None,**kwargs)",
7
- "kdeplot": "seaborn.kdeplot(data=None,*,x=None,y=None,hue=None,weights=None,palette=None,hue_order=None,hue_norm=None,color=None,fill=None,multiple='layer',common_norm=True,common_grid=False,cumulative=False,bw_method='scott',bw_adjust=1,warn_singular=True,log_scale=None,levels=10,thresh=0.05,gridsize=200,cut=3,clip=None,legend=True,cbar=False,cbar_ax=None,cbar_kws=None,ax=None,**kwargs)",
8
- "ecdfplot": "seaborn.ecdfplot(data=None,*,x=None,y=None,hue=None,weights=None,stat='proportion',complementary=False,palette=None,hue_order=None,hue_norm=None,log_scale=None,legend=True,ax=None,**kwargs)",
9
- "rugplot": "seaborn.rugplot(data=None,*,x=None,y=None,hue=None,height=0.025,expand_margins=True,palette=None,hue_order=None,hue_norm=None,legend=True,ax=None,**kwargs)",
10
- "distplot": "seaborn.distplot(a=None,bins=None,hist=True,kde=True,rug=False,fit=None,hist_kws=None,kde_kws=None,rug_kws=None,fit_kws=None,color=None,vertical=False,norm_hist=False,axlabel=None,label=None,ax=None,x=None)",
11
- "catplot": "seaborn.catplot(data=None,*,x=None,y=None,hue=None,row=None,col=None,kind='strip',estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,units=None,weights=None,order=None,hue_order=None,row_order=None,col_order=None,col_wrap=None,height=5,aspect=1,log_scale=None,native_scale=False,formatter=None,orient=None,color=None,palette=None,hue_norm=None,legend='auto',legend_out=True,sharex=True,sharey=True,margin_titles=False,facet_kws=None,ci=<deprecated>,**kwargs)",
12
- "stripplot": "seaborn.stripplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,jitter=True,dodge=False,orient=None,color=None,palette=None,size=5,edgecolor=<default>,linewidth=0,hue_norm=None,log_scale=None,native_scale=False,formatter=None,legend='auto',ax=None,**kwargs)",
13
- "boxplot": "seaborn.boxplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,dodge='auto',width=0.8,gap=0,whis=1.5,linecolor='auto',linewidth=None,fliersize=None,hue_norm=None,native_scale=False,log_scale=None,formatter=None,legend='auto',ax=None,**kwargs)",
14
- "violinplot": "seaborn.violinplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,inner='box',split=False,width=0.8,dodge='auto',gap=0,linewidth=None,linecolor='auto',cut=2,gridsize=100,bw_method='scott',bw_adjust=1,density_norm='area',common_norm=False,hue_norm=None,formatter=None,log_scale=None,native_scale=False,legend='auto',scale=<deprecated>,scale_hue=<deprecated>,bw=<deprecated>,inner_kws=None,ax=None,**kwargs)",
15
- "boxenplot": "seaborn.boxenplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,dodge='auto',width=0.8,gap=0,linewidth=None,linecolor=None,width_method='exponential',k_depth='tukey',outlier_prop=0.007,trust_alpha=0.05,showfliers=True,hue_norm=None,log_scale=None,native_scale=False,formatter=None,legend='auto',scale=<deprecated>,box_kws=None,flier_kws=None,line_kws=None,ax=None,**kwargs)",
16
- "pointplot": "seaborn.pointplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,units=None,weights=None,color=None,palette=None,hue_norm=None,markers=<default>,linestyles=<default>,dodge=False,log_scale=None,native_scale=False,orient=None,capsize=0,formatter=None,legend='auto',err_kws=None,ci=<deprecated>,errwidth=<deprecated>,join=<deprecated>,scale=<deprecated>,ax=None,**kwargs)",
17
- "barplot": "seaborn.barplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,units=None,weights=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,hue_norm=None,width=0.8,dodge='auto',gap=0,log_scale=None,native_scale=False,formatter=None,legend='auto',capsize=0,err_kws=None,ci=<deprecated>,errcolor=<deprecated>,errwidth=<deprecated>,ax=None,**kwargs)",
18
- "countplot": "seaborn.countplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,hue_norm=None,stat='count',width=0.8,dodge='auto',gap=0,log_scale=None,native_scale=False,formatter=None,legend='auto',ax=None,**kwargs)",
19
- "lmplot": "seaborn.lmplot(data,*,x=None,y=None,hue=None,col=None,row=None,palette=None,col_wrap=None,height=5,aspect=1,markers='o',sharex=None,sharey=None,hue_order=None,col_order=None,row_order=None,legend=True,legend_out=None,x_estimator=None,x_bins=None,x_ci='ci',scatter=True,fit_reg=True,ci=95,n_boot=1000,units=None,seed=None,order=1,logistic=False,lowess=False,robust=False,logx=False,x_partial=None,y_partial=None,truncate=True,x_jitter=None,y_jitter=None,scatter_kws=None,line_kws=None,facet_kws=None)",
20
- "regplot": "seaborn.regplot(data=None,*,x=None,y=None,x_estimator=None,x_bins=None,x_ci='ci',scatter=True,fit_reg=True,ci=95,n_boot=1000,units=None,seed=None,order=1,logistic=False,lowess=False,robust=False,logx=False,x_partial=None,y_partial=None,truncate=True,dropna=True,x_jitter=None,y_jitter=None,label=None,color=None,marker='o',scatter_kws=None,line_kws=None,ax=None)",
21
- "residplot": "seaborn.residplot(data=None,*,x=None,y=None,x_partial=None,y_partial=None,lowess=False,order=1,robust=False,dropna=True,label=None,color=None,scatter_kws=None,line_kws=None,ax=None)",
22
- "pairplot": "seaborn.pairplot(data,*,hue=None,hue_order=None,palette=None,vars=None,x_vars=None,y_vars=None,kind='scatter',diag_kind='auto',markers=None,height=2.5,aspect=1,corner=False,dropna=False,plot_kws=None,diag_kws=None,grid_kws=None,size=None)",
23
- "jointplot": "seaborn.jointplot(data=None,*,x=None,y=None,hue=None,kind='scatter',height=6,ratio=5,space=0.2,dropna=False,xlim=None,ylim=None,color=None,palette=None,hue_order=None,hue_norm=None,marginal_ticks=False,joint_kws=None,marginal_kws=None,**kwargs)",
24
- "plotting_context": "seaborn.plotting_context(context=None,font_scale=1,rc=None)"
2
+ "relplot": "seaborn.relplot(data=None,*,x=None,y=None,hue=None,size=None,style=None,units=None,weights=None,row=None,col=None,col_wrap=None,row_order=None,col_order=None,palette=None,hue_order=None,hue_norm=None,sizes=None,size_order=None,size_norm=None,markers=None,dashes=None,style_order=None,legend='auto',kind='scatter',height=5,aspect=1,facet_kws=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.relplot.html",
3
+ "scatterplot": "seaborn.scatterplot(data=None,*,x=None,y=None,hue=None,size=None,style=None,palette=None,hue_order=None,hue_norm=None,sizes=None,size_order=None,size_norm=None,markers=True,style_order=None,legend='auto',ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.scatterplot.html",
4
+ "lineplot": "seaborn.lineplot(data=None,*,x=None,y=None,hue=None,size=None,style=None,units=None,weights=None,palette=None,hue_order=None,hue_norm=None,sizes=None,size_order=None,size_norm=None,dashes=True,markers=None,style_order=None,estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,orient='x',sort=True,err_style='band',err_kws=None,legend='auto',ci='deprecated',ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.lineplot.html",
5
+ "displot": "seaborn.displot(data=None,*,x=None,y=None,hue=None,row=None,col=None,weights=None,kind='hist',rug=False,rug_kws=None,log_scale=None,legend=True,palette=None,hue_order=None,hue_norm=None,color=None,col_wrap=None,row_order=None,col_order=None,height=5,aspect=1,facet_kws=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.displot.html",
6
+ "histplot": "seaborn.histplot(data=None,*,x=None,y=None,hue=None,weights=None,stat='count',bins='auto',binwidth=None,binrange=None,discrete=None,cumulative=False,common_bins=True,common_norm=True,multiple='layer',element='bars',fill=True,shrink=1,kde=False,kde_kws=None,line_kws=None,thresh=0,pthresh=None,pmax=None,cbar=False,cbar_ax=None,cbar_kws=None,palette=None,hue_order=None,hue_norm=None,color=None,log_scale=None,legend=True,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.histplot.html",
7
+ "kdeplot": "seaborn.kdeplot(data=None,*,x=None,y=None,hue=None,weights=None,palette=None,hue_order=None,hue_norm=None,color=None,fill=None,multiple='layer',common_norm=True,common_grid=False,cumulative=False,bw_method='scott',bw_adjust=1,warn_singular=True,log_scale=None,levels=10,thresh=0.05,gridsize=200,cut=3,clip=None,legend=True,cbar=False,cbar_ax=None,cbar_kws=None,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.kdeplot.html",
8
+ "ecdfplot": "seaborn.ecdfplot(data=None,*,x=None,y=None,hue=None,weights=None,stat='proportion',complementary=False,palette=None,hue_order=None,hue_norm=None,log_scale=None,legend=True,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.ecdfplot.html",
9
+ "rugplot": "seaborn.rugplot(data=None,*,x=None,y=None,hue=None,height=0.025,expand_margins=True,palette=None,hue_order=None,hue_norm=None,legend=True,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.rugplot.html",
10
+ "distplot": "seaborn.distplot(a=None,bins=None,hist=True,kde=True,rug=False,fit=None,hist_kws=None,kde_kws=None,rug_kws=None,fit_kws=None,color=None,vertical=False,norm_hist=False,axlabel=None,label=None,ax=None,x=None)\nhttps://seaborn.pydata.org/generated/seaborn.distplot.html",
11
+ "catplot": "seaborn.catplot(data=None,*,x=None,y=None,hue=None,row=None,col=None,kind='strip',estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,units=None,weights=None,order=None,hue_order=None,row_order=None,col_order=None,col_wrap=None,height=5,aspect=1,log_scale=None,native_scale=False,formatter=None,orient=None,color=None,palette=None,hue_norm=None,legend='auto',legend_out=True,sharex=True,sharey=True,margin_titles=False,facet_kws=None,ci=<deprecated>,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.catplot.html#seaborn.catplot",
12
+ "stripplot": "seaborn.stripplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,jitter=True,dodge=False,orient=None,color=None,palette=None,size=5,edgecolor=<default>,linewidth=0,hue_norm=None,log_scale=None,native_scale=False,formatter=None,legend='auto',ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.stripplot.html#seaborn.stripplot",
13
+ "boxplot": "seaborn.boxplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,dodge='auto',width=0.8,gap=0,whis=1.5,linecolor='auto',linewidth=None,fliersize=None,hue_norm=None,native_scale=False,log_scale=None,formatter=None,legend='auto',ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.boxplot.html#seaborn.boxplot",
14
+ "violinplot": "seaborn.violinplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,inner='box',split=False,width=0.8,dodge='auto',gap=0,linewidth=None,linecolor='auto',cut=2,gridsize=100,bw_method='scott',bw_adjust=1,density_norm='area',common_norm=False,hue_norm=None,formatter=None,log_scale=None,native_scale=False,legend='auto',scale=<deprecated>,scale_hue=<deprecated>,bw=<deprecated>,inner_kws=None,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.violinplot.html#seaborn.violinplot",
15
+ "boxenplot": "seaborn.boxenplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,dodge='auto',width=0.8,gap=0,linewidth=None,linecolor=None,width_method='exponential',k_depth='tukey',outlier_prop=0.007,trust_alpha=0.05,showfliers=True,hue_norm=None,log_scale=None,native_scale=False,formatter=None,legend='auto',scale=<deprecated>,box_kws=None,flier_kws=None,line_kws=None,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.boxenplot.html",
16
+ "pointplot": "seaborn.pointplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,units=None,weights=None,color=None,palette=None,hue_norm=None,markers=<default>,linestyles=<default>,dodge=False,log_scale=None,native_scale=False,orient=None,capsize=0,formatter=None,legend='auto',err_kws=None,ci=<deprecated>,errwidth=<deprecated>,join=<deprecated>,scale=<deprecated>,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.pointplot.html",
17
+ "barplot": "seaborn.barplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,estimator='mean',errorbar=('ci',95),n_boot=1000,seed=None,units=None,weights=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,hue_norm=None,width=0.8,dodge='auto',gap=0,log_scale=None,native_scale=False,formatter=None,legend='auto',capsize=0,err_kws=None,ci=<deprecated>,errcolor=<deprecated>,errwidth=<deprecated>,ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.barplot.html",
18
+ "countplot": "seaborn.countplot(data=None,*,x=None,y=None,hue=None,order=None,hue_order=None,orient=None,color=None,palette=None,saturation=0.75,fill=True,hue_norm=None,stat='count',width=0.8,dodge='auto',gap=0,log_scale=None,native_scale=False,formatter=None,legend='auto',ax=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.countplot.html",
19
+ "lmplot": "seaborn.lmplot(data,*,x=None,y=None,hue=None,col=None,row=None,palette=None,col_wrap=None,height=5,aspect=1,markers='o',sharex=None,sharey=None,hue_order=None,col_order=None,row_order=None,legend=True,legend_out=None,x_estimator=None,x_bins=None,x_ci='ci',scatter=True,fit_reg=True,ci=95,n_boot=1000,units=None,seed=None,order=1,logistic=False,lowess=False,robust=False,logx=False,x_partial=None,y_partial=None,truncate=True,x_jitter=None,y_jitter=None,scatter_kws=None,line_kws=None,facet_kws=None)\nhttps://seaborn.pydata.org/generated/seaborn.lmplot.html",
20
+ "regplot": "seaborn.regplot(data=None,*,x=None,y=None,x_estimator=None,x_bins=None,x_ci='ci',scatter=True,fit_reg=True,ci=95,n_boot=1000,units=None,seed=None,order=1,logistic=False,lowess=False,robust=False,logx=False,x_partial=None,y_partial=None,truncate=True,dropna=True,x_jitter=None,y_jitter=None,label=None,color=None,marker='o',scatter_kws=None,line_kws=None,ax=None)\nhttps://seaborn.pydata.org/generated/seaborn.regplot.html",
21
+ "residplot": "seaborn.residplot(data=None,*,x=None,y=None,x_partial=None,y_partial=None,lowess=False,order=1,robust=False,dropna=True,label=None,color=None,scatter_kws=None,line_kws=None,ax=None)\nhttps://seaborn.pydata.org/generated/seaborn.residplot.html",
22
+ "pairplot": "seaborn.pairplot(data,*,hue=None,hue_order=None,palette=None,vars=None,x_vars=None,y_vars=None,kind='scatter',diag_kind='auto',markers=None,height=2.5,aspect=1,corner=False,dropna=False,plot_kws=None,diag_kws=None,grid_kws=None,size=None)\nhttps://seaborn.pydata.org/generated/seaborn.pairplot.html",
23
+ "jointplot": "seaborn.jointplot(data=None,*,x=None,y=None,hue=None,kind='scatter',height=6,ratio=5,space=0.2,dropna=False,xlim=None,ylim=None,color=None,palette=None,hue_order=None,hue_norm=None,marginal_ticks=False,joint_kws=None,marginal_kws=None,**kwargs)\nhttps://seaborn.pydata.org/generated/seaborn.jointplot.html",
24
+ "plotting_context": "seaborn.plotting_context(context=None,font_scale=1,rc=None)\nhttps://seaborn.pydata.org/generated/seaborn.plotting_context.html"
25
25
  }
py2ls/fetch_update.py ADDED
@@ -0,0 +1,127 @@
1
+ from .ips import *
2
+ from .netfinder import fetch, get_soup
3
+
4
+
5
+ def usage_pd(
6
+ url="https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_clipboard.html",
7
+ dir_save=None,
8
+ ):
9
+ # extract each usage from its url
10
+ def get_usage(url):
11
+ # extract each usage from its url
12
+ print(f"trying: {url}")
13
+ sp = get_soup(url, driver="se")
14
+ return fetch(sp, where="dt")[0]
15
+
16
+ if dir_save is None:
17
+ if "mac" in get_os():
18
+ dir_save = "/Users/macjianfeng/Dropbox/github/python/py2ls/py2ls/data/"
19
+ else:
20
+ dir_save = "Z:\\Jianfeng\\temp\\"
21
+ sp = get_soup(url, driver="se")
22
+ links_all = fetch(sp, where="a", get="href", class_="reference internal")
23
+ filtered_links = unique(
24
+ [i for i in links_all if any([i.startswith(cond) for cond in ["pandas"]])]
25
+ )
26
+ links = [
27
+ "https://pandas.pydata.org/docs/reference/api/" + i for i in filtered_links
28
+ ]
29
+
30
+ usages = [get_usage(i) for i in links]
31
+ dict_usage = {}
32
+ for usage, link in zip(usages, links):
33
+ if usage.startswith("DataFrame"):
34
+ usage = usage.replace("DataFrame", "df")
35
+ if usage.startswith("pandas"):
36
+ usage = usage.replace("pandas", "pd")
37
+ if usage.endswith("[source]#"):
38
+ usage = usage.replace("[source]#", "")
39
+ if usage.endswith("#"):
40
+ usage = usage.replace("#", "")
41
+ str2rm = ["class", "property"]
42
+ for str2rm_ in str2rm:
43
+ if usage.startswith(str2rm_):
44
+ usage = usage.replace(str2rm_, "")
45
+ funcname = ssplit(usage, by="(")[0]
46
+ dict_usage.update({funcname: usage + f"\n{link}"})
47
+ # save to local
48
+ dir_save += "/" if not dir_save.endswith("/") else ""
49
+ fsave(
50
+ dir_save + "usages_pd.json",
51
+ dict_usage,
52
+ )
53
+
54
+
55
+ def usage_sns(
56
+ url="https://seaborn.pydata.org/generated/seaborn.swarmplot.html",
57
+ dir_save=None,
58
+ ):
59
+ """
60
+ Fetches usage examples of various Seaborn plotting functions from the Seaborn documentation website.
61
+ It filters the relevant plot-related links, extracts usage examples, and saves them in a JSON file.
62
+
63
+ Parameters:
64
+ - url (str): URL of the Seaborn page to start extracting plot usages (default is swarmplot page).
65
+ - dir_save (str): Directory where the JSON file containing usages will be saved (default is a local path).
66
+
67
+ Saves:
68
+ - A JSON file named 'usages_sns.json' containing plotting function names and their usage descriptions.
69
+
70
+ Returns:
71
+ - None
72
+ """
73
+
74
+ # extract each usage from its url
75
+ def get_usage(url):
76
+ print(f"trying: {url}")
77
+ sp = get_soup(url, driver="se")
78
+ return fetch(sp, where="dt")[0]
79
+
80
+ if dir_save is None:
81
+ if "mac" in get_os():
82
+ dir_save = "/Users/macjianfeng/Dropbox/github/python/py2ls/py2ls/data/"
83
+ else:
84
+ dir_save = "Z:\\Jianfeng\\temp\\"
85
+ sp = get_soup(url, driver="se")
86
+ links_all = fetch(sp, where="a", get="href", class_="reference internal")
87
+ filtered_links = unique(
88
+ [
89
+ i
90
+ for i in links_all
91
+ if not any(
92
+ [
93
+ i.startswith(cond)
94
+ for cond in [
95
+ "seaborn.JointGrid",
96
+ "seaborn.PairGrid",
97
+ "seaborn.objects",
98
+ ]
99
+ ]
100
+ + ["plot" not in i]
101
+ )
102
+ ]
103
+ )
104
+ links = ["https://seaborn.pydata.org/generated/" + i for i in filtered_links]
105
+
106
+ usages = [get_usage(i) for i in links]
107
+ dict_usage = {}
108
+ for usage, link in zip(usages, links):
109
+ dict_usage.update(
110
+ {ssplit(usage, by="(")[0].replace("seaborn.", ""): usage[:-1] + f"\n{link}"}
111
+ )
112
+ # save to local
113
+ dir_save += "/" if not dir_save.endswith("/") else ""
114
+ fsave(
115
+ dir_save + "usages_sns.json",
116
+ dict_usage,
117
+ )
118
+
119
+
120
+ def main():
121
+ # update pandas usage to local
122
+ usage_pd()
123
+ # usage_sns()
124
+
125
+
126
+ if __name__ == "__main__":
127
+ main()
py2ls/ips.py CHANGED
@@ -59,6 +59,21 @@ try:
59
59
  except NameError:
60
60
  pass
61
61
 
62
+ def plt_font(dir_font: str = "/System/Library/Fonts/Hiragino Sans GB.ttc"):
63
+ """
64
+ Add the Chinese font to the font manager
65
+ Args:
66
+ dir_font (str, optional): _description_. Defaults to "/System/Library/Fonts/Hiragino Sans GB.ttc".
67
+ """
68
+ import matplotlib.pyplot as plt
69
+ from matplotlib import font_manager
70
+
71
+ font_manager.fontManager.addfont(dir_font)
72
+ fontname_chinese = os.path.basename(dir_font).split(".")[0]
73
+
74
+ plt.rcParams["font.sans-serif"] = [fontname_chinese]
75
+ plt.rcParams["font.family"] = "sans-serif"
76
+ plt.rcParams["axes.unicode_minus"] = False
62
77
 
63
78
  # set 'dir_save'
64
79
  if "dar" in sys.platform:
@@ -255,16 +270,21 @@ def is_package_installed(package_name):
255
270
  return package_spec is not None
256
271
 
257
272
 
258
- def upgrade(module="py2ls"):
259
- # try:
260
- # pkg_resources.get_distribution(module)
261
- # except pkg_resources.DistributionNotFound:
262
- # subprocess.check_call([sys.executable, "-m", "pip", "install", module])
273
+ def upgrade(module="py2ls",uninstall=False):
274
+ """
275
+ Installs or upgrades a specified Python module.
276
+
277
+ Parameters:
278
+ module (str): The name of the module to install/upgrade.
279
+ uninstall (bool): If True, uninstalls the webdriver-manager before upgrading.
280
+ """
263
281
  if not is_package_installed(module):
264
282
  try:
265
283
  subprocess.check_call([sys.executable, "-m", "pip", "install", module])
266
284
  except subprocess.CalledProcessError as e:
267
285
  print(f"An error occurred while installing {module}: {e}")
286
+ if uninstall:
287
+ subprocess.check_call(["pip", "uninstall", "-y", "webdriver-manager"])
268
288
  try:
269
289
  subprocess.check_call(
270
290
  [sys.executable, "-m", "pip", "install", "--upgrade", module]
@@ -515,8 +535,8 @@ def strcmp(search_term, candidates, ignore_case=True, verbose=False, scorer="WR"
515
535
  similarity_scores = [fuzz.partial_ratio(str1_, word) for word in str2_]
516
536
  elif "W" in scorer.lower():
517
537
  similarity_scores = [fuzz.WRatio(str1_, word) for word in str2_]
518
- elif "Ratio" in scorer.lower():
519
- similarity_scores = [fuzz.Ratio(str1_, word) for word in str2_]
538
+ elif "ratio" in scorer.lower():#Ratio (Strictest)
539
+ similarity_scores = [fuzz.ratio(str1_, word) for word in str2_]
520
540
  else:
521
541
  similarity_scores = [fuzz.WRatio(str1_, word) for word in str2_]
522
542
  best_match_index = similarity_scores.index(max(similarity_scores))
@@ -528,7 +548,7 @@ def strcmp(search_term, candidates, ignore_case=True, verbose=False, scorer="WR"
528
548
  elif "W" in scorer.lower():
529
549
  best_match_score = fuzz.WRatio(str1_, str2_)
530
550
  elif "Ratio" in scorer.lower():
531
- best_match_score = fuzz.Ratio(str1_, str2_)
551
+ best_match_score = fuzz.ratio(str1_, str2_)
532
552
  else:
533
553
  best_match_score = fuzz.WRatio(str1_, str2_)
534
554
  if verbose:
@@ -543,6 +563,80 @@ def strcmp(search_term, candidates, ignore_case=True, verbose=False, scorer="WR"
543
563
  # str2 = ['PLoS Computational Biology', 'PLOS BIOLOGY']
544
564
  # best_match, idx = strcmp(str1, str2, ignore_case=1)
545
565
 
566
+ def cn2pinyin(
567
+ cn_str: Union[str, list] = None,
568
+ sep: str = " ",
569
+ fmt: str = "normal", # which style you want to set
570
+ ):
571
+ from pypinyin import pinyin, Style
572
+
573
+ """
574
+ Converts Chinese characters to Pinyin.
575
+ usage:
576
+ cn2pinyin(cn_str, sep="_", fmt="tone")
577
+ Args:
578
+ cn_str (str): Chinese string to convert.
579
+ sep (str): Separator for the output Pinyin string.
580
+ style (Style): "normal","tone", "tone2","tone3",
581
+ "finals","finals_tone","finals_tone2","finals_tone3",
582
+ "initials","bopomofo","bopomofo_first","cyrillic","pl",
583
+ Returns:
584
+ cn_str: The Pinyin representation of the Chinese string.
585
+ """
586
+ fmts = [
587
+ "normal",
588
+ "tone",
589
+ "tone2",
590
+ "tone3",
591
+ "finals",
592
+ "finals_tone",
593
+ "finals_tone2",
594
+ "finals_tone3",
595
+ "initials",
596
+ "bopomofo",
597
+ "bopomofo_first",
598
+ "cyrillic",
599
+ "pl",
600
+ ]
601
+ fmt = strcmp(fmt, fmts)[0]
602
+ if fmt == "normal":
603
+ style = Style.NORMAL
604
+ elif fmt == "tone":
605
+ style = Style.TONE
606
+ elif fmt == "tone2":
607
+ style = Style.TONE2
608
+ elif fmt == "tone3":
609
+ style = Style.TONE3
610
+ elif fmt == "finals":
611
+ style = Style.FINALS
612
+ elif fmt == "finals_tone":
613
+ style = Style.FINALS_TONE
614
+ elif fmt == "finals_tone2":
615
+ style = Style.FINALS_TONE2
616
+ elif fmt == "finals_tone3":
617
+ style = Style.FINALS_TONE3
618
+ elif fmt == "initials":
619
+ style = Style.INITIALS
620
+ elif fmt == "bopomofo":
621
+ style = Style.BOPOMOFO
622
+ elif fmt == "bopomofo_first":
623
+ style = Style.BOPOMOFO_FIRST
624
+ elif fmt == "cyrillic":
625
+ style = Style.CYRILLIC
626
+ elif fmt == "pl":
627
+ style = Style.PL
628
+ else:
629
+ style = Style.NORMAL
630
+ if not isinstance(cn_str,list):
631
+ cn_str=[cn_str]
632
+ pinyin_flat=[]
633
+ for cn_str_ in cn_str:
634
+ pinyin_string = pinyin(cn_str_, style=style)
635
+ pinyin_flat.append(sep.join([item[0] for item in pinyin_string]))
636
+ if len(pinyin_flat)==1:
637
+ return pinyin_flat[0]
638
+ else:
639
+ return pinyin_flat
546
640
 
547
641
  def counter(list_, verbose=True):
548
642
  c = Counter(list_)
@@ -1551,10 +1645,14 @@ def fload(fpath, kind=None, **kwargs):
1551
1645
  content = file.read()
1552
1646
  return content
1553
1647
 
1554
- def load_json(fpath):
1555
- with open(fpath, "r") as file:
1556
- content = json.load(file)
1557
- return content
1648
+ def load_json(fpath, **kwargs):
1649
+ output=kwargs.pop("output","json")
1650
+ if output=='json':
1651
+ with open(fpath, "r") as file:
1652
+ content = json.load(file)
1653
+ return content
1654
+ else:
1655
+ return pd.read_json(fpath,**kwargs)
1558
1656
 
1559
1657
  def load_yaml(fpath):
1560
1658
  with open(fpath, "r") as file:
@@ -1616,6 +1714,11 @@ def fload(fpath, kind=None, **kwargs):
1616
1714
  comment = kwargs.get("comment", None)
1617
1715
  kwargs.pop("comment", None)
1618
1716
 
1717
+ fmt=kwargs.pop("fmt",False)
1718
+ if verbose:
1719
+ print_pd_usage("read_csv", verbose=verbose)
1720
+ return
1721
+
1619
1722
  if comment is None:
1620
1723
  comment = get_comment(
1621
1724
  fpath, comment=None, encoding="utf-8", lines_to_check=5
@@ -1741,10 +1844,18 @@ def fload(fpath, kind=None, **kwargs):
1741
1844
  print(f"shape: {df.shape}")
1742
1845
  return df
1743
1846
 
1744
- def load_xlsx(fpath, **kwargs):
1847
+ def load_excel(fpath, **kwargs):
1745
1848
  engine = kwargs.get("engine", "openpyxl")
1746
- kwargs.pop("engine", None)
1849
+ verbose=kwargs.pop("verbose",False)
1850
+ if verbose:
1851
+ print_pd_usage("read_excel", verbose=verbose)
1747
1852
  df = pd.read_excel(fpath, engine=engine, **kwargs)
1853
+ try:
1854
+ meata=pd.ExcelFile(fpath)
1855
+ print(f"n_sheet={len(meata.sheet_names)},\t'sheetname = 0 (default)':")
1856
+ [print(f"{i}:\t{i_}") for i,i_ in enumerate(meata.sheet_names)]
1857
+ except:
1858
+ pass
1748
1859
  return df
1749
1860
 
1750
1861
  def load_ipynb(fpath, **kwargs):
@@ -1813,7 +1924,6 @@ def fload(fpath, kind=None, **kwargs):
1813
1924
  if kind is None:
1814
1925
  _, kind = os.path.splitext(fpath)
1815
1926
  kind = kind.lower()
1816
-
1817
1927
  kind = kind.lstrip(".").lower()
1818
1928
  img_types = [
1819
1929
  "bmp",
@@ -1902,16 +2012,16 @@ def fload(fpath, kind=None, **kwargs):
1902
2012
  elif kind in ["ods", "ods", "odt"]:
1903
2013
  engine = kwargs.get("engine", "odf")
1904
2014
  kwargs.pop("engine", None)
1905
- return load_xlsx(fpath, engine=engine, **kwargs)
2015
+ return load_excel(fpath, engine=engine, **kwargs)
1906
2016
  elif kind == "xls":
1907
2017
  engine = kwargs.get("engine", "xlrd")
1908
2018
  kwargs.pop("engine", None)
1909
- content = load_xlsx(fpath, engine=engine, **kwargs)
1910
- display(content.head(2))
2019
+ content = load_excel(fpath, engine=engine, **kwargs)
2020
+ display(content.head(3))
1911
2021
  return content
1912
2022
  elif kind == "xlsx":
1913
- content = load_xlsx(fpath, **kwargs)
1914
- display(content.head(2))
2023
+ content = load_excel(fpath, **kwargs)
2024
+ display(content.head(3))
1915
2025
  return content
1916
2026
  elif kind == "ipynb":
1917
2027
  return load_ipynb(fpath, **kwargs)
@@ -2121,6 +2231,10 @@ def fsave(
2121
2231
 
2122
2232
  def save_csv(fpath, data, **kwargs):
2123
2233
  # https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html
2234
+
2235
+ verbose=kwargs.pop("verbose",False)
2236
+ if verbose:
2237
+ print_pd_usage("to_csv", verbose=verbose)
2124
2238
  kwargs_csv = dict(
2125
2239
  path_or_buf=None,
2126
2240
  sep=",",
@@ -2131,7 +2245,7 @@ def fsave(
2131
2245
  index=True,
2132
2246
  index_label=None,
2133
2247
  mode="w",
2134
- encoding=None,
2248
+ encoding="UTF-8",
2135
2249
  compression="infer",
2136
2250
  quoting=None,
2137
2251
  quotechar='"',
@@ -2149,26 +2263,31 @@ def fsave(
2149
2263
  df.to_csv(fpath, **kwargs_valid)
2150
2264
 
2151
2265
  def save_xlsx(fpath, data, **kwargs):
2266
+ verbose=kwargs.pop("verbose",False)
2267
+ sheet_name = kwargs.pop("sheet_name", "Sheet1")
2268
+ if verbose:
2269
+ print_pd_usage("to_excel", verbose=verbose)
2152
2270
  if any(kwargs):
2153
2271
  format_excel(df=data, filename=fpath, **kwargs)
2154
2272
  else:
2155
- kwargs.pop("format", None)
2156
- kwargs.pop("usage", None)
2157
- kwargs.pop("cell", None)
2158
- kwargs.pop("width", None)
2159
- kwargs.pop("height", None)
2160
- kwargs.pop("width", None)
2161
- kwargs.pop("height_max", None)
2162
- kwargs.pop("merge", None)
2163
- kwargs.pop("shade", None)
2164
- kwargs.pop("comment", None)
2165
- kwargs.pop("link", None)
2166
- kwargs.pop("protect", None)
2167
- kwargs.pop("number_format", None)
2168
- kwargs.pop("conditional_format", None)
2169
- kwargs.pop("index_default", None)
2273
+ # Remove non-relevant kwargs
2274
+ irrelevant_keys = [
2275
+ "format", "usage", "cell", "width", "height", "height_max", "merge",
2276
+ "shade", "comment", "link", "protect", "number_format", "conditional_format",
2277
+ "index_default"]
2278
+ for key in irrelevant_keys:
2279
+ kwargs.pop(key, None)
2280
+
2170
2281
  df = pd.DataFrame(data)
2171
- df.to_excel(fpath, **kwargs)
2282
+ # Check if the file exists, then append the sheet, otherwise create a new file
2283
+ try:
2284
+ # Use ExcelWriter with append mode if the file exists
2285
+ with pd.ExcelWriter(fpath, engine='openpyxl', mode='a', if_sheet_exists='new') as writer:
2286
+ df.to_excel(writer, sheet_name=sheet_name, index=False, **kwargs)
2287
+ except FileNotFoundError:
2288
+ # If file doesn't exist, create a new one
2289
+ df.to_excel(fpath, sheet_name=sheet_name, index=False, **kwargs)
2290
+
2172
2291
 
2173
2292
  def save_ipynb(fpath, data, **kwargs):
2174
2293
  # Split the content by code fences to distinguish between code and markdown
@@ -2441,10 +2560,13 @@ def listdir(
2441
2560
  verbose=True,
2442
2561
  ):
2443
2562
  if kind is None:
2563
+ ls=os.listdir(rootdir)
2564
+ ls = [f for f in ls if not f.startswith('.') and not f.startswith('~')]
2565
+ print(ls)
2444
2566
  df_all = pd.DataFrame(
2445
2567
  {
2446
- "fname": os.listdir(rootdir),
2447
- "fpath": [os.path.join(rootdir, i) for i in os.listdir(rootdir)],
2568
+ "fname": ls,
2569
+ "fpath": [os.path.join(rootdir, i) for i in ls],
2448
2570
  }
2449
2571
  )
2450
2572
  display(df_all)
@@ -2470,6 +2592,7 @@ def listdir(
2470
2592
 
2471
2593
  if os.path.isdir(rootdir):
2472
2594
  ls = os.listdir(rootdir)
2595
+ ls = [f for f in ls if not f.startswith('.') and not f.startswith('~')]
2473
2596
  fd = [".fd", ".fld", ".fol", ".fd", ".folder"]
2474
2597
  i = 0
2475
2598
  f = {
@@ -2692,6 +2815,8 @@ def figsave(*args, dpi=300):
2692
2815
  img = arg # Store the PIL image if provided
2693
2816
 
2694
2817
  f_slash = "/" if "mac" in get_os().lower() else "\\"
2818
+ if dir_save is None:
2819
+ dir_save="./"
2695
2820
  dir_par = f_slash.join(dir_save.split(f_slash)[:-1])
2696
2821
  dir_ch = "".join(dir_save.split(f_slash)[-1:])
2697
2822
  if not dir_par.endswith(f_slash):
@@ -4291,13 +4416,12 @@ def preview(var):
4291
4416
  # preview(pd.DataFrame({"Name": ["Alice", "Bob"], "Age": [25, 30]}))
4292
4417
  # preview({"key": "value", "numbers": [1, 2, 3]})
4293
4418
 
4294
-
4295
4419
  # ! DataFrame
4296
- def df_as_type(
4420
+ def df_astype(
4297
4421
  df: pd.DataFrame,
4298
4422
  columns: Optional[Union[str, List[str]]] = None,
4299
4423
  astype: str = "datetime",
4300
- format: Optional[str] = None,
4424
+ fmt: Optional[str] = None,
4301
4425
  inplace: bool = True,
4302
4426
  errors: str = "coerce", # Can be "ignore", "raise", or "coerce"
4303
4427
  **kwargs,
@@ -4310,7 +4434,7 @@ def df_as_type(
4310
4434
  - df: DataFrame containing the columns to convert.
4311
4435
  - columns: Either a single column name, a list of column names, or None to convert all columns.
4312
4436
  - astype: The target type to convert the columns to ('datetime', 'float', 'int', 'numeric', 'timedelta', etc.).
4313
- - format: Optional; format to specify the datetime format (only relevant for 'datetime' conversion).
4437
+ - fmt: Optional; format to specify the datetime format (only relevant for 'datetime' conversion).
4314
4438
  - inplace: Whether to modify the DataFrame in place or return a new one. Defaults to False.
4315
4439
  - errors: Can be "ignore", "raise", or "coerce"
4316
4440
  - **kwargs: Additional keyword arguments to pass to the conversion function (e.g., errors='ignore' for pd.to_datetime or pd.to_numeric).
@@ -4390,7 +4514,7 @@ def df_as_type(
4390
4514
  # convert it as type: datetime
4391
4515
  if isinstance(column, int):
4392
4516
  df.iloc[:, column] = pd.to_datetime(
4393
- df.iloc[:, column], format=format, errors=errors, **kwargs
4517
+ df.iloc[:, column], format=fmt, errors=errors, **kwargs
4394
4518
  )
4395
4519
  # further convert:
4396
4520
  if astype == "time":
@@ -4412,9 +4536,9 @@ def df_as_type(
4412
4536
  else:
4413
4537
  df[column] = (
4414
4538
  pd.to_datetime(
4415
- df[column], format=format, errors=errors, **kwargs
4539
+ df[column], format=fmt, errors=errors, **kwargs
4416
4540
  )
4417
- if format
4541
+ if fmt
4418
4542
  else pd.to_datetime(df[column], errors=errors, **kwargs)
4419
4543
  )
4420
4544
  # further convert:
@@ -4454,15 +4578,15 @@ def df_as_type(
4454
4578
  return df
4455
4579
 
4456
4580
 
4457
- # ! DataFrame
4458
- def df_sort_values(df, column, by=None, ascending=True, inplace=False, **kwargs):
4581
+ # ! DataFrame
4582
+ def df_sort_values(df, column, by=None, ascending=True, inplace=True, **kwargs):
4459
4583
  """
4460
- Sort a DataFrame by a specified column based on a custom order.
4584
+ Sort a DataFrame by a specified column based on a custom order or by count.
4461
4585
 
4462
4586
  Parameters:
4463
4587
  - df: DataFrame to be sorted.
4464
4588
  - column: The name of the column to sort by.
4465
- - by: List specifying the custom order for sorting.
4589
+ - by: List specifying the custom order for sorting or 'count' to sort by frequency.
4466
4590
  - ascending: Boolean or list of booleans, default True.
4467
4591
  Sort ascending vs. descending.
4468
4592
  - inplace: If True, perform operation in place and return None.
@@ -4474,12 +4598,25 @@ def df_sort_values(df, column, by=None, ascending=True, inplace=False, **kwargs)
4474
4598
  if column not in df.columns:
4475
4599
  raise ValueError(f"Column '{column}' does not exist in the DataFrame.")
4476
4600
 
4477
- if not isinstance(by, list):
4478
- raise ValueError("custom_order must be a list.")
4601
+ if isinstance(by, str) and 'count' in by.lower():
4602
+ # Count occurrences of each value in the specified column
4603
+ value_counts = df[column].value_counts()
4479
4604
 
4480
- try:
4605
+ # Determine the order based on counts
4606
+ count_ascending = kwargs.pop("count_ascending", ascending)
4607
+ sorted_counts = value_counts.sort_values(ascending=count_ascending).index.tolist()
4608
+
4609
+ # Convert to a categorical type with the new order
4610
+ df[column] = pd.Categorical(df[column], categories=sorted_counts, ordered=True)
4611
+ # Set ascending to count_ascending for sorting
4612
+ ascending = count_ascending # Adjust ascending for the final sort
4613
+ elif isinstance(by, list):
4481
4614
  # Convert the specified column to a categorical type with the custom order
4482
4615
  df[column] = pd.Categorical(df[column], categories=by, ordered=True)
4616
+ else:
4617
+ raise ValueError("Custom order must be a list or 'count'.")
4618
+
4619
+ try:
4483
4620
  if inplace: # replace the original
4484
4621
  df.sort_values(column, ascending=ascending, inplace=True, **kwargs)
4485
4622
  print(f"Successfully sorted DataFrame by '{column}'")
@@ -4493,6 +4630,7 @@ def df_sort_values(df, column, by=None, ascending=True, inplace=False, **kwargs)
4493
4630
  return df
4494
4631
 
4495
4632
 
4633
+
4496
4634
  # # Example usage:
4497
4635
  # # Sample DataFrame
4498
4636
  # data = {
@@ -5227,3 +5365,20 @@ def evaluate_cluster(
5227
5365
  metrics["V-Measure"] = np.nan
5228
5366
 
5229
5367
  return metrics
5368
+
5369
+
5370
+ def print_pd_usage(
5371
+ func_name="excel",
5372
+ verbose=True,
5373
+ dir_json="/Users/macjianfeng/Dropbox/github/python/py2ls/py2ls/data/usages_pd.json",
5374
+ ):
5375
+ default_settings = fload(dir_json, output='json')
5376
+ valid_kinds = list(default_settings.keys())
5377
+ kind = strcmp(func_name, valid_kinds)[0]
5378
+ usage=default_settings[kind]
5379
+ if verbose:
5380
+ for i, i_ in enumerate(ssplit(usage, by=",")):
5381
+ i_ = i_.replace("=", "\t= ") + ","
5382
+ print(i_) if i == 0 else print("\t", i_)
5383
+ else:
5384
+ print(usage)
py2ls/netfinder.py CHANGED
@@ -31,7 +31,7 @@ from PIL import Image
31
31
  from duckduckgo_search import DDGS
32
32
  from datetime import datetime
33
33
  import time
34
- from py2ls import ips
34
+ from . import ips
35
35
 
36
36
  dir_save = "/Users/macjianfeng/Dropbox/Downloads/"
37
37
  # Set up logging
@@ -421,12 +421,25 @@ def fetch_all(
421
421
  prefs = {"profile.managed_default_content_settings.images": 2}
422
422
  chrome_options.add_experimental_option("prefs", prefs)
423
423
  # chrome_options.page_load_strategy = capability
424
+ try:
425
+ # Try to install ChromeDriver using webdriver-manager
424
426
 
425
- service = Service(ChromeDriverManager().install())
426
- # driver_path='/Users/macjianfeng/.wdm/drivers/chromedriver/mac64/127.0.6533.119/chromedriver-mac-arm64/chromedriver'
427
- # service=Service(executable_path=driver_path)
427
+ # driver_path='/Users/macjianfeng/.wdm/drivers/chromedriver/mac64/129.0.6668.100/chromedriver-mac-arm64/chromedriver'
428
+ # service=Service(executable_path=driver_path)
428
429
 
429
- driver_ = webdriver.Chrome(service=service, options=chrome_options)
430
+ service = Service(ChromeDriverManager().install())
431
+ driver_ = webdriver.Chrome(service=service, options=chrome_options)
432
+ except Exception as e:
433
+ print(f"Error occurred: {e}")
434
+ print("Attempting to reinstall webdriver-manager...")
435
+ try:
436
+ ips.upgrade("webdriver-manager", uninstall=True)
437
+ service = Service(ChromeDriverManager().install())
438
+ driver_ = webdriver.Chrome(service=service, options=chrome_options)
439
+ except Exception as reinstall_error:
440
+ print(
441
+ f"Reinstallation failed: {reinstall_error}\n之前发生过类似的问题, 更新了webdriver-manager以后得到解决"
442
+ )
430
443
 
431
444
  # 隐式等等待
432
445
  if 3 < wait < 5:
py2ls/plot.py CHANGED
@@ -15,74 +15,117 @@ from .ips import fsave, fload, mkdir, listdir, figsave, strcmp, unique, get_os,
15
15
  from .stats import *
16
16
  from .netfinder import get_soup, fetch
17
17
 
18
-
19
18
  # Suppress INFO messages from fontTools
20
19
  logging.getLogger("fontTools").setLevel(logging.WARNING)
21
20
 
22
21
 
23
- def update_sns_usages(
24
- url="https://seaborn.pydata.org/generated/seaborn.swarmplot.html",
25
- dir_save=None,
26
- ):
27
- """
28
- Fetches usage examples of various Seaborn plotting functions from the Seaborn documentation website.
29
- It filters the relevant plot-related links, extracts usage examples, and saves them in a JSON file.
30
-
31
- Parameters:
32
- - url (str): URL of the Seaborn page to start extracting plot usages (default is swarmplot page).
33
- - dir_save (str): Directory where the JSON file containing usages will be saved (default is a local path).
34
-
35
- Saves:
36
- - A JSON file named 'usages_sns.json' containing plotting function names and their usage descriptions.
37
-
38
- Returns:
39
- - None
22
+ def add_text(ax=None, height_offset=0.5, fmt=".1f", **kwargs):
23
+ """Adds text annotations for various types of Seaborn and Matplotlib plots.
24
+ Args:
25
+ ax: Axes object.
26
+ height_offset: 0.5 (default) The vertical distance (offset) to place the text.
27
+ fmt: Default is ".1f" for one decimal place.
28
+ **kwargs: Additional keyword arguments for the text function
29
+ Usage:
30
+ ax = sns.barplot(x='Category', y='Values', data=data)
31
+ add_text(ax=ax, height_offset=1.0, color='black', fontsize=12)
32
+
33
+ The function will automatically detect the type of plot and add annotations accordingly.
34
+ It supports annotations for:
35
+ - **Bar Plots**: Displays the height of each bar.
36
+ - **Box Plots**: Shows the height of the boxes.
37
+ - **Scatter and Line Plots**: Displays the y-value for each point.
38
+ - **Histograms and KDE Plots**: Shows the maximum height of the bars.
39
+ - **Other Plots**: If the Axes contains containers, it handles those as well.
40
40
  """
41
+ from matplotlib.collections import LineCollection
41
42
 
42
- # extract each usage from its url
43
- def get_usage(url):
44
- sp = get_soup(url, driver="se")
45
- # preview(sp)
46
- return fetch(sp, where="dt")[0]
47
-
48
- if dir_save is None:
49
- if "mac" in get_os():
50
- dir_save = "/Users/macjianfeng/Dropbox/github/python/py2ls/py2ls/data/"
51
- else:
52
- dir_save = "Z:\\Jianfeng\\temp\\"
53
- sp = get_soup(url, driver="se")
54
- links_all = fetch(sp, where="a", get="href", class_="reference internal")
55
- filtered_links = unique(
56
- [
57
- i
58
- for i in links_all
59
- if not any(
60
- [
61
- i.startswith(cond)
62
- for cond in [
63
- "seaborn.JointGrid",
64
- "seaborn.PairGrid",
65
- "seaborn.objects",
66
- ]
67
- ]
68
- + ["plot" not in i]
69
- )
70
- ]
71
- )
72
- links = ["https://seaborn.pydata.org/generated/" + i for i in filtered_links]
43
+ ha = kwargs.pop("ha", "center")
44
+ va = kwargs.pop("va", "bottom")
45
+ if ax is None:
46
+ ax = plt.gca()
47
+ # Check if the Axes has patches (for bar, count, boxen, violin, and other plots with bars)
48
+ # Check for artists (for box plots)
49
+ if hasattr(ax, "artists") and ax.artists:
50
+ print("artists")
51
+ for box in ax.artists:
52
+ if hasattr(box, "get_height") and hasattr(box, "get_y"):
53
+ height = box.get_y() + box.get_height() # For box plots
54
+
55
+ ax.text(
56
+ box.get_x() + box.get_width() / 2.0,
57
+ height + height_offset,
58
+ format(height, fmt),
59
+ ha=ha,
60
+ va=va,
61
+ **kwargs,
62
+ )
73
63
 
74
- usages = [get_usage(i) for i in links]
75
- dict_usage = {}
76
- for usage in usages:
77
- dict_usage.update(
78
- {ssplit(usage, by="(")[0].replace("seaborn.", ""): usage[:-1]}
79
- )
80
- # save to local
81
- dir_save += "/" if not dir_save.endswith("/") else ""
82
- fsave(
83
- dir_save + "usages_sns.json",
84
- dict_usage,
85
- )
64
+ # Scatter plot or line plot
65
+ if hasattr(ax, "lines"):
66
+ print("lines")
67
+ for line in ax.lines:
68
+ if hasattr(line, "get_xydata"):
69
+ xdata, ydata = line.get_xydata().T # Get x and y data points
70
+ for x, y in zip(xdata, ydata):
71
+ ax.text(x, y + height_offset, format(y, fmt), **kwargs)
72
+
73
+ if hasattr(ax, "patches") and ax.patches:
74
+ print("patches")
75
+ for p in ax.patches:
76
+ if hasattr(p, "get_height"):
77
+ height = p.get_height() # For bar plots
78
+
79
+ ax.text(
80
+ p.get_x() + p.get_width() / 2.0,
81
+ height + height_offset,
82
+ format(height, fmt),
83
+ ha=ha,
84
+ va=va,
85
+ **kwargs,
86
+ )
87
+ # For histplot, kdeplot, rugplot
88
+ if hasattr(ax, "collections"):
89
+ print("collections")
90
+ for collection in ax.collections:
91
+ # If it is a histogram or KDE plot
92
+ if isinstance(collection, LineCollection):
93
+ for path in collection.get_paths():
94
+ if hasattr(path, "vertices"):
95
+ vertices = path.vertices
96
+ # Get the heights (y values) for histogram or KDE plots
97
+ ax.text(
98
+ vertices[:, 0].mean(),
99
+ vertices[:, 1].max() + height_offset,
100
+ format(vertices[:, 1].max(), fmt),
101
+ **kwargs,
102
+ )
103
+ # Handle point, strip, and swarm plots
104
+ elif type(collection).__name__ == "PathCollection":  # was a duplicate LineCollection check, making this branch unreachable
105
+ for path in collection.get_paths():
106
+ vertices = path.vertices
107
+ ax.text(
108
+ vertices[:, 0].mean(),
109
+ vertices[:, 1].max() + height_offset,
110
+ format(vertices[:, 1].max(), fmt),
111
+ **kwargs,
112
+ )
113
+ # Handle bar charts (not from seaborn)
114
+ if hasattr(ax, "containers"):
115
+ print("containers")
116
+ for container in ax.containers:
117
+ for bar in container:
118
+ if hasattr(bar, "get_height"):
119
+ height = bar.get_height()
120
+
121
+ ax.text(
122
+ bar.get_x() + bar.get_width() / 2.0,
123
+ height + height_offset,
124
+ format(height, fmt),
125
+ ha=ha,
126
+ va=va,
127
+ **kwargs,
128
+ )
86
129
 
87
130
 
88
131
  def heatmap(
@@ -2775,7 +2818,7 @@ import matplotlib.pyplot as plt
2775
2818
  from PIL import Image
2776
2819
 
2777
2820
 
2778
- def thumbnail(dir_img_list: list, figsize=(10, 10), dpi=100, show=False, usage=False):
2821
+ def thumbnail(dir_img_list: list, figsize=(10, 10), dpi=100, show=False, verbose=False):
2779
2822
  """
2780
2823
  Display a thumbnail figure of all images in the specified directory.
2781
2824
 
@@ -2784,7 +2827,7 @@ def thumbnail(dir_img_list: list, figsize=(10, 10), dpi=100, show=False, usage=F
2784
2827
  figsize (tuple): Size of the figure (width, height) in inches.
2785
2828
  dpi (int): Dots per inch for the figure.
2786
2829
  """
2787
- if usage:
2830
+ if verbose:
2788
2831
  print(
2789
2832
  'thumbnail(listdir("./img-innere-medizin-ii", ["jpeg", "jpg", "png"]).fpath.tolist(),figsize=[5,5],dpi=200)'
2790
2833
  )
@@ -2831,7 +2874,7 @@ def plot_xy(
2831
2874
  y=None,
2832
2875
  ax=None,
2833
2876
  kind: str = None, # Specify the kind of plot
2834
- usage=False,
2877
+ verbose=False,
2835
2878
  # kws_figsets:dict=None,
2836
2879
  **kwargs,
2837
2880
  ):
@@ -2846,7 +2889,7 @@ def plot_xy(
2846
2889
  hue (str): Column name for the hue (color) grouping.
2847
2890
  ax: Matplotlib axes object for the plot.
2848
2891
  kind (str): Type of plot ('scatter', 'line', 'displot', 'kdeplot', etc.).
2849
- usage (bool): If True, print default settings instead of plotting.
2892
+ verbose (bool): If True, print default settings instead of plotting.
2850
2893
  **kwargs: Additional keyword arguments for the plot functions.
2851
2894
 
2852
2895
  Returns:
@@ -2869,8 +2912,8 @@ def plot_xy(
2869
2912
  kind = [kind]
2870
2913
  kind = [strcmp(i, valid_kinds)[0] for i in kind]
2871
2914
  else:
2872
- usage = True
2873
- if usage:
2915
+ verbose = True
2916
+ if verbose:
2874
2917
  if kind is not None:
2875
2918
  for k in kind:
2876
2919
  if k in valid_kinds:
@@ -2888,7 +2931,7 @@ def plot_xy(
2888
2931
  kind=["scatter","rug"],
2889
2932
  kws_rug=dict(height=0.2),
2890
2933
  kws_scatter=dict(s=20, color=get_color(3)[2]),
2891
- usage=0)
2934
+ verbose=0)
2892
2935
  """
2893
2936
  print(f"currently support to plot:\n{valid_kinds}\n\nusage:\n{usage_str}")
2894
2937
  return # Do not plot, just print the usage
@@ -2899,6 +2942,12 @@ def plot_xy(
2899
2942
  kws_figsets = v_arg
2900
2943
  kwargs.pop(k_arg, None)
2901
2944
  break
2945
+ kws_text = {}
2946
+ for k_arg, v_arg in kwargs.items():
2947
+ if "add" in k_arg and all(["t" in k_arg, "x" in k_arg]): # add_text
2948
+ kws_text = v_arg
2949
+ kwargs.pop(k_arg, None)
2950
+ break
2902
2951
 
2903
2952
  for k in kind:
2904
2953
  # indicate 'col' features
@@ -2920,17 +2969,17 @@ def plot_xy(
2920
2969
  # (1) return FcetGrid
2921
2970
  if k == "jointplot":
2922
2971
  kws_joint = kwargs.pop("kws_joint", kwargs)
2923
- g = sns.jointplot(data=data, x=x, y=y, hue=hue, **kws_joint)
2972
+ g = sns.jointplot(data=data, x=x, y=y, **kws_joint)
2924
2973
  elif k == "lmplot":
2925
2974
  kws_lm = kwargs.pop("kws_lm", kwargs)
2926
- g = sns.lmplot(data=data, x=x, y=y, hue=hue, **kws_lm)
2975
+ g = sns.lmplot(data=data, x=x, y=y, **kws_lm)
2927
2976
  elif k == "catplot_sns":
2928
2977
  kws_cat = kwargs.pop("kws_cat", kwargs)
2929
- g = sns.catplot(data=data, x=x, y=y, hue=hue, **kws_cat)
2978
+ g = sns.catplot(data=data, x=x, y=y, **kws_cat)
2930
2979
  elif k == "displot":
2931
2980
  kws_dis = kwargs.pop("kws_dis", kwargs)
2932
2981
  # displot creates a new figure and returns a FacetGrid
2933
- g = sns.displot(data=data, x=x, hue=hue, **kws_dis)
2982
+ g = sns.displot(data=data, x=x, **kws_dis)
2934
2983
 
2935
2984
  # (2) return axis
2936
2985
  if ax is None:
@@ -2939,6 +2988,9 @@ def plot_xy(
2939
2988
  if k == "catplot":
2940
2989
  kws_cat = kwargs.pop("kws_cat", kwargs)
2941
2990
  g = catplot(data=data, x=x, y=y, ax=ax, **kws_cat)
2991
+ elif k == "stdshade":
2992
+ kws_stdshade = kwargs.pop("kws_stdshade", kwargs)
2993
+ ax = stdshade(ax=ax, **kwargs)
2942
2994
  elif k == "scatterplot":
2943
2995
  kws_scatter = kwargs.pop("kws_scatter", kwargs)
2944
2996
  palette = kws_scatter.pop(
@@ -2964,41 +3016,41 @@ def plot_xy(
2964
3016
  )
2965
3017
  elif k == "histplot":
2966
3018
  kws_hist = kwargs.pop("kws_hist", kwargs)
2967
- ax = sns.histplot(data=data, x=x, hue=hue, ax=ax, **kws_hist)
3019
+ ax = sns.histplot(data=data, x=x, ax=ax, **kws_hist)
2968
3020
  elif k == "kdeplot":
2969
3021
  kws_kde = kwargs.pop("kws_kde", kwargs)
2970
- ax = sns.kdeplot(data=data, x=x, hue=hue, ax=ax, **kws_kde)
3022
+ ax = sns.kdeplot(data=data, x=x, ax=ax, **kws_kde)
2971
3023
  elif k == "ecdfplot":
2972
3024
  kws_ecdf = kwargs.pop("kws_ecdf", kwargs)
2973
- ax = sns.ecdfplot(data=data, x=x, hue=hue, ax=ax, **kws_ecdf)
3025
+ ax = sns.ecdfplot(data=data, x=x, ax=ax, **kws_ecdf)
2974
3026
  elif k == "rugplot":
2975
3027
  kws_rug = kwargs.pop("kws_rug", kwargs)
2976
3028
  print(kws_rug)
2977
- ax = sns.rugplot(data=data, x=x, hue=hue, ax=ax, **kws_rug)
3029
+ ax = sns.rugplot(data=data, x=x, ax=ax, **kws_rug)
2978
3030
  elif k == "stripplot":
2979
3031
  kws_strip = kwargs.pop("kws_strip", kwargs)
2980
- ax = sns.stripplot(data=data, x=x, y=y, hue=hue, ax=ax, **kws_strip)
3032
+ ax = sns.stripplot(data=data, x=x, y=y, ax=ax, **kws_strip)
2981
3033
  elif k == "swarmplot":
2982
3034
  kws_swarm = kwargs.pop("kws_swarm", kwargs)
2983
- ax = sns.swarmplot(data=data, x=x, y=y, hue=hue, ax=ax, **kws_swarm)
3035
+ ax = sns.swarmplot(data=data, x=x, y=y, ax=ax, **kws_swarm)
2984
3036
  elif k == "boxplot":
2985
3037
  kws_box = kwargs.pop("kws_box", kwargs)
2986
- ax = sns.boxplot(data=data, x=x, y=y, hue=hue, ax=ax, **kws_box)
3038
+ ax = sns.boxplot(data=data, x=x, y=y, ax=ax, **kws_box)
2987
3039
  elif k == "violinplot":
2988
3040
  kws_violin = kwargs.pop("kws_violin", kwargs)
2989
- ax = sns.violinplot(data=data, x=x, y=y, hue=hue, ax=ax, **kws_violin)
3041
+ ax = sns.violinplot(data=data, x=x, y=y, ax=ax, **kws_violin)
2990
3042
  elif k == "boxenplot":
2991
3043
  kws_boxen = kwargs.pop("kws_boxen", kwargs)
2992
- ax = sns.boxenplot(data=data, x=x, y=y, hue=hue, ax=ax, **kws_boxen)
3044
+ ax = sns.boxenplot(data=data, x=x, y=y, ax=ax, **kws_boxen)
2993
3045
  elif k == "pointplot":
2994
3046
  kws_point = kwargs.pop("kws_point", kwargs)
2995
- ax = sns.pointplot(data=data, x=x, y=y, hue=hue, ax=ax, **kws_point)
3047
+ ax = sns.pointplot(data=data, x=x, y=y, ax=ax, **kws_point)
2996
3048
  elif k == "barplot":
2997
3049
  kws_bar = kwargs.pop("kws_bar", kwargs)
2998
- ax = sns.barplot(data=data, x=x, y=y, hue=hue, ax=ax, **kws_bar)
3050
+ ax = sns.barplot(data=data, x=x, y=y, ax=ax, **kws_bar)
2999
3051
  elif k == "countplot":
3000
3052
  kws_count = kwargs.pop("kws_count", kwargs)
3001
- ax = sns.countplot(data=data, x=x, hue=hue, ax=ax, **kws_count)
3053
+ ax = sns.countplot(data=data, x=x, ax=ax, **kws_count)
3002
3054
  elif k == "regplot":
3003
3055
  kws_reg = kwargs.pop("kws_reg", kwargs)
3004
3056
  ax = sns.regplot(data=data, x=x, y=y, ax=ax, **kws_reg)
@@ -3007,9 +3059,10 @@ def plot_xy(
3007
3059
  ax = sns.residplot(data=data, x=x, y=y, lowess=True, ax=ax, **kws_resid)
3008
3060
  elif k == "lineplot":
3009
3061
  kws_line = kwargs.pop("kws_line", kwargs)
3010
- ax = sns.lineplot(ax=ax, data=data, x=x, y=y, hue=hue, **kws_line)
3062
+ ax = sns.lineplot(ax=ax, data=data, x=x, y=y, **kws_line)
3011
3063
 
3012
- figsets(**kws_figsets)
3064
+ figsets(ax=ax, **kws_figsets)
3065
+ add_text(ax=ax, **kws_text)
3013
3066
  print(k, " ⤵ ")
3014
3067
  print(default_settings[k])
3015
3068
  print(
@@ -3040,7 +3093,7 @@ def volcano(
3040
3093
  alpha=0.8,
3041
3094
  legend=False,
3042
3095
  ax=None,
3043
- usage=False,
3096
+ verbose=False,
3044
3097
  kws_arrow=None,
3045
3098
  kws_text=None,
3046
3099
  **kwargs,
@@ -3097,11 +3150,11 @@ def volcano(
3097
3150
  edgecolor="0.5",
3098
3151
  kws_text=dict(fontsize=10, color="k"),
3099
3152
  kws_arrow=dict(style="-", color="k", lw=0.5),
3100
- # usage=True,
3153
+ # verbose=True,
3101
3154
  figsets=dict(ylim=[0, 10], title="df"),
3102
3155
  )
3103
3156
  """
3104
- if usage:
3157
+ if verbose:
3105
3158
  print(usage_str)
3106
3159
  return
3107
3160
  from adjustText import adjust_text
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: py2ls
3
- Version: 0.2.2
3
+ Version: 0.2.4
4
4
  Summary: py(thon)2(too)ls
5
5
  Author: Jianfeng
6
6
  Author-email: Jianfeng.Liu0413@gmail.com
@@ -1,4 +1,4 @@
1
- py2ls/.DS_Store,sha256=BloZZz2vlFVfF-I3X7ZsqXusvqOawJMx7erKcnIP-b0,6148
1
+ py2ls/.DS_Store,sha256=9qxq9N5T7k6muGHxG5s6yEdEEYqEh8eDGT3w7LokxsU,6148
2
2
  py2ls/.git/COMMIT_EDITMSG,sha256=AdtqRHle5Ej2EBNPJY79v-SB454v5UK4wuPCPFELiFQ,11
3
3
  py2ls/.git/FETCH_HEAD,sha256=VM-2Jiw6iPaGu0ftg9xwq76OyNPWV0iT1nL0VWiL1zI,100
4
4
  py2ls/.git/HEAD,sha256=KNJb-Cr0wOK3L1CVmyvrhZ4-YLljCl6MYD2tTdsrboA,21
@@ -203,21 +203,23 @@ py2ls/data/styles/style6.json,sha256=tu-MYOT9x5Rorc-2IK6sy-J-frmz0RNdm65XAsDQKX4
203
203
  py2ls/data/styles/style7.json,sha256=StdUFwIVrS7T_6CDrADHMorzc0WZFWBM7IyYdO1TPHg,4447
204
204
  py2ls/data/styles/style8.json,sha256=8XUgkZtew8ebvjbAHlDHCSWUqNra3ktDvMCO4vNh-CM,4456
205
205
  py2ls/data/styles/style9.json,sha256=PLxvntbH_kfzZlnCTtCEAUVBGi5m6Lngb9C01rArQog,4769
206
- py2ls/data/usages_sns.json,sha256=Vu2kGIIMxxWxJ1kW0Ov7mq47DQwZa_-gwsXiW72A2ag,7788
206
+ py2ls/data/usages_pd.json,sha256=A76y7lr28BUBdggFN_50r-4My3Q51M_3y0ZcQ4LHSco,15077
207
+ py2ls/data/usages_sns.json,sha256=3OTu6T7n9HbQaFkz-UPMJ_9-Ug6Xjf7q5aDIvZ_6cHk,9246
207
208
  py2ls/db2ls.py,sha256=MMfFX47aIPIyu7fU9aPvX9lbPRPYOpJ_VXwlnWk-8qo,13615
208
209
  py2ls/doc.py,sha256=xN3g1OWfoaGUhikbJ0NqbN5eKy1VZVvWwRlhHMgyVEc,4243
209
210
  py2ls/export_requirements.py,sha256=x2WgUF0jYKz9GfA1MVKN-MdsM-oQ8yUeC6Ua8oCymio,2325
211
+ py2ls/fetch_update.py,sha256=jh2MQHREpCqzYhDmEnMpuIvQVf4_wD9pDdIziXoFE3E,4055
210
212
  py2ls/freqanalysis.py,sha256=F4218VSPbgL5tnngh6xNCYuNnfR-F_QjECUUxrPYZss,32594
211
213
  py2ls/ich2ls.py,sha256=3E9R8oVpyYZXH5PiIQgT3CN5NxLe4Dwtm2LwaeacE6I,21381
212
- py2ls/ips.py,sha256=HzdfzVViyIkgarkU7WWTtgXO3zS_ZwwDISGaKJUQ49E,193975
213
- py2ls/netfinder.py,sha256=vgOOMhzwbjRuLWMAPyf_kh3HoOhsJ9dlA-tCkMf7kNU,55371
214
+ py2ls/ips.py,sha256=92b5_p1EM0VRsEylXln7oaI7eLmtcC3GkBcEHFTUkIU,199481
215
+ py2ls/netfinder.py,sha256=LwBkGITB_4BTNtY6RlKdEZVFW6epzMWlnqy2g03KtyU,56117
214
216
  py2ls/ocr.py,sha256=5lhUbJufIKRSOL6wAWVLEo8TqMYSjoI_Q-IO-_4u3DE,31419
215
- py2ls/plot.py,sha256=N6Rap4kX5xhae__WQG_YkBU8uSOH94IAxmoxZm2Wo9Q,129280
217
+ py2ls/plot.py,sha256=dxZeD27TZMzccN8_kyqSJf72PdZJk45gLuTYAeDq81I,131906
216
218
  py2ls/setuptools-70.1.0-py3-none-any.whl,sha256=2bi3cUVal8ip86s0SOvgspteEF8SKLukECi-EWmFomc,882588
217
219
  py2ls/sleep_events_detectors.py,sha256=bQA3HJqv5qnYKJJEIhCyhlDtkXQfIzqksnD0YRXso68,52145
218
220
  py2ls/stats.py,sha256=DMoJd8Z5YV9T1wB-4P52F5K5scfVK55DT8UP4Twcebo,38627
219
221
  py2ls/translator.py,sha256=zBeq4pYZeroqw3DT-5g7uHfVqKd-EQptT6LJ-Adi8JY,34244
220
222
  py2ls/wb_detector.py,sha256=7y6TmBUj9exCZeIgBAJ_9hwuhkDh1x_-yg4dvNY1_GQ,6284
221
- py2ls-0.2.2.dist-info/METADATA,sha256=IWoyyv2E8mcEblmBAyXYkVvrPncuRqSV1Ys-CpENv6Y,20036
222
- py2ls-0.2.2.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
223
- py2ls-0.2.2.dist-info/RECORD,,
223
+ py2ls-0.2.4.dist-info/METADATA,sha256=-i9MH0eTedNkRUR6XQuj6rikJRNwp9U2eA1CskY6X1Y,20036
224
+ py2ls-0.2.4.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
225
+ py2ls-0.2.4.dist-info/RECORD,,
File without changes