py2ls 0.2.4.33__py3-none-any.whl → 0.2.4.34__py3-none-any.whl
py2ls/ips.py
CHANGED
@@ -26,6 +26,16 @@ import re
 import stat
 import platform
 
+# only for backup these scripts
+def backup(
+    src="/Users/macjianfeng/Dropbox/github/python/py2ls/.venv/lib/python3.12/site-packages/py2ls/",
+    tar="/Users/macjianfeng/Dropbox/github/python/py2ls/py2ls/",
+    kind="py",
+    overwrite=True,
+):
+    f = listdir(src, kind)
+    [copy(i, tar, overwrite=overwrite) for i in f.path]
+    print(f"all files are copied from {os.path.basename(src)} to {tar}")
 def run_once_within(duration=60, reverse=False): # default 60s
     import time
 
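The new backup() helper simply mirrors every file of a given kind from one directory into another, reusing py2ls's own listdir() and copy(). A minimal usage sketch, assuming the function is imported from py2ls.ips; the paths below are placeholders, not the hard-coded defaults above:

    from py2ls.ips import backup

    # copy all *.py files from an installed copy of the package back into a working repo
    backup(src="/path/to/site-packages/py2ls/", tar="/path/to/repo/py2ls/", kind="py", overwrite=True)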
@@ -5506,7 +5516,9 @@ def imgsets(img,
     show_axis:bool=False,
     plot_:bool=True,
     verbose:bool=False,
-    ):
+    model:str="isnet-general-use",
+    **kwargs,
+    ):
     """
     Apply various enhancements and filters to an image using PIL's ImageEnhance and ImageFilter modules.
 
@@ -5562,6 +5574,27 @@ def imgsets(img,
         "BOX_BLUR",
         "MEDIAN_FILTER",
     ]
+    # *Rembg is a tool to remove images background.
+    # https://github.com/danielgatis/rembg
+    rem_models = {
+        "u2net": "general use cases.",
+        "u2netp": "A lightweight version of u2net model.",
+        "u2net_human_seg": "human segmentation.",
+        "u2net_cloth_seg": "Cloths Parsing from human portrait. Here clothes are parsed into 3 category: Upper body, Lower body and Full body.",
+        "silueta": "Same as u2net but the size is reduced to 43Mb.",
+        "isnet-general-use": "A new pre-trained model for general use cases.",
+        "isnet-anime": "A high-accuracy segmentation for anime character.",
+        "sam": "any use cases.",
+        "birefnet-general": "general use cases.",
+        "birefnet-general-lite": "A light pre-trained model for general use cases.",
+        "birefnet-portrait": "human portraits.",
+        "birefnet-dis": "dichotomous image segmentation (DIS).",
+        "birefnet-hrsod": "high-resolution salient object detection (HRSOD).",
+        "birefnet-cod": "concealed object detection (COD).",
+        "birefnet-massive": "A pre-trained model with massive dataset.",
+    }
+    models_support_rem=list(rem_models.keys())
+
     str_usage="""
     imgsets(dir_img, auto=1, color=1.5, plot_=0)
     imgsets(dir_img, color=2)
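The keys of rem_models are the model names rembg itself accepts. Outside of imgsets, the same models can be used directly through rembg's documented remove()/new_session() API; a minimal sketch (the file names are hypothetical):

    from PIL import Image
    from rembg import remove, new_session

    img = Image.open("portrait.png")             # hypothetical input image
    session = new_session("birefnet-portrait")   # any key from rem_models above
    remove(img, session=session).save("portrait_nobg.png")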
@@ -5569,6 +5602,7 @@ def imgsets(img,
     imgsets(dir_img, contrast=0, color=1.2, plot_=0)
     imgsets(get_clip(), flip="tb")# flip top and bottom
     imgsets(get_clip(), contrast=1, rm=[100, 5, 2]) #'foreground_threshold', 'background_threshold' and 'erode_structure_size'
+    imgsets(dir_img, rm="birefnet-portrait") # with using custom model
     """
     if run_once_within():
         print(str_usage)
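Together with the new model parameter in the signature, this gives two ways to pick a background-removal model. A hedged sketch of both call patterns, assuming dir_img points at an existing image file:

    # rm=True removes the background using the model named by the new keyword
    imgsets(dir_img, rm=True, model="u2net_human_seg", verbose=True)
    # or name the model directly through rm, as in the usage string above
    imgsets(dir_img, rm="birefnet-portrait")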
@@ -5577,28 +5611,7 @@ def imgsets(img,
         # adjust gama value
         inv_gamma = 1.0 / gamma
         lut = [int((i / float(v_max)) ** inv_gamma * int(v_max)) for i in range(int(v_max))]
-        return lut #image.point(lut)
-
-def confirm_rembg_models(model_name):
-    models_support = [
-        "u2net",
-        "u2netp",
-        "u2net_human_seg",
-        "u2net_cloth_seg",
-        "silueta",
-        "isnet-general-use",
-        "isnet-anime",
-        "sam",
-    ]
-    if model_name in models_support:
-        print(f"model_name: {model_name}")
-        return model_name
-    else:
-        print(
-            f"{model_name} cannot be found, check the name:{models_support}, default('isnet-general-use') has been used"
-        )
-        return "isnet-general-use"
-
+        return lut #image.point(lut)
 def auto_enhance(img):
     """
     Automatically enhances the image based on its characteristics, including brightness,
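For intuition about the gamma LUT kept in this hunk: with gamma=2.0 the exponent inv_gamma is 0.5, so mid-tones are brightened while the ends of the range stay put. A small self-contained check using the same formula:

    gamma, v_max = 2.0, 255
    inv_gamma = 1.0 / gamma
    lut = [int((i / float(v_max)) ** inv_gamma * int(v_max)) for i in range(int(v_max))]
    print(lut[128])   # 180: (128/255) ** 0.5 * 255 ~= 180.7, so mid-gray is lifted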
@@ -5765,9 +5778,11 @@ def imgsets(img,
             img_update = ImageOps.pad(img_update, size=value)
         elif "rem" in k.lower() or "rm" in k.lower() or "back" in k.lower():
             from rembg import remove, new_session
-
+            if verbose:
+                preview(rem_models)
+            model=strcmp(model, models_support_rem)[0]
+            session = new_session(model)
             if isinstance(value, bool):
-                session = new_session("isnet-general-use")
                 img_update = remove(img_update, session=session)
             elif value and isinstance(value, (int, float, list)):
                 if verbose:
@@ -5779,14 +5794,14 @@ def imgsets(img,
                     img_update = remove(
                         img_update,
                         alpha_matting=True,
-                        alpha_matting_background_threshold=value,
+                        alpha_matting_background_threshold=value, session=session
                     )
                 elif 2 <= len(value) < 3:
                     img_update = remove(
                         img_update,
                         alpha_matting=True,
                         alpha_matting_background_threshold=value[0],
-                        alpha_matting_foreground_threshold=value[1],
+                        alpha_matting_foreground_threshold=value[1], session=session
                     )
                 elif 3 <= len(value) < 4:
                     img_update = remove(
@@ -5794,17 +5809,15 @@ def imgsets(img,
                         alpha_matting=True,
                         alpha_matting_background_threshold=value[0],
                         alpha_matting_foreground_threshold=value[1],
-                        alpha_matting_erode_size=value[2],
+                        alpha_matting_erode_size=value[2], session=session
                     )
             elif isinstance(value, tuple): # replace the background color
                 if len(value) == 3:
                     value += (255,)
-                img_update = remove(img_update, bgcolor=value)
+                img_update = remove(img_update, bgcolor=value, session=session)
             elif isinstance(value, str):
-
-
-            else:
-                img_update = remove(img_update)
+                # use custom model
+                img_update = remove(img_update, session=new_session(strcmp(value, models_support_rem)[0]))
         elif "bg" in k.lower() and "color" in k.lower():
             from rembg import remove
 
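All of the remove() calls above now reuse the single session built from the chosen model. For reference, a standalone sketch of the alpha-matting call that rm=[100, 5, 2] expands to under this branch (the threshold values are just the ones from the usage string; img and session are assumed to exist as in the earlier sketch):

    img_out = remove(
        img,
        session=session,
        alpha_matting=True,
        alpha_matting_background_threshold=100,
        alpha_matting_foreground_threshold=5,
        alpha_matting_erode_size=2,
    )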
py2ls/ocr.py
CHANGED
@@ -9,8 +9,6 @@ from py2ls.ips import (
     isa
 )
 import logging
-#logging.getLogger("ppocr").setLevel(logging.ERROR)
-logging.getLogger("ppocr").setLevel(logging.WARNING)
 
 """
 Optical Character Recognition (OCR)
@@ -618,7 +616,6 @@ def get_text(
         image = cv2.imread(image)
     elif isa(image,'image'):
         cvt_cmp=False
-        print(1)
         image = np.array(image)
     else:
         raise ValueError(f"not support image with {type(image)} type")
@@ -704,6 +701,8 @@ def get_text(
         return detections
     elif "pad" in model.lower():
         from paddleocr import PaddleOCR
+        logging.getLogger("ppocr").setLevel(logging.ERROR)
+
         lang=strcmp(lang, ['ch','en','french','german','korean','japan'])[0]
         ocr = PaddleOCR(
             use_angle_cls=True,
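Scoping the log-level call to the paddle branch means "ppocr" chatter is only silenced when that backend is actually selected. A minimal sketch of the same pattern on its own (the image path is hypothetical):

    import logging
    from paddleocr import PaddleOCR

    logging.getLogger("ppocr").setLevel(logging.ERROR)  # keep PaddleOCR's per-step logs quiet
    ocr = PaddleOCR(use_angle_cls=True, lang="en")
    result = ocr.ocr("scan.png", cls=True)              # hypothetical input file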
py2ls-0.2.4.34.dist-info/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: py2ls
-Version: 0.2.4.33
+Version: 0.2.4.34
 Summary: py(thon)2(too)ls
 Author: Jianfeng
 Author-email: Jianfeng.Liu0413@gmail.com
@@ -85,7 +85,7 @@ Requires-Dist: h2 (>=3.2.0)
 Requires-Dist: h5py (>=3.11.0)
 Requires-Dist: hpack (>=3.0.0)
 Requires-Dist: hstspreload (>=2024.7.1)
-Requires-Dist: httpcore (>=0.9.
+Requires-Dist: httpcore (>=0.9.0)
 Requires-Dist: httpx (>=0.13.3)
 Requires-Dist: humanfriendly (>=10.0)
 Requires-Dist: hyperframe (>=5.2.0)
py2ls-0.2.4.34.dist-info/RECORD
CHANGED
@@ -242,18 +242,18 @@ py2ls/export_requirements.py,sha256=x2WgUF0jYKz9GfA1MVKN-MdsM-oQ8yUeC6Ua8oCymio,
 py2ls/fetch_update.py,sha256=9LXj661GpCEFII2wx_99aINYctDiHni6DOruDs_fdt8,4752
 py2ls/freqanalysis.py,sha256=F4218VSPbgL5tnngh6xNCYuNnfR-F_QjECUUxrPYZss,32594
 py2ls/ich2ls.py,sha256=3E9R8oVpyYZXH5PiIQgT3CN5NxLe4Dwtm2LwaeacE6I,21381
-py2ls/ips.py,sha256=
+py2ls/ips.py,sha256=MCwgNPw-E1lAWj0nVlVPcGvixADB3eNuFMrWGTDRrxo,404232
 py2ls/ml2ls.py,sha256=I-JFPdikgEtfQjhv5gBz-QSeorpTJI_Pda_JwkTioBY,209732
 py2ls/mol.py,sha256=AZnHzarIk_MjueKdChqn1V6e4tUle3X1NnHSFA6n3Nw,10645
 py2ls/netfinder.py,sha256=6XZWxFCo5PNOVKdr5qGL_250AoKLfz6CuVmhGkDwkFM,69266
 py2ls/nl2ls.py,sha256=UEIdok-OamFZFIvvz_PdZenu085zteMdaJd9mLu3F-s,11485
-py2ls/ocr.py,sha256=
+py2ls/ocr.py,sha256=qAIk7hzKwbryWaCtWRzBQgO89JBQoHk8tjGUwz4ykoM,33935
 py2ls/plot.py,sha256=7C1x6KX0Fvmbll4IStIzlNjxLnrRBNSPaLJRgGjF3Ok,239172
 py2ls/setuptools-70.1.0-py3-none-any.whl,sha256=2bi3cUVal8ip86s0SOvgspteEF8SKLukECi-EWmFomc,882588
 py2ls/sleep_events_detectors.py,sha256=bQA3HJqv5qnYKJJEIhCyhlDtkXQfIzqksnD0YRXso68,52145
 py2ls/stats.py,sha256=qBn2rJmNa_QLLUqjwYqXUlGzqmW94sgA1bxJU2FC3r0,39175
 py2ls/translator.py,sha256=77Tp_GjmiiwFbEIJD_q3VYpQ43XL9ZeJo6Mhl44mvh8,34284
 py2ls/wb_detector.py,sha256=7y6TmBUj9exCZeIgBAJ_9hwuhkDh1x_-yg4dvNY1_GQ,6284
-py2ls-0.2.4.
-py2ls-0.2.4.
-py2ls-0.2.4.
+py2ls-0.2.4.34.dist-info/METADATA,sha256=KQ3NGQ07YFbGX__qttlwPsvwLBDPODKCJt9S7WStkqM,20332
+py2ls-0.2.4.34.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+py2ls-0.2.4.34.dist-info/RECORD,,