py2ls 0.2.4.32__py3-none-any.whl → 0.2.4.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- py2ls/.git/index +0 -0
- py2ls/ips.py +736 -164
- py2ls/netfinder.py +99 -0
- py2ls/ocr.py +140 -126
- py2ls/plot.py +612 -376
- {py2ls-0.2.4.32.dist-info → py2ls-0.2.4.33.dist-info}/METADATA +1 -1
- {py2ls-0.2.4.32.dist-info → py2ls-0.2.4.33.dist-info}/RECORD +8 -8
- {py2ls-0.2.4.32.dist-info → py2ls-0.2.4.33.dist-info}/WHEEL +0 -0
py2ls/ips.py CHANGED
@@ -1,6 +1,7 @@
 import numpy as np
 import pandas as pd
-import sys
+import sys
+import os
 from IPython.display import display
 from typing import List, Optional, Union

@@ -17,12 +18,13 @@ import warnings
 warnings.simplefilter("ignore", category=pd.errors.SettingWithCopyWarning)
 warnings.filterwarnings("ignore", category=pd.errors.PerformanceWarning)
 warnings.filterwarnings("ignore")
-import os
 import shutil
 import logging
 from pathlib import Path
 from datetime import datetime
-
+import re
+import stat
+import platform

 def run_once_within(duration=60, reverse=False): # default 60s
     import time
@@ -786,13 +788,22 @@ def strcmp(
     return candidates[best_match_index], best_match_index


-def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
+def imgcmp(img: list,
+           method:str ="knn",
+           thr:float =0.75,
+           detector: str = "sift",
+           plot_:bool =True,
+           figsize=[12, 6],
+           grid_size=10,# only for grid detector
+           **kwargs):
     """
     Compare two images using SSIM, Feature Matching (SIFT), or KNN Matching.

     Parameters:
-    - img (list): List containing two image file paths [img1, img2].
+    - img (list): List containing two image file paths [img1, img2] or two numpy arrays.
     - method (str): Comparison method ('ssim', 'match', or 'knn').
+    - detector (str): Feature detector ('sift', 'grid', 'pixel').
+    - thr (float): Threshold for filtering matches.
     - plot_ (bool): Whether to display the results visually.
     - figsize (list): Size of the figure for plots.

@@ -805,8 +816,13 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
     from skimage.metrics import structural_similarity as ssim

     # Load images
-    image1 = cv2.imread(img[0])
-    image2 = cv2.imread(img[1])
+    if isinstance(img, list) and isinstance(img[0],str):
+        image1 = cv2.imread(img[0])
+        image2 = cv2.imread(img[1])
+        bool_cvt=True
+    else:
+        image1, image2 = np.array(img[0]),np.array(img[1])
+        bool_cvt=False

     if image1 is None or image2 is None:
         raise ValueError("Could not load one or both images. Check file paths.")
@@ -841,21 +857,53 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
     elif method in ["match", "knn"]:
         # Convert images to grayscale
         gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
-        gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
-
-
-
+        gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
+
+        if detector == "sift":
+            # SIFT detector
+            sift = cv2.SIFT_create()
+            keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
+            keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
+
+        elif detector == "grid":
+            # Grid-based detection
+            keypoints1, descriptors1 = [], []
+            keypoints2, descriptors2 = [], []
+
+            for i in range(0, gray1.shape[0], grid_size):
+                for j in range(0, gray1.shape[1], grid_size):
+                    patch1 = gray1[i:i + grid_size, j:j + grid_size]
+                    patch2 = gray2[i:i + grid_size, j:j + grid_size]
+                    if patch1.size > 0 and patch2.size > 0:
+                        keypoints1.append(cv2.KeyPoint(j + grid_size // 2, i + grid_size // 2, grid_size))
+                        keypoints2.append(cv2.KeyPoint(j + grid_size // 2, i + grid_size // 2, grid_size))
+                        descriptors1.append(np.mean(patch1))
+                        descriptors2.append(np.mean(patch2))
+
+            descriptors1 = np.array(descriptors1).reshape(-1, 1)
+            descriptors2 = np.array(descriptors2).reshape(-1, 1)
+
+        elif detector == "pixel":
+            # Pixel-based direct comparison
+            descriptors1 = gray1.flatten()
+            descriptors2 = gray2.flatten()
+            keypoints1 = [cv2.KeyPoint(x, y, 1) for y in range(gray1.shape[0]) for x in range(gray1.shape[1])]
+            keypoints2 = [cv2.KeyPoint(x, y, 1) for y in range(gray2.shape[0]) for x in range(gray2.shape[1])]

-
-
-
-
-        if
-        raise ValueError("
+        else:
+            raise ValueError("Invalid detector. Use 'sift', 'grid', or 'pixel'.")
+
+        # Handle missing descriptors
+        if descriptors1 is None or descriptors2 is None:
+            raise ValueError("Failed to compute descriptors for one or both images.")
+        # Ensure descriptors are in the correct data type
+        if descriptors1.dtype != np.float32:
+            descriptors1 = descriptors1.astype(np.float32)
+        if descriptors2.dtype != np.float32:
+            descriptors2 = descriptors2.astype(np.float32)

         # BFMatcher initialization
         bf = cv2.BFMatcher()
-
         if method == "match": # Cross-check matching
             bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
             matches = bf.match(descriptors1, descriptors2)
@@ -863,13 +911,14 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):

             # Filter good matches
             good_matches = [
-                m for m in matches if m.distance <
+                m for m in matches if m.distance < thr * matches[-1].distance
             ]

         elif method == "knn": # KNN matching with ratio test
+            bf = cv2.BFMatcher()
             matches = bf.knnMatch(descriptors1, descriptors2, k=2)
             # Apply Lowe's ratio test
-            good_matches = [m for m, n in matches if m.distance <
+            good_matches = [m for m, n in matches if m.distance < thr * n.distance]

         # Calculate similarity score
         similarity_score = len(good_matches) / min(len(keypoints1), len(keypoints2))
@@ -887,23 +936,24 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
         dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(
             -1, 1, 2
         )
-
-        # Calculate Homography using RANSAC
-        homography_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
-
         # Apply the homography to image2
-
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            # Calculate Homography using RANSAC
+            homography_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+            h, w = image1.shape[:2]
+            warped_image2 = cv2.warpPerspective(image2, homography_matrix, (w, h))
+
+            # Plot result if needed
+            if plot_:
+                fig, ax = plt.subplots(1, 2, figsize=figsize)
+                ax[0].imshow(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[0].imshow(image1)
+                ax[0].set_title("Image 1")
+                ax[1].imshow(cv2.cvtColor(warped_image2, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[1].imshow(warped_image2)
+                ax[1].set_title("Warped Image 2")
+                plt.tight_layout()
+                plt.show()
+        except Exception as e:
+            print(e)

         # Plot matches if needed
         if plot_:
@@ -911,28 +961,41 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
                 image1, keypoints1, image2, keypoints2, good_matches, None, flags=2
             )
             plt.figure(figsize=figsize)
-            plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
-            plt.title(
-                f"Feature Matches ({len(good_matches)} matches, Score: {similarity_score:.4f})"
-            )
+            plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB)) if bool_cvt else plt.imshow(result)
+            plt.title(f"Feature Matches ({len(good_matches)} matches, Score: {similarity_score:.4f})")
             plt.axis("off")
             plt.show()
         # Identify unmatched keypoints
         matched_idx1 = [m.queryIdx for m in good_matches]
         matched_idx2 = [m.trainIdx for m in good_matches]
-
+        matched_kp1 = [kp for i, kp in enumerate(keypoints1) if i in matched_idx1]
+        matched_kp2 = [kp for i, kp in enumerate(keypoints2) if i in matched_idx2]
         unmatched_kp1 = [kp for i, kp in enumerate(keypoints1) if i not in matched_idx1]
         unmatched_kp2 = [kp for i, kp in enumerate(keypoints2) if i not in matched_idx2]

-        # Mark
-
+        # Mark keypoints on the images
+        img1_match = cv2.drawKeypoints(
+            image1,
+            matched_kp1,
+            None,
+            color=(0, 0, 255),
+            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
+        )
+        img2_match = cv2.drawKeypoints(
+            image2,
+            matched_kp2,
+            None,
+            color=(0, 0, 255),
+            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
+        )
+        img1_unmatch = cv2.drawKeypoints(
             image1,
             unmatched_kp1,
             None,
             color=(0, 0, 255),
             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
         )
-
+        img2_unmatch = cv2.drawKeypoints(
             image2,
             unmatched_kp2,
             None,
@@ -940,16 +1003,27 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
         )

-        # Display results
         if plot_:
             fig, ax = plt.subplots(1, 2, figsize=figsize)
-            ax[0].imshow(cv2.cvtColor(
+            ax[0].imshow(cv2.cvtColor(img1_unmatch, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[0].imshow(img1_unmatch)
             ax[0].set_title("Unmatched Keypoints (Image 1)")
-            ax[1].imshow(cv2.cvtColor(
+            ax[1].imshow(cv2.cvtColor(img2_unmatch, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[1].imshow(img2_unmatch)
             ax[1].set_title("Unmatched Keypoints (Image 2)")
+            ax[0].axis("off")
+            ax[1].axis("off")
+            plt.tight_layout()
+            plt.show()
+        if plot_:
+            fig, ax = plt.subplots(1, 2, figsize=figsize)
+            ax[0].imshow(cv2.cvtColor(img1_match, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[0].imshow(img1_match)
+            ax[0].set_title("Matched Keypoints (Image 1)")
+            ax[1].imshow(cv2.cvtColor(img2_match, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[1].imshow(img2_match)
+            ax[1].set_title("Matched Keypoints (Image 2)")
+            ax[0].axis("off")
+            ax[1].axis("off")
             plt.tight_layout()
             plt.show()
-        return good_matches, similarity_score
+        return good_matches, similarity_score#, homography_matrix

     else:
         raise ValueError("Invalid method. Use 'ssim', 'match', or 'knn'.")
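Taken together, these imgcmp hunks add array inputs, selectable detectors, and a tunable match threshold. A minimal usage sketch under those assumptions (file names are hypothetical, the `py2ls.ips` import path is assumed from the file being diffed, and opencv-python, scikit-image, and matplotlib must be installed):

```python
import numpy as np
from py2ls.ips import imgcmp  # import path assumed from py2ls/ips.py

# Two file paths: loaded via cv2.imread, so plots convert BGR -> RGB (bool_cvt=True)
good, score = imgcmp(["a.png", "b.png"], method="knn", detector="sift", thr=0.75)

# Two in-memory arrays skip the cv2 color conversion (bool_cvt=False)
# good, score = imgcmp([arr1, arr2], method="match", detector="grid", grid_size=16, plot_=False)
print(f"{len(good)} good matches, similarity {score:.3f}")
```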
@@ -969,9 +1043,7 @@ def cn2pinyin(
     Args:
         cn_str (str): Chinese string to convert.
         sep (str): Separator for the output Pinyin string.
-
-            "finals","finals_tone","finals_tone2","finals_tone3",
-            "initials","bopomofo","bopomofo_first","cyrillic","pl",
+        fmt (Style): "normal","tone", "tone2","tone3","finals","finals_tone","finals_tone2","finals_tone3","initials","bopomofo","bopomofo_first","cyrillic","pl",
     Returns:
         cn_str: The Pinyin representation of the Chinese string.
     """
@@ -1224,7 +1296,6 @@ def text2audio(
         print(f"Error opening file: {e}")
     print("done")

-
 def str2time(time_str, fmt="24"):
     """
     Convert a time string into the specified format.
@@ -3649,8 +3720,8 @@ def get_os(full=False, verbose=False):
     import os
     import subprocess
     from datetime import datetime, timedelta
-    from collections import defaultdict

+
     def get_os_type():
         os_name = sys.platform
         if "dar" in os_name:
@@ -3663,7 +3734,8 @@ def get_os(full=False, verbose=False):
         else:
             print(f"{os_name}, returned 'None'")
             return None
-
+    if not full:
+        return get_os_type()
     def get_os_info():
         """Get the detailed OS name, version, and other platform-specific details."""

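With the new early return, get_os() answers from get_os_type() alone unless full=True. A quick sketch of the resulting call pattern (return strings are illustrative, not confirmed by the diff):

```python
from py2ls.ips import get_os  # import path assumed

print(get_os())              # platform name only, e.g. something like "macOS" or "Windows"
details = get_os(full=True)  # continues into get_os_info() for the detailed report
```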
@@ -4074,11 +4146,6 @@ def get_os(full=False, verbose=False):
     return res


-import re
-import stat
-import platform
-
-
 def listdir(
     rootdir,
     kind=None,
@@ -4695,57 +4762,64 @@ def is_image(fpath):
     Returns:
         bool: True if the file is a recognized image, False otherwise.
     """
-    import
+    from PIL import Image
+    if isinstance(fpath,str):
+        import mimetypes
+
+        # Known image MIME types
+        image_mime_types = {
+            "image/jpeg",
+            "image/png",
+            "image/gif",
+            "image/bmp",
+            "image/webp",
+            "image/tiff",
+            "image/x-icon",
+            "image/svg+xml",
+            "image/heic",
+            "image/heif",
+        }

-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Known image file extensions
+        image_extensions = {
+            ".jpg",
+            ".jpeg",
+            ".png",
+            ".gif",
+            ".bmp",
+            ".webp",
+            ".tif",
+            ".tiff",
+            ".ico",
+            ".svg",
+            ".heic",
+            ".heif",
+            ".fig",
+            ".jpg",
+        }

-
-
-        ".jpg",
-        ".jpeg",
-        ".png",
-        ".gif",
-        ".bmp",
-        ".webp",
-        ".tif",
-        ".tiff",
-        ".ico",
-        ".svg",
-        ".heic",
-        ".heif",
-        ".fig",
-        ".jpg",
-    }
+        # Get MIME type using mimetypes
+        mime_type, _ = mimetypes.guess_type(fpath)

-
-
+        # Check MIME type
+        if mime_type in image_mime_types:
+            return True

-
-
-
+        # Fallback: Check file extension
+        ext = os.path.splitext(fpath)[
+            -1
+        ].lower() # Get the file extension and ensure lowercase
+        if ext in image_extensions:
+            return True

-
-
-
-
-    if ext in image_extensions:
+        return False
+
+    elif isinstance(fpath, Image.Image):
+        # If the input is a PIL Image object
         return True

     return False

-
 def is_video(fpath):
     """
     Determine if a given file is a video based on MIME type and file extension.
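is_image now branches on input type: strings go through MIME and extension checks, while PIL Image objects return True directly. A short sketch (file names hypothetical; requires Pillow):

```python
from PIL import Image
from py2ls.ips import is_image  # import path assumed

print(is_image("photo.jpeg"))              # True via the image/jpeg MIME type
print(is_image(Image.new("RGB", (8, 8))))  # True: already a PIL Image object
print(is_image("notes.txt"))               # False
```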
@@ -5055,6 +5129,105 @@ def str2list(str_):
     [l.append(x) for x in str_]
     return l

+def str2words(content, method="combined", custom_dict=None, sym_spell_params=None, use_threading=True):
+    """
+    Ultimate text correction function supporting multiple methods,
+    lists or strings, and domain-specific corrections.
+
+    Parameters:
+        content (str or list): Input text or list of strings to correct.
+        method (str): Correction method ('textblob', 'sym', 'combined').
+        custom_dict (dict): Custom dictionary for domain-specific corrections.
+        sym_spell_params (dict): Parameters for initializing SymSpell.
+
+    Returns:
+        str or list: Corrected text or list of corrected strings.
+    """
+    from textblob import TextBlob
+    from symspellpy import SymSpell, Verbosity
+    from functools import lru_cache
+    import pkg_resources
+    from concurrent.futures import ThreadPoolExecutor
+
+    def initialize_symspell(max_edit_distance=2, prefix_length=7):
+        """Initialize SymSpell for advanced spelling correction."""
+        sym_spell = SymSpell(max_edit_distance, prefix_length)
+        dictionary_path = pkg_resources.resource_filename(
+            "symspellpy",
+            # "frequency_bigramdictionary_en_243_342.txt",
+            "frequency_dictionary_en_82_765.txt",
+        )
+
+        sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)
+        return sym_spell
+
+    def segment_words(text, sym_spell):
+        """Segment concatenated words into separate words."""
+        segmented = sym_spell.word_segmentation(text)
+        return segmented.corrected_string
+
+    @lru_cache(maxsize=1000) # Cache results for repeated corrections
+    def advanced_correction(word, sym_spell):
+        """Correct a single word using SymSpell."""
+        suggestions = sym_spell.lookup(word, Verbosity.CLOSEST, max_edit_distance=2)
+        return suggestions[0].term if suggestions else word
+
+    def apply_custom_corrections(word, custom_dict):
+        """Apply domain-specific corrections using a custom dictionary."""
+        return custom_dict.get(word.lower(), word)
+    def preserve_case(original_word, corrected_word):
+        """
+        Preserve the case of the original word in the corrected word.
+        """
+        if original_word.isupper():
+            return corrected_word.upper()
+        elif original_word[0].isupper():
+            return corrected_word.capitalize()
+        else:
+            return corrected_word.lower()
+    def process_string(text, method, sym_spell=None, custom_dict=None):
+        """
+        Process a single string for spelling corrections.
+        Handles TextBlob, SymSpell, and custom corrections.
+        """
+        if method in ("sym", "combined") and sym_spell:
+            text = segment_words(text, sym_spell)
+
+        if method in ("textblob", "combined"):
+            text = str(TextBlob(text).correct())
+
+        corrected_words = []
+        for word in text.split():
+            original_word = word
+            if method in ("sym", "combined") and sym_spell:
+                word = advanced_correction(word, sym_spell)
+
+            # Step 3: Apply custom corrections
+            if custom_dict:
+                word = apply_custom_corrections(word, custom_dict)
+            # Preserve original case
+            word = preserve_case(original_word, word)
+            corrected_words.append(word)
+
+        return " ".join(corrected_words)
+
+    # Initialize SymSpell if needed
+    sym_spell = None
+    if method in ("sym", "combined"):
+        if not sym_spell_params:
+            sym_spell_params = {"max_edit_distance": 2, "prefix_length": 7}
+        sym_spell = initialize_symspell(**sym_spell_params)
+
+    # Process lists or strings
+    if isinstance(content, list):
+        if use_threading:
+            with ThreadPoolExecutor() as executor:
+                corrected_content = list(executor.map(lambda x: process_string(x, method, sym_spell, custom_dict), content))
+            return corrected_content
+        else:
+            return [process_string(item, method, sym_spell, custom_dict) for item in content]
+    else:
+        return process_string(content, method, sym_spell, custom_dict)

 def load_img(fpath):
     """
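A sketch of the new str2words (requires the textblob and symspellpy packages; the custom_dict entry is a hypothetical domain fix). Lists are corrected in parallel through a ThreadPoolExecutor unless use_threading=False:

```python
from py2ls.ips import str2words  # import path assumed

print(str2words("Ths is a tst sentense"))  # combined TextBlob + SymSpell pass

fixed = str2words(["speling error", "ACCURAY matters"],
                  method="sym",
                  custom_dict={"accuray": "accuracy"})  # hypothetical correction
print(fixed)
```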
@@ -5078,7 +5251,7 @@ def load_img(fpath):
     raise OSError(f"Unable to open file '{fpath}' or it is not a valid image file.")


-def apply_filter(img, *args):
+def apply_filter(img, *args,verbose=True):
     # def apply_filter(img, filter_name, filter_value=None):
     """
     Apply the specified filter to the image.
@@ -5092,7 +5265,7 @@ def apply_filter(img, *args):
    from PIL import ImageFilter

    def correct_filter_name(filter_name):
-        if "
+        if all(["b" in filter_name.lower(),"ur" in filter_name.lower(), "box" not in filter_name.lower()]):
            return "BLUR"
        elif "cont" in filter_name.lower():
            return "Contour"
@@ -5156,10 +5329,11 @@ def apply_filter(img, *args):

    for arg in args:
        if isinstance(arg, str):
-            filter_name = arg
-            filter_name = correct_filter_name(filter_name)
+            filter_name = correct_filter_name(arg)
        else:
            filter_value = arg
+    if verbose:
+        print(f'processing {filter_name}')
    filter_name = filter_name.upper() # Ensure filter name is uppercase

    # Supported filters
@@ -5203,7 +5377,7 @@ def apply_filter(img, *args):
        bands = filter_value if filter_value is not None else None
        return img.filter(supported_filters[filter_name](bands))
    else:
-        if filter_value is not None:
+        if filter_value is not None and verbose:
            print(
                f"{filter_name} doesn't require a value for {filter_value}, but it remains unaffected"
            )
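apply_filter gains a verbose flag, and correct_filter_name now maps fuzzy names (anything containing "b" and "ur" but not "box" becomes BLUR). A sketch, assuming a PIL image (path hypothetical):

```python
from py2ls.ips import load_img, apply_filter  # import path assumed

img = load_img("photo.jpg")                            # hypothetical path
blurred = apply_filter(img, "blur")                    # fuzzy-matched to BLUR, prints progress
contour = apply_filter(img, "contour", verbose=False)  # same call, silenced
```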
@@ -5220,6 +5394,8 @@ def detect_angle(image, by="median", template=None):
    import cv2

    # Convert to grayscale
+    if np.array(image).shape[-1]>3:
+        image=np.array(image)[:,:,:3]
    gray_image = rgb2gray(image)

    # Detect edges using Canny edge detector
@@ -5231,9 +5407,10 @@ def detect_angle(image, by="median", template=None):
    if not lines and any(["me" in by, "pca" in by]):
        print("No lines detected. Adjust the edge detection parameters.")
        return 0
-
+    methods=['mean','median','pca','gradient orientation','template matching','moments','fft']
+    by=strcmp(by, methods)[0]
    # Hough Transform-based angle detection (Median/Mean)
-    if "me" in by:
+    if "me" in by.lower():
        angles = []
        for line in lines:
            (x0, y0), (x1, y1) = line
@@ -5256,7 +5433,7 @@ def detect_angle(image, by="median", template=None):
        return rotation_angle

    # PCA-based angle detection
-    elif "pca" in by:
+    elif "pca" in by.lower():
        y, x = np.nonzero(edges)
        if len(x) == 0:
            return 0
@@ -5266,14 +5443,14 @@ def detect_angle(image, by="median", template=None):
        return angle

    # Gradient Orientation-based angle detection
-    elif "gra" in by:
+    elif "gra" in by.lower():
        gx, gy = np.gradient(gray_image)
        angles = np.arctan2(gy, gx) * 180 / np.pi
        hist, bin_edges = np.histogram(angles, bins=360, range=(-180, 180))
        return bin_edges[np.argmax(hist)]

    # Template Matching-based angle detection
-    elif "temp" in by:
+    elif "temp" in by.lower():
        if template is None:
            # Automatically extract a template from the center of the image
            height, width = gray_image.shape
@@ -5296,7 +5473,7 @@ def detect_angle(image, by="median", template=None):
        return best_angle

    # Image Moments-based angle detection
-    elif "mo" in by:
+    elif "mo" in by.lower():
        moments = measure.moments_central(gray_image)
        angle = (
            0.5
@@ -5307,7 +5484,7 @@ def detect_angle(image, by="median", template=None):
        return angle

    # Fourier Transform-based angle detection
-    elif "fft" in by:
+    elif "fft" in by.lower():
        f = fft2(gray_image)
        fshift = fftshift(f)
        magnitude_spectrum = np.log(np.abs(fshift) + 1)
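Across these detect_angle hunks, the function gains an RGBA guard (extra channels are dropped before rgb2gray) and fuzzy method resolution through strcmp, so abbreviations resolve to a supported method. A sketch (image path hypothetical):

```python
from py2ls.ips import load_img, detect_angle  # import path assumed

img = load_img("scan.png")           # hypothetical path; RGBA inputs are trimmed to RGB
angle = detect_angle(img, by="med")  # strcmp resolves "med" -> "median"
print(f"estimated skew: {angle:.1f} degrees")
```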
@@ -5317,11 +5494,19 @@ def detect_angle(image, by="median", template=None):
        return angle

    else:
-        print(f"Unknown method {by}")
+        print(f"Unknown method {by}: supported methods: {methods}")
        return 0


-def imgsets(img, **kwargs):
+def imgsets(img,
+            auto:bool=True,
+            size=None,
+            figsize=None,
+            dpi:int=200,
+            show_axis:bool=False,
+            plot_:bool=True,
+            verbose:bool=False,
+            **kwargs):
    """
    Apply various enhancements and filters to an image using PIL's ImageEnhance and ImageFilter modules.

@@ -5355,6 +5540,9 @@ def imgsets(img, **kwargs):
    Note:
        The "color" and "enhance" enhancements are not implemented in this function.
    """
+
+    import matplotlib.pyplot as plt
+    from PIL import ImageEnhance, ImageOps,Image
    supported_filters = [
        "BLUR",
        "CONTOUR",
@@ -5374,8 +5562,22 @@ def imgsets(img, **kwargs):
        "BOX_BLUR",
        "MEDIAN_FILTER",
    ]
-
-
+    str_usage="""
+    imgsets(dir_img, auto=1, color=1.5, plot_=0)
+    imgsets(dir_img, color=2)
+    imgsets(dir_img, pad=(300, 300), bgcolor=(73, 162, 127), plot_=0)
+    imgsets(dir_img, contrast=0, color=1.2, plot_=0)
+    imgsets(get_clip(), flip="tb")# flip top and bottom
+    imgsets(get_clip(), contrast=1, rm=[100, 5, 2]) #'foreground_threshold', 'background_threshold' and 'erode_structure_size'
+    """
+    if run_once_within():
+        print(str_usage)
+
+    def gamma_correction(image, gamma=1.0, v_max=255):
+        # adjust gama value
+        inv_gamma = 1.0 / gamma
+        lut = [int((i / float(v_max)) ** inv_gamma * int(v_max)) for i in range(int(v_max))]
+        return lut #image.point(lut)

    def confirm_rembg_models(model_name):
        models_support = [
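The new gamma_correction helper only builds a lookup table; applying it is left commented out (`image.point(lut)`), and the corresponding `elif "ga" in k...` branch further down is also commented out. A hedged sketch of how such a LUT would actually be applied with PIL, separate from the diff's own code:

```python
from PIL import Image

def apply_gamma(image: Image.Image, gamma: float = 1.0) -> Image.Image:
    # Same LUT construction idea as the diff's gamma_correction helper
    inv_gamma = 1.0 / gamma
    lut = [int((i / 255.0) ** inv_gamma * 255) for i in range(256)]
    # Image.point expects one 256-entry table per band for multi-band images
    return image.point(lut * len(image.getbands()))
```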
@@ -5399,37 +5601,52 @@ def imgsets(img, **kwargs):

    def auto_enhance(img):
        """
-        Automatically enhances the image based on its characteristics
+        Automatically enhances the image based on its characteristics, including brightness,
+        contrast, color range, sharpness, and gamma correction.
+
        Args:
            img (PIL.Image): The input image.
+
        Returns:
-            dict: A dictionary containing the optimal enhancement values.
+            dict: A dictionary containing the optimal enhancement values applied.
+            PIL.Image: The enhanced image.
        """
+        from PIL import Image, ImageEnhance, ImageOps, ImageFilter
+        import numpy as np
        # Determine the bit depth based on the image mode
-
-
-
-
-
+        try:
+            if img.mode in ["1", "L", "P", "RGB", "YCbCr", "LAB", "HSV"]:
+                bit_depth = 8
+            elif img.mode in ["RGBA", "CMYK"]:
+                bit_depth = 8
+            elif img.mode in ["I", "F"]:
+                bit_depth = 16
+            else:
+                raise ValueError("Unsupported image mode")
+        except:
            bit_depth = 8
-
-
-
-
-
-
+
+        # Initialize enhancement factors
+        enhancements = {
+            "brightness": 1.0,
+            "contrast": 0,# autocontrasted
+            "color": 1.35,
+            "sharpness": 1.0,
+            "gamma": 1.0
+        }
+
+        # Calculate brightness and contrast for each channel
        num_channels = len(img.getbands())
        brightness_factors = []
        contrast_factors = []
        for channel in range(num_channels):
            channel_histogram = img.split()[channel].histogram()
-
-
-            )
+            total_pixels = sum(channel_histogram)
+            brightness = sum(i * w for i, w in enumerate(channel_histogram)) / total_pixels
            channel_min, channel_max = img.split()[channel].getextrema()
            contrast = channel_max - channel_min
            # Adjust calculations based on bit depth
-            normalization_factor = 2**bit_depth - 1
+            normalization_factor = 2**bit_depth - 1
            brightness_factor = (
                1.0 + (brightness - normalization_factor / 2) / normalization_factor
            )
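For orientation: with an 8-bit image, normalization_factor is 255, so a channel whose histogram mean is 170 yields a brightness factor of 1.0 + (170 − 127.5)/255 ≈ 1.17, while a dark channel with mean 64 yields ≈ 0.75. The next hunk shows auto_enhance feeding the per-channel average of these factors straight into ImageEnhance.Brightness.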
@@ -5438,37 +5655,62 @@ def imgsets(img, **kwargs):
            )
            brightness_factors.append(brightness_factor)
            contrast_factors.append(contrast_factor)
-        # Calculate the average brightness and contrast factors across channels
-        avg_brightness_factor = sum(brightness_factors) / num_channels
-        avg_contrast_factor = sum(contrast_factors) / num_channels
-        return {"brightness": avg_brightness_factor, "contrast": avg_contrast_factor}

-
-
+        # Calculate average brightness and contrast factors across channels
+        enhancements["brightness"] = sum(brightness_factors) / num_channels
+        # Adjust brightness and contrast
+        img = ImageEnhance.Brightness(img).enhance(enhancements["brightness"])
+
+        # # Automatic color enhancement (saturation)
+        # if img.mode == "RGB":
+        #     color_enhancer = ImageEnhance.Color(img)
+        #     color_histogram = np.array(img.histogram()).reshape(3, -1)
+        #     avg_saturation = np.mean([np.std(channel) for channel in color_histogram]) / normalization_factor
+        #     print(avg_saturation)
+        #     enhancements["color"] = min(0, max(0.5, 1.0 + avg_saturation)) # Clamp to a reasonable range
+        # # img = color_enhancer.enhance(enhancements["color"])
+
+        # Adjust sharpness
+        sharpness_enhancer = ImageEnhance.Sharpness(img)
+        # Use edge detection to estimate sharpness need
+        edges = img.filter(ImageFilter.FIND_EDGES).convert("L")
+        avg_edge_intensity = np.mean(np.array(edges))
+        enhancements["sharpness"] = min(2.0, max(0.5, 1.0 + avg_edge_intensity / normalization_factor))
+        # img = sharpness_enhancer.enhance(enhancements["sharpness"])
+
+        # # Apply gamma correction
+        # def gamma_correction(image, gamma):
+        #     inv_gamma = 1.0 / gamma
+        #     lut = [min(255, max(0, int((i / 255.0) ** inv_gamma * 255))) for i in range(256)]
+        #     return image.point(lut)
+
+        # avg_brightness = np.mean(np.array(img.convert("L"))) / 255
+        # enhancements["gamma"] = min(2.0, max(0.5, 1.0 if avg_brightness > 0.5 else 1.2 - avg_brightness))
+        # img = gamma_correction(img, enhancements["gamma"])
+
+        # Return the enhancements and the enhanced image
+        return enhancements
+

    # Load image if input is a file path
    if isinstance(img, str):
        img = load_img(img)
-    img_update = img.copy()
-    # Auto-enhance image if requested
-
-    auto = kwargs.get("auto", False)
-    show = kwargs.get("show", True)
-    show_axis = kwargs.get("show_axis", False)
-    size = kwargs.get("size", None)
-    figsize = kwargs.get("figsize", None)
-    dpi = kwargs.get("dpi", 100)
+    img_update = img.copy()

    if auto:
        kwargs = {**auto_enhance(img_update), **kwargs}
-
+    params=["sharp","color","contrast","bright","crop","rotate",'size',"resize",
+            "thumbnail","cover","contain","filter","fit","pad",
+            "rem","rm","back","bg_color","cut",'gamma','flip']
    for k, value in kwargs.items():
+        k = strcmp(k, params)[0] # correct the param name
        if "shar" in k.lower():
            enhancer = ImageEnhance.Sharpness(img_update)
            img_update = enhancer.enhance(value)
        elif all(
            ["col" in k.lower(), "bg" not in k.lower(), "background" not in k.lower()]
        ):
+            # *color
            enhancer = ImageEnhance.Color(img_update)
            img_update = enhancer.enhance(value)
        elif "contr" in k.lower():
@@ -5476,8 +5718,11 @@ def imgsets(img, **kwargs):
                enhancer = ImageEnhance.Contrast(img_update)
                img_update = enhancer.enhance(value)
            else:
-
-
+                try:
+                    img_update = ImageOps.autocontrast(img_update)
+                    print("autocontrasted")
+                except Exception as e:
+                    print(f"Failed 'autocontrasted':{e}")
        elif "bri" in k.lower():
            enhancer = ImageEnhance.Brightness(img_update)
            img_update = enhancer.enhance(value)
@@ -5488,7 +5733,13 @@ def imgsets(img, **kwargs):
            value = detect_angle(img_update, by=value)
            print(f"rotated by {value}°")
            img_update = img_update.rotate(value)
-
+        elif 'flip' in k.lower():
+            if 'l' in value and 'r' in value:
+                # left/right
+                img_update = img_update.transpose(Image.FLIP_LEFT_RIGHT)
+            elif any(['u' in value and'd' in value, 't' in value and 'b' in value]):
+                # up/down or top/bottom
+                img_update = img_update.transpose(Image.FLIP_TOP_BOTTOM)
        elif "si" in k.lower():
            if isinstance(value, tuple):
                value = list(value)
@@ -5500,13 +5751,17 @@ def imgsets(img, **kwargs):
            img_update = ImageOps.cover(img_update, size=value)
        elif "contain" in k.lower():
            img_update = ImageOps.contain(img_update, size=value)
-        elif "
+        elif "fi" in k.lower() and "t" in k.lower(): # filter
            if isinstance(value, dict):
+                if verbose:
+                    print(f"supported filter: {supported_filters}")
                for filter_name, filter_value in value.items():
-                    img_update = apply_filter(img_update, filter_name, filter_value)
+                    img_update = apply_filter(img_update, filter_name, filter_value,verbose=verbose)
            else:
                img_update = ImageOps.fit(img_update, size=value)
        elif "pad" in k.lower():
+            # *ImageOps.pad ensures that the resized image has the exact size specified by the size parameter while maintaining the aspect ratio.
+            # size: A tuple specifying the target size (width, height).
            img_update = ImageOps.pad(img_update, size=value)
        elif "rem" in k.lower() or "rm" in k.lower() or "back" in k.lower():
            from rembg import remove, new_session
@@ -5515,7 +5770,9 @@ def imgsets(img, **kwargs):
                session = new_session("isnet-general-use")
                img_update = remove(img_update, session=session)
            elif value and isinstance(value, (int, float, list)):
-
+                if verbose:
+                    print("https://github.com/danielgatis/rembg/blob/main/USAGE.md")
+                    print(f"rm=True # using default setting;\nrm=(240,10,10)\n'foreground_threshold'(240) and 'background_threshold' (10) values used to determine foreground and background pixels. \nThe 'erode_structure_size'(10) parameter specifies the size of the erosion structure to be applied to the mask.")
                if isinstance(value, int):
                    value = [value]
                if len(value) < 2:
@@ -5557,8 +5814,11 @@ def imgsets(img, **kwargs):
                if len(value) == 3:
                    value += (255,)
                img_update = remove(img_update, bgcolor=value)
+
+        # elif "ga" in k.lower() and "m" in k.lower():
+        #     img_update = gamma_correction(img_update, gamma=value)
    # Display the image if requested
-    if
+    if plot_:
        if figsize is None:
            plt.figure(dpi=dpi)
        else:
@@ -9944,13 +10204,17 @@ def get_loc(input_data, user_agent="0413@mygmail.com)", verbose=True):
    # Case 1: Input is a city name (string)
    if isinstance(input_data, str) and not re.match(r"^\d+(\.\d+)?$", input_data):
        location = geolocator.geocode(input_data)
-
-
-
-
-
-
-
+        try:
+            if verbose:
+                print(
+                    f"Latitude and Longitude for {input_data}: {location.latitude}, {location.longitude}"
+                )
+            else:
+                print(f"Could not find {input_data}.")
+            return location
+        except Exception as e:
+            print(f'Error: {e}')
+            return

    # Case 2: Input is latitude and longitude (float or tuple)
    elif isinstance(input_data, (float, tuple)):
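get_loc now guards the geocoding printout with try/except. Note that the new else pairs with `if verbose`, so a successful lookup with verbose=False still prints "Could not find ...". A sketch, assuming the geopy Nominatim geocoder that `geolocator.geocode` implies:

```python
from py2ls.ips import get_loc  # import path assumed

loc = get_loc("Berlin")  # prints coordinates when verbose=True (the default)
if loc is not None:
    print(loc.latitude, loc.longitude)
```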
@@ -10144,3 +10408,311 @@ def depass(encrypted_code: str, method: str = "AES", key: str = None):
        raise ValueError("SHA256 is a hash function and cannot be decrypted.")
    else:
        raise ValueError("Unsupported decryption method")
+
+def get_clip(dir_save=None):
+    """
+    Master function to extract content from the clipboard (text, URL, or image).
+
+    Parameters:
+        dir_save (str, optional): If an image is found, save it to this path.
+
+    Returns:
+        dict: A dictionary with extracted content:
+            {
+                "type": "text" | "url" | "image" | "none",
+                "content": <str|Image|None>,
+                "saved_to": <str|None> # Path if an image is saved
+            }
+    """
+    result = {"type": "none", "content": None, "saved_to": None}
+
+    try:
+        import pyperclip
+        from PIL import ImageGrab, Image
+        import validators
+        # 1. Check for text in the clipboard
+        clipboard_content = pyperclip.paste()
+        if clipboard_content:
+            if validators.url(clipboard_content.strip()):
+                result["type"] = "url"
+                result["content"] = clipboard_content.strip()
+
+            else:
+                result["type"] = "text"
+                result["content"] = clipboard_content.strip()
+            return clipboard_content.strip()
+
+        # 2. Check for image in the clipboard
+        image = ImageGrab.grabclipboard()
+        if isinstance(image, Image.Image):
+            result["type"] = "image"
+            result["content"] = image
+            if dir_save:
+                image.save(dir_save)
+                result["saved_to"] = dir_save
+                print(f"Image saved to {dir_save}.")
+            else:
+                print("Image detected in clipboard but not saved.")
+            return image
+        print("No valid text, URL, or image found in clipboard.")
+        return result
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return result
+
+def keyboard(*args, action='press', n_click=1,interval=0,verbose=False,**kwargs):
+    """
+    Simulates keyboard input using pyautogui.
+
+    Parameters:
+        input_key (str): The key to simulate. Check the list of supported keys with verbose=True.
+        action (str): The action to perform. Options are 'press', 'keyDown', or 'keyUp'.
+        n_click (int): Number of times to press the key (only for 'press' action).
+        interval (float): Time interval between key presses for 'press' action.
+        verbose (bool): Print detailed output, including supported keys and debug info.
+        kwargs: Additional arguments (reserved for future extensions).
+
+    keyboard("command", "d", action="shorcut")
+    """
+    import pyautogui
+    input_key = args
+
+    actions = ['press','keyDown','keyUp', 'hold','release', 'hotkey','shortcut']
+    action = strcmp(action,actions)[0]
+    keyboard_keys_=['\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(',
+        ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7',
+        '8', '9', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`',
+        'a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
+        'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~',
+        'accept', 'add', 'alt', 'altleft', 'altright', 'apps', 'backspace',
+        'browserback', 'browserfavorites', 'browserforward', 'browserhome',
+        'browserrefresh', 'browsersearch', 'browserstop', 'capslock', 'clear',
+        'convert', 'ctrl', 'ctrlleft', 'ctrlright', 'decimal', 'del', 'delete',
+        'divide', 'down', 'end', 'enter', 'esc', 'escape', 'execute', 'f1', 'f10',
+        'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f2', 'f20',
+        'f21', 'f22', 'f23', 'f24', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
+        'final', 'fn', 'hanguel', 'hangul', 'hanja', 'help', 'home', 'insert', 'junja',
+        'kana', 'kanji', 'launchapp1', 'launchapp2', 'launchmail',
+        'launchmediaselect', 'left', 'modechange', 'multiply', 'nexttrack',
+        'nonconvert', 'num0', 'num1', 'num2', 'num3', 'num4', 'num5', 'num6',
+        'num7', 'num8', 'num9', 'numlock', 'pagedown', 'pageup', 'pause', 'pgdn',
+        'pgup', 'playpause', 'prevtrack', 'print', 'printscreen', 'prntscrn',
+        'prtsc', 'prtscr', 'return', 'right', 'scrolllock', 'select', 'separator',
+        'shift', 'shiftleft', 'shiftright', 'sleep', 'space', 'stop', 'subtract', 'tab',
+        'up', 'volumedown', 'volumemute', 'volumeup', 'win', 'winleft', 'winright', 'yen',
+        'command', 'option', 'optionleft', 'optionright']
+    if verbose:
+        print(f"supported keys: {keyboard_keys_}")
+
+    if action not in ['hotkey','shortcut']:
+        if not isinstance(input_key, list):
+            input_key=list(input_key)
+        input_key = [strcmp(i, keyboard_keys_)[0] for i in input_key ]
+
+    # correct action
+    cmd_keys = ['command', 'option', 'optionleft', 'optionright','win', 'winleft', 'winright','ctrl', 'ctrlleft', 'ctrlright']
+    try:
+        if any([i in cmd_keys for i in input_key]):
+            action='hotkey'
+    except:
+        pass
+
+    print(f"\n{action}: {input_key}")
+    # keyboard
+    if action in ["press"]:
+        # pyautogui.press(input_key, presses=n_click,interval=interval)
+        for _ in range(n_click):
+            for key in input_key:
+                pyautogui.press(key)
+                pyautogui.sleep(interval)
+    elif action in ['keyDown','hold']:
+        # pyautogui.keyDown(input_key)
+        for _ in range(n_click):
+            for key in input_key:
+                pyautogui.keyDown(key)
+                pyautogui.sleep(interval)
+
+    elif action in ['keyUp','release']:
+        # pyautogui.keyUp(input_key)
+        for _ in range(n_click):
+            for key in input_key:
+                pyautogui.keyUp(key)
+                pyautogui.sleep(interval)
+
+    elif action in ['hotkey','shortcut']:
+        pyautogui.hotkey(input_key)
+
+def mouse(
+    *args, # loc
+    action: str = "move",
+    duration: float = 0.5,
+    loc_type: str = "absolute", # 'absolute', 'relative'
+    region: tuple = None, # (tuple, optional): A region (x, y, width, height) to search for the image.
+    image_path: str = None,
+    wait:float = 0,
+    text: str = None,
+    confidence: float = 0.8,
+    button: str = "left",
+    n_click: int = 1, # number of clicks
+    interval: float = 0.25, # time between clicks
+    scroll_amount: int = -500,
+    fail_safe: bool = True,
+    grayscale: bool = False,
+    **kwargs,
+):
+    """
+    Master function to handle pyautogui actions.
+
+    Parameters:
+        action (str): The action to perform ('click', 'double_click', 'type', 'drag', 'scroll', 'move', 'locate', etc.).
+        image_path (str, optional): Path to the image for 'locate' or 'click' actions.
+        text (str, optional): Text to type for 'type' action.
+        confidence (float, optional): Confidence level for image recognition (default 0.8).
+        duration (float, optional): Duration for smooth movements in seconds (default 0.5).
+        region (tuple, optional): A region (x, y, width, height) to search for the image.
+        button (str, optional): Mouse button to use ('left', 'right', 'middle').
+        n_click (int, optional): Number of times to click for 'click' actions.
+        interval (float, optional): Interval between clicks for 'click' actions.
+        offset (tuple, optional): Horizontal offset from the located image. y_offset (int, optional): Vertical offset from the located image.
+        scroll_amount (int, optional): Amount to scroll (positive for up, negative for down).
+        fail_safe (bool, optional): Enable/disable pyautogui's fail-safe feature.
+        grayscale (bool, optional): Search for the image in grayscale mode.
+
+    Returns:
+        tuple or None: Returns coordinates for 'locate' actions, otherwise None.
+    """
+    import pyautogui
+    import time
+
+    pyautogui.FAILSAFE = fail_safe # Enable/disable fail-safe
+    loc_type = "absolute" if "abs" in loc_type else "relative"
+    if len(args) == 1:
+        if isinstance(args[0], str):
+            image_path = args[0]
+            x_offset, y_offset = None, None
+        else:
+            x_offset, y_offset = args
+
+    elif len(args) == 2:
+        x_offset, y_offset = args
+    elif len(args) == 3:
+        x_offset, y_offset, action = args
+    elif len(args) == 4:
+        x_offset, y_offset, action, duration = args
+    else:
+        x_offset, y_offset = None, None
+
+    what_action = [
+        "locate",
+        "click",
+        "double_click",
+        "triple_click",
+        "input",
+        "write",
+        "type",
+        "drag",
+        "move",
+        "scroll",
+        "down",
+        "up",
+        "hold",
+        "press",
+        "release"
+    ]
+    action = strcmp(action, what_action)[0]
+    # get the locations
+    location = None
+    if any([x_offset is None, y_offset is None]):
+        if region is None:
+            w,h=pyautogui.size()
+            region=(0,0,w,h)
+            print(region)
+        try:
+            print(image_path)
+            location = pyautogui.locateOnScreen(
+                image_path, confidence=confidence, region=region, grayscale=grayscale
+            )
+            print(pyautogui.center(location))
+        except Exception as e:
+            location = None
+
+    # try:
+    if location:
+        x, y = pyautogui.center(location)
+        x += x_offset if x_offset else 0
+        y += y_offset if y_offset else 0
+        x_offset, y_offset = x,y
+    print(action)
+    if action in ['locate']:
+        x, y = pyautogui.position()
+    elif action in ["click", "double_click","triple_click"]:
+        # if location:
+        #     x, y = pyautogui.center(location)
+        #     x += x_offset
+        #     y += y_offset
+        #     pyautogui.moveTo(x, y, duration=duration)
+        #     if action == "click":
+        #         pyautogui.click(x=x, y=y, clicks=n_click, interval=interval, button=button)
+        #     elif action == "double_click":
+        #         pyautogui.doubleClick(x=x, y=y, interval=interval, button=button)
+        #     elif action=='triple_click':
+        #         pyautogui.tripleClick(x=x,y=y,interval=interval, button=button)
+        # else:
+        if action == "click":
+            pyautogui.moveTo(x_offset, y_offset, duration=duration)
+            time.sleep(wait)
+            pyautogui.click(x=x_offset, y=y_offset, clicks=n_click, interval=interval, button=button)
+        elif action == "double_click":
+            pyautogui.moveTo(x_offset, y_offset, duration=duration)
+            time.sleep(wait)
+            pyautogui.doubleClick(x=x_offset, y=y_offset, interval=interval, button=button)
+        elif action=='triple_click':
+            pyautogui.moveTo(x_offset, y_offset, duration=duration)
+            time.sleep(wait)
+            pyautogui.tripleClick(x=x_offset, y=y_offset, interval=interval, button=button)
+
+    elif action in ["type", "write", "input"]:
+        pyautogui.moveTo(x_offset, y_offset, duration=duration)
+        time.sleep(wait)
+        if text is not None:
+            pyautogui.typewrite(text, interval=interval)
+        else:
+            raise ValueError("Text must be provided for the 'type' action.")
+
+    elif action == "drag":
+        if loc_type == "absolute":
+            pyautogui.dragTo(x_offset, y_offset, duration=duration, button=button)
+        else:
+            pyautogui.dragRel(x_offset, y_offset, duration=duration, button=button)
+
+    elif action in ["move"]:
+        if loc_type == "absolute":
+            pyautogui.moveTo(x_offset, y_offset, duration=duration)
+        else:
+            pyautogui.moveRel(x_offset, y_offset, duration=duration)
+
+    elif action == "scroll":
+        pyautogui.moveTo(x_offset, y_offset, duration=duration)
+        time.sleep(wait)
+        pyautogui.scroll(scroll_amount)
+
+    elif action in ["down",'hold','press']:
+        pyautogui.moveTo(x_offset, y_offset, duration=duration)
+        time.sleep(wait)
+        pyautogui.mouseDown(x_offset, y_offset, button=button, duration=duration)
+
+    elif action in ['up','release']:
+        pyautogui.moveTo(x_offset, y_offset, duration=duration)
+        time.sleep(wait)
+        pyautogui.mouseUp(x_offset, y_offset, button=button, duration=duration)
+
+    else:
+        raise ValueError(f"Unsupported action: {action}")
+
+    # except pyautogui.ImageNotFoundException:
+    #     print(
+    #         "Image not found. Ensure the image is visible and parameters are correct."
+    #     )
+    # except Exception as e:
+    #     print(f"An error occurred: {e}")