py2ls 0.2.4.32__py3-none-any.whl → 0.2.4.34__py3-none-any.whl

py2ls/ips.py CHANGED
@@ -1,6 +1,7 @@
  import numpy as np
  import pandas as pd
- import sys, os
+ import sys
+ import os
  from IPython.display import display
  from typing import List, Optional, Union
 
@@ -17,13 +18,24 @@ import warnings
  warnings.simplefilter("ignore", category=pd.errors.SettingWithCopyWarning)
  warnings.filterwarnings("ignore", category=pd.errors.PerformanceWarning)
  warnings.filterwarnings("ignore")
- import os
  import shutil
  import logging
  from pathlib import Path
  from datetime import datetime
+ import re
+ import stat
+ import platform
 
-
+ # only for backing up these scripts
+ def backup(
+     src="/Users/macjianfeng/Dropbox/github/python/py2ls/.venv/lib/python3.12/site-packages/py2ls/",
+     tar="/Users/macjianfeng/Dropbox/github/python/py2ls/py2ls/",
+     kind="py",
+     overwrite=True,
+ ):
+     f = listdir(src, kind)
+     [copy(i, tar, overwrite=overwrite) for i in f.path]
+     print(f"all files are copied from {os.path.basename(src)} to {tar}")
  def run_once_within(duration=60, reverse=False):  # default 60s
      import time
 
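The backup() helper added above simply mirrors every file of a given kind from src to tar, using listdir and copy defined elsewhere in ips.py. A minimal usage sketch — the paths below are placeholders, not the hard-coded defaults:

    from py2ls.ips import backup

    # mirror all *.py files from the installed package back into the repo tree
    backup(
        src="/path/to/.venv/lib/python3.12/site-packages/py2ls/",
        tar="/path/to/repo/py2ls/",
        kind="py",
        overwrite=True,
    )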
@@ -786,13 +798,22 @@ def strcmp(
      return candidates[best_match_index], best_match_index
 
 
- def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
+ def imgcmp(img: list,
+            method:str ="knn",
+            thr:float =0.75,
+            detector: str = "sift",
+            plot_:bool =True,
+            figsize=[12, 6],
+            grid_size=10,  # only for grid detector
+            **kwargs):
      """
      Compare two images using SSIM, Feature Matching (SIFT), or KNN Matching.
 
      Parameters:
-     - img (list): List containing two image file paths [img1, img2].
+     - img (list): List containing two image file paths [img1, img2] or two numpy arrays.
      - method (str): Comparison method ('ssim', 'match', or 'knn').
+     - detector (str): Feature detector ('sift', 'grid', 'pixel').
+     - thr (float): Threshold for filtering matches.
      - plot_ (bool): Whether to display the results visually.
      - figsize (list): Size of the figure for plots.
 
@@ -805,8 +826,13 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
      from skimage.metrics import structural_similarity as ssim
 
      # Load images
-     image1 = cv2.imread(img[0])
-     image2 = cv2.imread(img[1])
+     if isinstance(img, list) and isinstance(img[0],str):
+         image1 = cv2.imread(img[0])
+         image2 = cv2.imread(img[1])
+         bool_cvt=True
+     else:
+         image1, image2 = np.array(img[0]),np.array(img[1])
+         bool_cvt=False
 
      if image1 is None or image2 is None:
          raise ValueError("Could not load one or both images. Check file paths.")
@@ -841,21 +867,53 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
      elif method in ["match", "knn"]:
          # Convert images to grayscale
          gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
-         gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
+         gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
+
+         if detector == "sift":
+             # SIFT detector
+             sift = cv2.SIFT_create()
+             keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
+             keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
+
+         elif detector == "grid":
+             # Grid-based detection
+             keypoints1, descriptors1 = [], []
+             keypoints2, descriptors2 = [], []
+
+             for i in range(0, gray1.shape[0], grid_size):
+                 for j in range(0, gray1.shape[1], grid_size):
+                     patch1 = gray1[i:i + grid_size, j:j + grid_size]
+                     patch2 = gray2[i:i + grid_size, j:j + grid_size]
+                     if patch1.size > 0 and patch2.size > 0:
+                         keypoints1.append(cv2.KeyPoint(j + grid_size // 2, i + grid_size // 2, grid_size))
+                         keypoints2.append(cv2.KeyPoint(j + grid_size // 2, i + grid_size // 2, grid_size))
+                         descriptors1.append(np.mean(patch1))
+                         descriptors2.append(np.mean(patch2))
+
+             descriptors1 = np.array(descriptors1).reshape(-1, 1)
+             descriptors2 = np.array(descriptors2).reshape(-1, 1)
+
+         elif detector == "pixel":
+             # Pixel-based direct comparison
+             descriptors1 = gray1.flatten()
+             descriptors2 = gray2.flatten()
+             keypoints1 = [cv2.KeyPoint(x, y, 1) for y in range(gray1.shape[0]) for x in range(gray1.shape[1])]
+             keypoints2 = [cv2.KeyPoint(x, y, 1) for y in range(gray2.shape[0]) for x in range(gray2.shape[1])]
 
-         # Initialize SIFT detector
-         sift = cv2.SIFT_create()
-
-         # Detect and compute features
-         keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
-         keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
-
-         if len(keypoints1) == 0 or len(keypoints2) == 0:
-             raise ValueError("No keypoints found in one or both images.")
+         else:
+             raise ValueError("Invalid detector. Use 'sift', 'grid', or 'pixel'.")
+
+         # Handle missing descriptors
+         if descriptors1 is None or descriptors2 is None:
+             raise ValueError("Failed to compute descriptors for one or both images.")
+         # Ensure descriptors are in the correct data type
+         if descriptors1.dtype != np.float32:
+             descriptors1 = descriptors1.astype(np.float32)
+         if descriptors2.dtype != np.float32:
+             descriptors2 = descriptors2.astype(np.float32)
 
          # BFMatcher initialization
          bf = cv2.BFMatcher()
-
          if method == "match":  # Cross-check matching
              bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
              matches = bf.match(descriptors1, descriptors2)
@@ -863,13 +921,14 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
 
              # Filter good matches
              good_matches = [
-                 m for m in matches if m.distance < 0.75 * matches[-1].distance
+                 m for m in matches if m.distance < thr * matches[-1].distance
              ]
 
          elif method == "knn":  # KNN matching with ratio test
+             bf = cv2.BFMatcher()
              matches = bf.knnMatch(descriptors1, descriptors2, k=2)
              # Apply Lowe's ratio test
-             good_matches = [m for m, n in matches if m.distance < 0.75 * n.distance]
+             good_matches = [m for m, n in matches if m.distance < thr * n.distance]
 
          # Calculate similarity score
          similarity_score = len(good_matches) / min(len(keypoints1), len(keypoints2))
@@ -887,23 +946,24 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
          dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(
              -1, 1, 2
          )
-
-         # Calculate Homography using RANSAC
-         homography_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
-
          # Apply the homography to image2
-         h, w = image1.shape[:2]
-         warped_image2 = cv2.warpPerspective(image2, homography_matrix, (w, h))
-
-         # Plot result if needed
-         if plot_:
-             fig, ax = plt.subplots(1, 2, figsize=figsize)
-             ax[0].imshow(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
-             ax[0].set_title("Image 1")
-             ax[1].imshow(cv2.cvtColor(warped_image2, cv2.COLOR_BGR2RGB))
-             ax[1].set_title("Warped Image 2")
-             plt.tight_layout()
-             plt.show()
+         try:
+             # Calculate Homography using RANSAC
+             homography_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+             h, w = image1.shape[:2]
+             warped_image2 = cv2.warpPerspective(image2, homography_matrix, (w, h))
+
+             # Plot result if needed
+             if plot_:
+                 fig, ax = plt.subplots(1, 2, figsize=figsize)
+                 ax[0].imshow(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[0].imshow(image1)
+                 ax[0].set_title("Image 1")
+                 ax[1].imshow(cv2.cvtColor(warped_image2, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[1].imshow(warped_image2)
+                 ax[1].set_title("Warped Image 2")
+                 plt.tight_layout()
+                 plt.show()
+         except Exception as e:
+             print(e)
 
          # Plot matches if needed
          if plot_:
@@ -911,28 +971,41 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
                  image1, keypoints1, image2, keypoints2, good_matches, None, flags=2
              )
              plt.figure(figsize=figsize)
-             plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
-             plt.title(
-                 f"Feature Matches ({len(good_matches)} matches, Score: {similarity_score:.4f})"
-             )
+             plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB)) if bool_cvt else plt.imshow(result)
+             plt.title(f"Feature Matches ({len(good_matches)} matches, Score: {similarity_score:.4f})")
              plt.axis("off")
              plt.show()
          # Identify unmatched keypoints
          matched_idx1 = [m.queryIdx for m in good_matches]
          matched_idx2 = [m.trainIdx for m in good_matches]
-
+         matched_kp1 = [kp for i, kp in enumerate(keypoints1) if i in matched_idx1]
+         matched_kp2 = [kp for i, kp in enumerate(keypoints2) if i in matched_idx2]
          unmatched_kp1 = [kp for i, kp in enumerate(keypoints1) if i not in matched_idx1]
          unmatched_kp2 = [kp for i, kp in enumerate(keypoints2) if i not in matched_idx2]
 
-         # Mark unmatched keypoints on the images
-         img1_marked = cv2.drawKeypoints(
+         # Mark keypoints on the images
+         img1_match = cv2.drawKeypoints(
+             image1,
+             matched_kp1,
+             None,
+             color=(0, 0, 255),
+             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
+         )
+         img2_match = cv2.drawKeypoints(
+             image2,
+             matched_kp2,
+             None,
+             color=(0, 0, 255),
+             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
+         )
+         img1_unmatch = cv2.drawKeypoints(
              image1,
              unmatched_kp1,
              None,
              color=(0, 0, 255),
              flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
          )
-         img2_marked = cv2.drawKeypoints(
+         img2_unmatch = cv2.drawKeypoints(
              image2,
              unmatched_kp2,
              None,
@@ -940,16 +1013,27 @@ def imgcmp(img: list, method="knn", plot_=True, figsize=[12, 6]):
              flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
          )
 
-         # Display results
          if plot_:
              fig, ax = plt.subplots(1, 2, figsize=figsize)
-             ax[0].imshow(cv2.cvtColor(img1_marked, cv2.COLOR_BGR2RGB))
+             ax[0].imshow(cv2.cvtColor(img1_unmatch, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[0].imshow(img1_unmatch)
              ax[0].set_title("Unmatched Keypoints (Image 1)")
-             ax[1].imshow(cv2.cvtColor(img2_marked, cv2.COLOR_BGR2RGB))
+             ax[1].imshow(cv2.cvtColor(img2_unmatch, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[1].imshow(img2_unmatch)
              ax[1].set_title("Unmatched Keypoints (Image 2)")
+             ax[0].axis("off")
+             ax[1].axis("off")
              plt.tight_layout()
              plt.show()
-         return good_matches, similarity_score, homography_matrix
+         if plot_:
+             fig, ax = plt.subplots(1, 2, figsize=figsize)
+             ax[0].imshow(cv2.cvtColor(img1_match, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[0].imshow(img1_match)
+             ax[0].set_title("Matched Keypoints (Image 1)")
+             ax[1].imshow(cv2.cvtColor(img2_match, cv2.COLOR_BGR2RGB)) if bool_cvt else ax[1].imshow(img2_match)
+             ax[1].set_title("Matched Keypoints (Image 2)")
+             ax[0].axis("off")
+             ax[1].axis("off")
+             plt.tight_layout()
+             plt.show()
+         return good_matches, similarity_score  # , homography_matrix
 
      else:
          raise ValueError("Invalid method. Use 'ssim', 'match', or 'knn'.")
@@ -969,9 +1053,7 @@ def cn2pinyin(
      Args:
          cn_str (str): Chinese string to convert.
          sep (str): Separator for the output Pinyin string.
-         style (Style): "normal","tone", "tone2","tone3",
-             "finals","finals_tone","finals_tone2","finals_tone3",
-             "initials","bopomofo","bopomofo_first","cyrillic","pl",
+         fmt (Style): "normal","tone", "tone2","tone3","finals","finals_tone","finals_tone2","finals_tone3","initials","bopomofo","bopomofo_first","cyrillic","pl",
      Returns:
          cn_str: The Pinyin representation of the Chinese string.
      """
@@ -1224,7 +1306,6 @@ def text2audio(
          print(f"Error opening file: {e}")
      print("done")
 
-
  def str2time(time_str, fmt="24"):
      """
      Convert a time string into the specified format.
@@ -3649,8 +3730,8 @@ def get_os(full=False, verbose=False):
      import os
      import subprocess
      from datetime import datetime, timedelta
-     from collections import defaultdict
 
+
      def get_os_type():
          os_name = sys.platform
          if "dar" in os_name:
@@ -3663,7 +3744,8 @@ def get_os(full=False, verbose=False):
          else:
              print(f"{os_name}, returned 'None'")
              return None
-
+     if not full:
+         return get_os_type()
      def get_os_info():
          """Get the detailed OS name, version, and other platform-specific details."""
 
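The added early return makes get_os(full=False) hand back just the short OS label from get_os_type() instead of assembling the full report. A sketch — the exact label strings are inferred from get_os_type() and should be treated as assumptions:

    from py2ls.ips import get_os

    os_label = get_os()          # short label, e.g. a macOS/Windows/Linux identifier
    details = get_os(full=True)  # detailed platform report, as before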
@@ -4074,11 +4156,6 @@ def get_os(full=False, verbose=False):
      return res
 
 
- import re
- import stat
- import platform
-
-
  def listdir(
      rootdir,
      kind=None,
@@ -4695,57 +4772,64 @@ def is_image(fpath):
      Returns:
          bool: True if the file is a recognized image, False otherwise.
      """
-     import mimetypes
+     from PIL import Image
+     if isinstance(fpath,str):
+         import mimetypes
+
+         # Known image MIME types
+         image_mime_types = {
+             "image/jpeg",
+             "image/png",
+             "image/gif",
+             "image/bmp",
+             "image/webp",
+             "image/tiff",
+             "image/x-icon",
+             "image/svg+xml",
+             "image/heic",
+             "image/heif",
+         }
 
-     # Known image MIME types
-     image_mime_types = {
-         "image/jpeg",
-         "image/png",
-         "image/gif",
-         "image/bmp",
-         "image/webp",
-         "image/tiff",
-         "image/x-icon",
-         "image/svg+xml",
-         "image/heic",
-         "image/heif",
-     }
+         # Known image file extensions
+         image_extensions = {
+             ".jpg",
+             ".jpeg",
+             ".png",
+             ".gif",
+             ".bmp",
+             ".webp",
+             ".tif",
+             ".tiff",
+             ".ico",
+             ".svg",
+             ".heic",
+             ".heif",
+             ".fig",
+             ".jpg",
+         }
 
-     # Known image file extensions
-     image_extensions = {
-         ".jpg",
-         ".jpeg",
-         ".png",
-         ".gif",
-         ".bmp",
-         ".webp",
-         ".tif",
-         ".tiff",
-         ".ico",
-         ".svg",
-         ".heic",
-         ".heif",
-         ".fig",
-         ".jpg",
-     }
+         # Get MIME type using mimetypes
+         mime_type, _ = mimetypes.guess_type(fpath)
 
-     # Get MIME type using mimetypes
-     mime_type, _ = mimetypes.guess_type(fpath)
+         # Check MIME type
+         if mime_type in image_mime_types:
+             return True
 
-     # Check MIME type
-     if mime_type in image_mime_types:
-         return True
+         # Fallback: Check file extension
+         ext = os.path.splitext(fpath)[
+             -1
+         ].lower()  # Get the file extension and ensure lowercase
+         if ext in image_extensions:
+             return True
 
-     # Fallback: Check file extension
-     ext = os.path.splitext(fpath)[
-         -1
-     ].lower()  # Get the file extension and ensure lowercase
-     if ext in image_extensions:
+         return False
+
+     elif isinstance(fpath, Image.Image):
+         # If the input is a PIL Image object
          return True
 
      return False
 
-
  def is_video(fpath):
      """
      Determine if a given file is a video based on MIME type and file extension.
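is_image() now also accepts an in-memory PIL image, not just a path. A minimal sketch of the three branches (the file names are hypothetical):

    from PIL import Image
    from py2ls.ips import is_image

    print(is_image("photo.jpg"))               # True via MIME type or extension
    print(is_image(Image.new("RGB", (4, 4))))  # True: any PIL.Image.Image instance
    print(is_image("notes.txt"))               # False: not a recognized image type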
@@ -5055,6 +5139,105 @@ def str2list(str_):
      [l.append(x) for x in str_]
      return l
 
+ def str2words(content, method="combined", custom_dict=None, sym_spell_params=None, use_threading=True):
+     """
+     Ultimate text correction function supporting multiple methods,
+     lists or strings, and domain-specific corrections.
+
+     Parameters:
+         content (str or list): Input text or list of strings to correct.
+         method (str): Correction method ('textblob', 'sym', 'combined').
+         custom_dict (dict): Custom dictionary for domain-specific corrections.
+         sym_spell_params (dict): Parameters for initializing SymSpell.
+
+     Returns:
+         str or list: Corrected text or list of corrected strings.
+     """
+     from textblob import TextBlob
+     from symspellpy import SymSpell, Verbosity
+     from functools import lru_cache
+     import pkg_resources
+     from concurrent.futures import ThreadPoolExecutor
+
+     def initialize_symspell(max_edit_distance=2, prefix_length=7):
+         """Initialize SymSpell for advanced spelling correction."""
+         sym_spell = SymSpell(max_edit_distance, prefix_length)
+         dictionary_path = pkg_resources.resource_filename(
+             "symspellpy",
+             # "frequency_bigramdictionary_en_243_342.txt",
+             "frequency_dictionary_en_82_765.txt",
+         )
+
+         sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)
+         return sym_spell
+
+     def segment_words(text, sym_spell):
+         """Segment concatenated words into separate words."""
+         segmented = sym_spell.word_segmentation(text)
+         return segmented.corrected_string
+
+     @lru_cache(maxsize=1000)  # Cache results for repeated corrections
+     def advanced_correction(word, sym_spell):
+         """Correct a single word using SymSpell."""
+         suggestions = sym_spell.lookup(word, Verbosity.CLOSEST, max_edit_distance=2)
+         return suggestions[0].term if suggestions else word
+
+     def apply_custom_corrections(word, custom_dict):
+         """Apply domain-specific corrections using a custom dictionary."""
+         return custom_dict.get(word.lower(), word)
+     def preserve_case(original_word, corrected_word):
+         """
+         Preserve the case of the original word in the corrected word.
+         """
+         if original_word.isupper():
+             return corrected_word.upper()
+         elif original_word[0].isupper():
+             return corrected_word.capitalize()
+         else:
+             return corrected_word.lower()
+     def process_string(text, method, sym_spell=None, custom_dict=None):
+         """
+         Process a single string for spelling corrections.
+         Handles TextBlob, SymSpell, and custom corrections.
+         """
+         if method in ("sym", "combined") and sym_spell:
+             text = segment_words(text, sym_spell)
+
+         if method in ("textblob", "combined"):
+             text = str(TextBlob(text).correct())
+
+         corrected_words = []
+         for word in text.split():
+             original_word = word
+             if method in ("sym", "combined") and sym_spell:
+                 word = advanced_correction(word, sym_spell)
+
+             # Step 3: Apply custom corrections
+             if custom_dict:
+                 word = apply_custom_corrections(word, custom_dict)
+             # Preserve original case
+             word = preserve_case(original_word, word)
+             corrected_words.append(word)
+
+         return " ".join(corrected_words)
+
+     # Initialize SymSpell if needed
+     sym_spell = None
+     if method in ("sym", "combined"):
+         if not sym_spell_params:
+             sym_spell_params = {"max_edit_distance": 2, "prefix_length": 7}
+         sym_spell = initialize_symspell(**sym_spell_params)
+
+     # Process lists or strings
+     if isinstance(content, list):
+         if use_threading:
+             with ThreadPoolExecutor() as executor:
+                 corrected_content = list(executor.map(lambda x: process_string(x, method, sym_spell, custom_dict), content))
+             return corrected_content
+         else:
+             return [process_string(item, method, sym_spell, custom_dict) for item in content]
+     else:
+         return process_string(content, method, sym_spell, custom_dict)
 
  def load_img(fpath):
      """
@@ -5078,7 +5261,7 @@ def load_img(fpath):
      raise OSError(f"Unable to open file '{fpath}' or it is not a valid image file.")
 
 
- def apply_filter(img, *args):
+ def apply_filter(img, *args, verbose=True):
      # def apply_filter(img, filter_name, filter_value=None):
      """
      Apply the specified filter to the image.
@@ -5092,7 +5275,7 @@ def apply_filter(img, *args):
      from PIL import ImageFilter
 
      def correct_filter_name(filter_name):
-         if "bl" in filter_name.lower() and "box" not in filter_name.lower():
+         if all(["b" in filter_name.lower(), "ur" in filter_name.lower(), "box" not in filter_name.lower()]):
              return "BLUR"
          elif "cont" in filter_name.lower():
              return "Contour"
@@ -5156,10 +5339,11 @@ def apply_filter(img, *args):
 
      for arg in args:
          if isinstance(arg, str):
-             filter_name = arg
-             filter_name = correct_filter_name(filter_name)
+             filter_name = correct_filter_name(arg)
          else:
              filter_value = arg
+     if verbose:
+         print(f'processing {filter_name}')
      filter_name = filter_name.upper()  # Ensure filter name is uppercase
 
      # Supported filters
@@ -5203,7 +5387,7 @@ def apply_filter(img, *args):
          bands = filter_value if filter_value is not None else None
          return img.filter(supported_filters[filter_name](bands))
      else:
-         if filter_value is not None:
+         if filter_value is not None and verbose:
              print(
                  f"{filter_name} doesn't require a value for {filter_value}, but it remains unaffected"
              )
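apply_filter() now threads a verbose flag through, and the stricter name check means only names containing both "b" and "ur" (such as "blur") resolve to BLUR. A sketch with a hypothetical input file:

    from py2ls.ips import load_img, apply_filter

    img = load_img("photo.jpg")
    out = apply_filter(img, "blur", verbose=True)     # prints "processing BLUR"
    out = apply_filter(img, "contour", verbose=True)  # prints "processing Contour"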
@@ -5220,6 +5404,8 @@ def detect_angle(image, by="median", template=None):
      import cv2
 
      # Convert to grayscale
+     if np.array(image).shape[-1]>3:
+         image=np.array(image)[:,:,:3]
      gray_image = rgb2gray(image)
 
      # Detect edges using Canny edge detector
@@ -5231,9 +5417,10 @@ def detect_angle(image, by="median", template=None):
      if not lines and any(["me" in by, "pca" in by]):
          print("No lines detected. Adjust the edge detection parameters.")
          return 0
-
+     methods=['mean','median','pca','gradient orientation','template matching','moments','fft']
+     by=strcmp(by, methods)[0]
      # Hough Transform-based angle detection (Median/Mean)
-     if "me" in by:
+     if "me" in by.lower():
          angles = []
          for line in lines:
              (x0, y0), (x1, y1) = line
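Because by is now normalized against the methods list with strcmp, abbreviated or differently-cased method names resolve to a supported one before the branches below run. A usage sketch (the image path is hypothetical):

    from py2ls.ips import load_img, detect_angle

    img = load_img("scan.png")
    angle = detect_angle(img, by="median")  # Hough-based median of detected line angles
    angle = detect_angle(img, by="PCA")     # fuzzy-matched to 'pca'
    angle = detect_angle(img, by="fft")     # Fourier-spectrum estimate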
@@ -5256,7 +5443,7 @@ def detect_angle(image, by="median", template=None):
          return rotation_angle
 
      # PCA-based angle detection
-     elif "pca" in by:
+     elif "pca" in by.lower():
          y, x = np.nonzero(edges)
          if len(x) == 0:
              return 0
@@ -5266,14 +5453,14 @@ def detect_angle(image, by="median", template=None):
          return angle
 
      # Gradient Orientation-based angle detection
-     elif "gra" in by:
+     elif "gra" in by.lower():
          gx, gy = np.gradient(gray_image)
          angles = np.arctan2(gy, gx) * 180 / np.pi
          hist, bin_edges = np.histogram(angles, bins=360, range=(-180, 180))
          return bin_edges[np.argmax(hist)]
 
      # Template Matching-based angle detection
-     elif "temp" in by:
+     elif "temp" in by.lower():
          if template is None:
              # Automatically extract a template from the center of the image
              height, width = gray_image.shape
@@ -5296,7 +5483,7 @@ def detect_angle(image, by="median", template=None):
          return best_angle
 
      # Image Moments-based angle detection
-     elif "mo" in by:
+     elif "mo" in by.lower():
          moments = measure.moments_central(gray_image)
          angle = (
              0.5
5307
5494
  return angle
5308
5495
 
5309
5496
  # Fourier Transform-based angle detection
5310
- elif "fft" in by:
5497
+ elif "fft" in by.lower():
5311
5498
  f = fft2(gray_image)
5312
5499
  fshift = fftshift(f)
5313
5500
  magnitude_spectrum = np.log(np.abs(fshift) + 1)
@@ -5317,11 +5504,21 @@ def detect_angle(image, by="median", template=None):
          return angle
 
      else:
-         print(f"Unknown method {by}")
+         print(f"Unknown method {by}: supported methods: {methods}")
          return 0
 
 
- def imgsets(img, **kwargs):
+ def imgsets(img,
+             auto:bool=True,
+             size=None,
+             figsize=None,
+             dpi:int=200,
+             show_axis:bool=False,
+             plot_:bool=True,
+             verbose:bool=False,
+             model:str="isnet-general-use",
+             **kwargs,
+             ):
      """
      Apply various enhancements and filters to an image using PIL's ImageEnhance and ImageFilter modules.
 
@@ -5355,6 +5552,9 @@ def imgsets(img, **kwargs):
      Note:
          The "color" and "enhance" enhancements are not implemented in this function.
      """
+
+     import matplotlib.pyplot as plt
+     from PIL import ImageEnhance, ImageOps, Image
      supported_filters = [
          "BLUR",
          "CONTOUR",
@@ -5374,62 +5574,92 @@ def imgsets(img, **kwargs):
          "BOX_BLUR",
          "MEDIAN_FILTER",
      ]
-     print('usage: imgsets(dir_img, contrast="auto", rm=True, color=2.2)')
-     print("\nlog:\n")
-
-     def confirm_rembg_models(model_name):
-         models_support = [
-             "u2net",
-             "u2netp",
-             "u2net_human_seg",
-             "u2net_cloth_seg",
-             "silueta",
-             "isnet-general-use",
-             "isnet-anime",
-             "sam",
-         ]
-         if model_name in models_support:
-             print(f"model_name: {model_name}")
-             return model_name
-         else:
-             print(
-                 f"{model_name} cannot be found, check the name:{models_support}, default('isnet-general-use') has been used"
-             )
-             return "isnet-general-use"
+     # *Rembg is a tool to remove image backgrounds.
+     # https://github.com/danielgatis/rembg
+     rem_models = {
+         "u2net": "general use cases.",
+         "u2netp": "A lightweight version of u2net model.",
+         "u2net_human_seg": "human segmentation.",
+         "u2net_cloth_seg": "Cloths Parsing from human portrait. Here clothes are parsed into 3 categories: Upper body, Lower body and Full body.",
+         "silueta": "Same as u2net but the size is reduced to 43Mb.",
+         "isnet-general-use": "A new pre-trained model for general use cases.",
+         "isnet-anime": "A high-accuracy segmentation for anime characters.",
+         "sam": "any use cases.",
+         "birefnet-general": "general use cases.",
+         "birefnet-general-lite": "A light pre-trained model for general use cases.",
+         "birefnet-portrait": "human portraits.",
+         "birefnet-dis": "dichotomous image segmentation (DIS).",
+         "birefnet-hrsod": "high-resolution salient object detection (HRSOD).",
+         "birefnet-cod": "concealed object detection (COD).",
+         "birefnet-massive": "A pre-trained model with massive dataset.",
+     }
+     models_support_rem = list(rem_models.keys())
+
+     str_usage = """
+     imgsets(dir_img, auto=1, color=1.5, plot_=0)
+     imgsets(dir_img, color=2)
+     imgsets(dir_img, pad=(300, 300), bgcolor=(73, 162, 127), plot_=0)
+     imgsets(dir_img, contrast=0, color=1.2, plot_=0)
+     imgsets(get_clip(), flip="tb")  # flip top and bottom
+     imgsets(get_clip(), contrast=1, rm=[100, 5, 2])  # 'foreground_threshold', 'background_threshold' and 'erode_structure_size'
+     imgsets(dir_img, rm="birefnet-portrait")  # using a custom model
+     """
+     if run_once_within():
+         print(str_usage)
 
+     def gamma_correction(image, gamma=1.0, v_max=255):
+         # adjust gamma value
+         inv_gamma = 1.0 / gamma
+         lut = [int((i / float(v_max)) ** inv_gamma * int(v_max)) for i in range(int(v_max))]
+         return lut  # image.point(lut)
      def auto_enhance(img):
          """
-         Automatically enhances the image based on its characteristics.
+         Automatically enhances the image based on its characteristics, including brightness,
+         contrast, color range, sharpness, and gamma correction.
+
          Args:
              img (PIL.Image): The input image.
+
          Returns:
-             dict: A dictionary containing the optimal enhancement values.
+             dict: A dictionary containing the optimal enhancement values applied.
+             PIL.Image: The enhanced image.
          """
+         from PIL import Image, ImageEnhance, ImageOps, ImageFilter
+         import numpy as np
          # Determine the bit depth based on the image mode
-         if img.mode in ["1", "L", "P", "RGB", "YCbCr", "LAB", "HSV"]:
-             # 8-bit depth per channel
-             bit_depth = 8
-         elif img.mode in ["RGBA", "CMYK"]:
-             # 8-bit depth per channel + alpha (RGBA) or additional channels (CMYK)
+         try:
+             if img.mode in ["1", "L", "P", "RGB", "YCbCr", "LAB", "HSV"]:
+                 bit_depth = 8
+             elif img.mode in ["RGBA", "CMYK"]:
+                 bit_depth = 8
+             elif img.mode in ["I", "F"]:
+                 bit_depth = 16
+             else:
+                 raise ValueError("Unsupported image mode")
+         except:
              bit_depth = 8
-         elif img.mode in ["I", "F"]:
-             # 16-bit depth per channel (integer or floating-point)
-             bit_depth = 16
-         else:
-             raise ValueError("Unsupported image mode")
-         # Calculate the brightness and contrast for each channel
+
+         # Initialize enhancement factors
+         enhancements = {
+             "brightness": 1.0,
+             "contrast": 0,  # autocontrasted
+             "color": 1.35,
+             "sharpness": 1.0,
+             "gamma": 1.0
+         }
+
+         # Calculate brightness and contrast for each channel
          num_channels = len(img.getbands())
         brightness_factors = []
          contrast_factors = []
          for channel in range(num_channels):
              channel_histogram = img.split()[channel].histogram()
-             brightness = sum(i * w for i, w in enumerate(channel_histogram)) / sum(
-                 channel_histogram
-             )
+             total_pixels = sum(channel_histogram)
+             brightness = sum(i * w for i, w in enumerate(channel_histogram)) / total_pixels
              channel_min, channel_max = img.split()[channel].getextrema()
              contrast = channel_max - channel_min
              # Adjust calculations based on bit depth
-             normalization_factor = 2**bit_depth - 1  # Max value for the given bit depth
+             normalization_factor = 2**bit_depth - 1
              brightness_factor = (
                  1.0 + (brightness - normalization_factor / 2) / normalization_factor
              )
@@ -5438,37 +5668,62 @@ def imgsets(img, **kwargs):
              )
              brightness_factors.append(brightness_factor)
              contrast_factors.append(contrast_factor)
-         # Calculate the average brightness and contrast factors across channels
-         avg_brightness_factor = sum(brightness_factors) / num_channels
-         avg_contrast_factor = sum(contrast_factors) / num_channels
-         return {"brightness": avg_brightness_factor, "contrast": avg_contrast_factor}
 
-     import matplotlib.pyplot as plt
-     from PIL import ImageEnhance, ImageOps
+         # Calculate average brightness and contrast factors across channels
+         enhancements["brightness"] = sum(brightness_factors) / num_channels
+         # Adjust brightness and contrast
+         img = ImageEnhance.Brightness(img).enhance(enhancements["brightness"])
+
+         # # Automatic color enhancement (saturation)
+         # if img.mode == "RGB":
+         #     color_enhancer = ImageEnhance.Color(img)
+         #     color_histogram = np.array(img.histogram()).reshape(3, -1)
+         #     avg_saturation = np.mean([np.std(channel) for channel in color_histogram]) / normalization_factor
+         #     print(avg_saturation)
+         #     enhancements["color"] = min(0, max(0.5, 1.0 + avg_saturation))  # Clamp to a reasonable range
+         #     # img = color_enhancer.enhance(enhancements["color"])
+
+         # Adjust sharpness
+         sharpness_enhancer = ImageEnhance.Sharpness(img)
+         # Use edge detection to estimate sharpness need
+         edges = img.filter(ImageFilter.FIND_EDGES).convert("L")
+         avg_edge_intensity = np.mean(np.array(edges))
+         enhancements["sharpness"] = min(2.0, max(0.5, 1.0 + avg_edge_intensity / normalization_factor))
+         # img = sharpness_enhancer.enhance(enhancements["sharpness"])
+
+         # # Apply gamma correction
+         # def gamma_correction(image, gamma):
+         #     inv_gamma = 1.0 / gamma
+         #     lut = [min(255, max(0, int((i / 255.0) ** inv_gamma * 255))) for i in range(256)]
+         #     return image.point(lut)
+
+         # avg_brightness = np.mean(np.array(img.convert("L"))) / 255
+         # enhancements["gamma"] = min(2.0, max(0.5, 1.0 if avg_brightness > 0.5 else 1.2 - avg_brightness))
+         # img = gamma_correction(img, enhancements["gamma"])
+
+         # Return the enhancements and the enhanced image
+         return enhancements
+
 
      # Load image if input is a file path
      if isinstance(img, str):
          img = load_img(img)
-     img_update = img.copy()
-     # Auto-enhance image if requested
-
-     auto = kwargs.get("auto", False)
-     show = kwargs.get("show", True)
-     show_axis = kwargs.get("show_axis", False)
-     size = kwargs.get("size", None)
-     figsize = kwargs.get("figsize", None)
-     dpi = kwargs.get("dpi", 100)
+     img_update = img.copy()
 
      if auto:
          kwargs = {**auto_enhance(img_update), **kwargs}
-
+     params = ["sharp", "color", "contrast", "bright", "crop", "rotate", 'size', "resize",
+               "thumbnail", "cover", "contain", "filter", "fit", "pad",
+               "rem", "rm", "back", "bg_color", "cut", 'gamma', 'flip']
      for k, value in kwargs.items():
+         k = strcmp(k, params)[0]  # correct the param name
          if "shar" in k.lower():
              enhancer = ImageEnhance.Sharpness(img_update)
              img_update = enhancer.enhance(value)
          elif all(
              ["col" in k.lower(), "bg" not in k.lower(), "background" not in k.lower()]
          ):
+             # *color
              enhancer = ImageEnhance.Color(img_update)
              img_update = enhancer.enhance(value)
          elif "contr" in k.lower():
@@ -5476,8 +5731,11 @@ def imgsets(img, **kwargs):
                  enhancer = ImageEnhance.Contrast(img_update)
                  img_update = enhancer.enhance(value)
              else:
-                 print("autocontrasted")
-                 img_update = ImageOps.autocontrast(img_update)
+                 try:
+                     img_update = ImageOps.autocontrast(img_update)
+                     print("autocontrasted")
+                 except Exception as e:
+                     print(f"Failed 'autocontrasted':{e}")
          elif "bri" in k.lower():
              enhancer = ImageEnhance.Brightness(img_update)
              img_update = enhancer.enhance(value)
@@ -5488,7 +5746,13 @@ def imgsets(img, **kwargs):
              value = detect_angle(img_update, by=value)
              print(f"rotated by {value}°")
              img_update = img_update.rotate(value)
-
+         elif 'flip' in k.lower():
+             if 'l' in value and 'r' in value:
+                 # left/right
+                 img_update = img_update.transpose(Image.FLIP_LEFT_RIGHT)
+             elif any(['u' in value and 'd' in value, 't' in value and 'b' in value]):
+                 # up/down or top/bottom
+                 img_update = img_update.transpose(Image.FLIP_TOP_BOTTOM)
          elif "si" in k.lower():
              if isinstance(value, tuple):
                  value = list(value)
@@ -5500,36 +5764,44 @@ def imgsets(img, **kwargs):
              img_update = ImageOps.cover(img_update, size=value)
          elif "contain" in k.lower():
              img_update = ImageOps.contain(img_update, size=value)
-         elif "fit" in k.lower():
+         elif "fi" in k.lower() and "t" in k.lower():  # filter
              if isinstance(value, dict):
+                 if verbose:
+                     print(f"supported filter: {supported_filters}")
                  for filter_name, filter_value in value.items():
-                     img_update = apply_filter(img_update, filter_name, filter_value)
+                     img_update = apply_filter(img_update, filter_name, filter_value, verbose=verbose)
              else:
                  img_update = ImageOps.fit(img_update, size=value)
          elif "pad" in k.lower():
+             # *ImageOps.pad ensures that the resized image has the exact size specified by the size parameter while maintaining the aspect ratio.
+             # size: A tuple specifying the target size (width, height).
              img_update = ImageOps.pad(img_update, size=value)
          elif "rem" in k.lower() or "rm" in k.lower() or "back" in k.lower():
              from rembg import remove, new_session
-
+             if verbose:
+                 preview(rem_models)
+             model = strcmp(model, models_support_rem)[0]
+             session = new_session(model)
              if isinstance(value, bool):
-                 session = new_session("isnet-general-use")
                  img_update = remove(img_update, session=session)
              elif value and isinstance(value, (int, float, list)):
-                 print("https://github.com/danielgatis/rembg/blob/main/USAGE.md")
+                 if verbose:
+                     print("https://github.com/danielgatis/rembg/blob/main/USAGE.md")
+                     print(f"rm=True # using default setting;\nrm=(240,10,10)\n'foreground_threshold'(240) and 'background_threshold' (10) values used to determine foreground and background pixels. \nThe 'erode_structure_size'(10) parameter specifies the size of the erosion structure to be applied to the mask.")
                  if isinstance(value, int):
                      value = [value]
                  if len(value) < 2:
                      img_update = remove(
                          img_update,
                          alpha_matting=True,
-                         alpha_matting_background_threshold=value,
+                         alpha_matting_background_threshold=value, session=session
                      )
                  elif 2 <= len(value) < 3:
                      img_update = remove(
                          img_update,
                          alpha_matting=True,
                          alpha_matting_background_threshold=value[0],
-                         alpha_matting_foreground_threshold=value[1],
+                         alpha_matting_foreground_threshold=value[1], session=session
                      )
                  elif 3 <= len(value) < 4:
                      img_update = remove(
@@ -5537,17 +5809,15 @@ def imgsets(img, **kwargs):
                          alpha_matting=True,
                          alpha_matting_background_threshold=value[0],
                          alpha_matting_foreground_threshold=value[1],
-                         alpha_matting_erode_size=value[2],
+                         alpha_matting_erode_size=value[2], session=session
                      )
              elif isinstance(value, tuple):  # replace the background color
                  if len(value) == 3:
                      value += (255,)
-                 img_update = remove(img_update, bgcolor=value)
+                 img_update = remove(img_update, bgcolor=value, session=session)
              elif isinstance(value, str):
-                 if confirm_rembg_models(value):
-                     img_update = remove(img_update, session=new_session(value))
-                 else:
-                     img_update = remove(img_update)
+                 # use custom model
+                 img_update = remove(img_update, session=new_session(strcmp(value, models_support_rem)[0]))
          elif "bg" in k.lower() and "color" in k.lower():
              from rembg import remove
 
@@ -5557,8 +5827,11 @@ def imgsets(img, **kwargs):
              if len(value) == 3:
                  value += (255,)
              img_update = remove(img_update, bgcolor=value)
+
+         # elif "ga" in k.lower() and "m" in k.lower():
+         #     img_update = gamma_correction(img_update, gamma=value)
      # Display the image if requested
-     if show:
+     if plot_:
          if figsize is None:
              plt.figure(dpi=dpi)
          else:
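imgsets() now takes its display and background-removal knobs as real keyword parameters, and every kwargs key is fuzzy-corrected against params via strcmp. A sketch mirroring the str_usage examples above (file name hypothetical):

    from py2ls.ips import imgsets

    imgsets("photo.jpg", auto=True, color=1.5, plot_=False)       # auto-enhance plus a color boost
    imgsets("photo.jpg", pad=(300, 300), bgcolor=(73, 162, 127))  # pad to a fixed size
    imgsets("photo.jpg", rm="birefnet-portrait", verbose=True)    # rembg with a custom model
    imgsets("photo.jpg", flip="tb", rotate="median")              # flip top/bottom, auto-rotate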
@@ -9944,13 +10217,17 @@ def get_loc(input_data, user_agent="0413@mygmail.com)", verbose=True):
      # Case 1: Input is a city name (string)
      if isinstance(input_data, str) and not re.match(r"^\d+(\.\d+)?$", input_data):
          location = geolocator.geocode(input_data)
-         if verbose:
-             print(
-                 f"Latitude and Longitude for {input_data}: {location.latitude}, {location.longitude}"
-             )
-         else:
-             print(f"Could not find {input_data}.")
-         return location
+         try:
+             if verbose:
+                 print(
+                     f"Latitude and Longitude for {input_data}: {location.latitude}, {location.longitude}"
+                 )
+             else:
+                 print(f"Could not find {input_data}.")
+             return location
+         except Exception as e:
+             print(f'Error: {e}')
+             return
 
      # Case 2: Input is latitude and longitude (float or tuple)
      elif isinstance(input_data, (float, tuple)):
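Wrapping the verbose branch in try/except means a lookup that returns no location now prints the exception and returns None instead of raising on location.latitude. A sketch:

    from py2ls.ips import get_loc

    loc = get_loc("Heidelberg")             # prints coordinates, returns a geopy location
    loc = get_loc("no-such-place-xyz-123")  # now prints the error and returns None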
@@ -10144,3 +10421,311 @@ def depass(encrypted_code: str, method: str = "AES", key: str = None):
          raise ValueError("SHA256 is a hash function and cannot be decrypted.")
      else:
          raise ValueError("Unsupported decryption method")
+
+ def get_clip(dir_save=None):
+     """
+     Master function to extract content from the clipboard (text, URL, or image).
+
+     Parameters:
+         dir_save (str, optional): If an image is found, save it to this path.
+
+     Returns:
+         dict: A dictionary with extracted content:
+             {
+                 "type": "text" | "url" | "image" | "none",
+                 "content": <str|Image|None>,
+                 "saved_to": <str|None>  # Path if an image is saved
+             }
+     """
+     result = {"type": "none", "content": None, "saved_to": None}
+
+     try:
+         import pyperclip
+         from PIL import ImageGrab, Image
+         import validators
+         # 1. Check for text in the clipboard
+         clipboard_content = pyperclip.paste()
+         if clipboard_content:
+             if validators.url(clipboard_content.strip()):
+                 result["type"] = "url"
+                 result["content"] = clipboard_content.strip()
+
+             else:
+                 result["type"] = "text"
+                 result["content"] = clipboard_content.strip()
+             return clipboard_content.strip()
+
+         # 2. Check for image in the clipboard
+         image = ImageGrab.grabclipboard()
+         if isinstance(image, Image.Image):
+             result["type"] = "image"
+             result["content"] = image
+             if dir_save:
+                 image.save(dir_save)
+                 result["saved_to"] = dir_save
+                 print(f"Image saved to {dir_save}.")
+             else:
+                 print("Image detected in clipboard but not saved.")
+             return image
+         print("No valid text, URL, or image found in clipboard.")
+         return result
+
+     except Exception as e:
+         print(f"An error occurred: {e}")
+         return result
+
+ def keyboard(*args, action='press', n_click=1, interval=0, verbose=False, **kwargs):
+     """
+     Simulates keyboard input using pyautogui.
+
+     Parameters:
+         input_key (str): The key to simulate. Check the list of supported keys with verbose=True.
+         action (str): The action to perform. Options are 'press', 'keyDown', or 'keyUp'.
+         n_click (int): Number of times to press the key (only for 'press' action).
+         interval (float): Time interval between key presses for 'press' action.
+         verbose (bool): Print detailed output, including supported keys and debug info.
+         kwargs: Additional arguments (reserved for future extensions).
+
+     keyboard("command", "d", action="shortcut")
+     """
+     import pyautogui
+     input_key = args
+
+     actions = ['press', 'keyDown', 'keyUp', 'hold', 'release', 'hotkey', 'shortcut']
+     action = strcmp(action, actions)[0]
+     keyboard_keys_ = ['\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(',
+         ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7',
+         '8', '9', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`',
+         'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
+         'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~',
+         'accept', 'add', 'alt', 'altleft', 'altright', 'apps', 'backspace',
+         'browserback', 'browserfavorites', 'browserforward', 'browserhome',
+         'browserrefresh', 'browsersearch', 'browserstop', 'capslock', 'clear',
+         'convert', 'ctrl', 'ctrlleft', 'ctrlright', 'decimal', 'del', 'delete',
+         'divide', 'down', 'end', 'enter', 'esc', 'escape', 'execute', 'f1', 'f10',
+         'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f2', 'f20',
+         'f21', 'f22', 'f23', 'f24', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
+         'final', 'fn', 'hanguel', 'hangul', 'hanja', 'help', 'home', 'insert', 'junja',
+         'kana', 'kanji', 'launchapp1', 'launchapp2', 'launchmail',
+         'launchmediaselect', 'left', 'modechange', 'multiply', 'nexttrack',
+         'nonconvert', 'num0', 'num1', 'num2', 'num3', 'num4', 'num5', 'num6',
+         'num7', 'num8', 'num9', 'numlock', 'pagedown', 'pageup', 'pause', 'pgdn',
+         'pgup', 'playpause', 'prevtrack', 'print', 'printscreen', 'prntscrn',
+         'prtsc', 'prtscr', 'return', 'right', 'scrolllock', 'select', 'separator',
+         'shift', 'shiftleft', 'shiftright', 'sleep', 'space', 'stop', 'subtract', 'tab',
+         'up', 'volumedown', 'volumemute', 'volumeup', 'win', 'winleft', 'winright', 'yen',
+         'command', 'option', 'optionleft', 'optionright']
+     if verbose:
+         print(f"supported keys: {keyboard_keys_}")
+
+     if action not in ['hotkey', 'shortcut']:
+         if not isinstance(input_key, list):
+             input_key = list(input_key)
+         input_key = [strcmp(i, keyboard_keys_)[0] for i in input_key]
+
+     # correct action
+     cmd_keys = ['command', 'option', 'optionleft', 'optionright', 'win', 'winleft', 'winright', 'ctrl', 'ctrlleft', 'ctrlright']
+     try:
+         if any([i in cmd_keys for i in input_key]):
+             action = 'hotkey'
+     except:
+         pass
+
+     print(f"\n{action}: {input_key}")
+     # keyboard
+     if action in ["press"]:
+         # pyautogui.press(input_key, presses=n_click, interval=interval)
+         for _ in range(n_click):
+             for key in input_key:
+                 pyautogui.press(key)
+                 pyautogui.sleep(interval)
+     elif action in ['keyDown', 'hold']:
+         # pyautogui.keyDown(input_key)
+         for _ in range(n_click):
+             for key in input_key:
+                 pyautogui.keyDown(key)
+                 pyautogui.sleep(interval)
+
+     elif action in ['keyUp', 'release']:
+         # pyautogui.keyUp(input_key)
+         for _ in range(n_click):
+             for key in input_key:
+                 pyautogui.keyUp(key)
+                 pyautogui.sleep(interval)
+
+     elif action in ['hotkey', 'shortcut']:
+         pyautogui.hotkey(input_key)
+
+ def mouse(
+     *args,  # loc
+     action: str = "move",
+     duration: float = 0.5,
+     loc_type: str = "absolute",  # 'absolute', 'relative'
+     region: tuple = None,  # (tuple, optional): A region (x, y, width, height) to search for the image.
+     image_path: str = None,
+     wait: float = 0,
+     text: str = None,
+     confidence: float = 0.8,
+     button: str = "left",
+     n_click: int = 1,  # number of clicks
+     interval: float = 0.25,  # time between clicks
+     scroll_amount: int = -500,
+     fail_safe: bool = True,
+     grayscale: bool = False,
+     **kwargs,
+ ):
+     """
+     Master function to handle pyautogui actions.
+
+     Parameters:
+         action (str): The action to perform ('click', 'double_click', 'type', 'drag', 'scroll', 'move', 'locate', etc.).
+         image_path (str, optional): Path to the image for 'locate' or 'click' actions.
+         text (str, optional): Text to type for 'type' action.
+         confidence (float, optional): Confidence level for image recognition (default 0.8).
+         duration (float, optional): Duration for smooth movements in seconds (default 0.5).
+         region (tuple, optional): A region (x, y, width, height) to search for the image.
+         button (str, optional): Mouse button to use ('left', 'right', 'middle').
+         n_click (int, optional): Number of times to click for 'click' actions.
+         interval (float, optional): Interval between clicks for 'click' actions.
+         offset (tuple, optional): Horizontal offset from the located image. y_offset (int, optional): Vertical offset from the located image.
+         scroll_amount (int, optional): Amount to scroll (positive for up, negative for down).
+         fail_safe (bool, optional): Enable/disable pyautogui's fail-safe feature.
+         grayscale (bool, optional): Search for the image in grayscale mode.
+
+     Returns:
+         tuple or None: Returns coordinates for 'locate' actions, otherwise None.
+     """
+     import pyautogui
+     import time
+
+     pyautogui.FAILSAFE = fail_safe  # Enable/disable fail-safe
+     loc_type = "absolute" if "abs" in loc_type else "relative"
+     if len(args) == 1:
+         if isinstance(args[0], str):
+             image_path = args[0]
+             x_offset, y_offset = None, None
+         else:
+             x_offset, y_offset = args
+
+     elif len(args) == 2:
+         x_offset, y_offset = args
+     elif len(args) == 3:
+         x_offset, y_offset, action = args
+     elif len(args) == 4:
+         x_offset, y_offset, action, duration = args
+     else:
+         x_offset, y_offset = None, None
+
+     what_action = [
+         "locate",
+         "click",
+         "double_click",
+         "triple_click",
+         "input",
+         "write",
+         "type",
+         "drag",
+         "move",
+         "scroll",
+         "down",
+         "up",
+         "hold",
+         "press",
+         "release"
+     ]
+     action = strcmp(action, what_action)[0]
+     # get the locations
+     location = None
+     if any([x_offset is None, y_offset is None]):
+         if region is None:
+             w, h = pyautogui.size()
+             region = (0, 0, w, h)
+             print(region)
+         try:
+             print(image_path)
+             location = pyautogui.locateOnScreen(
+                 image_path, confidence=confidence, region=region, grayscale=grayscale
+             )
+             print(pyautogui.center(location))
+         except Exception as e:
+             location = None
+
+     # try:
+     if location:
+         x, y = pyautogui.center(location)
+         x += x_offset if x_offset else 0
+         y += y_offset if y_offset else 0
+         x_offset, y_offset = x, y
+     print(action)
+     if action in ['locate']:
+         x, y = pyautogui.position()
+     elif action in ["click", "double_click", "triple_click"]:
+         # if location:
+         #     x, y = pyautogui.center(location)
+         #     x += x_offset
+         #     y += y_offset
+         #     pyautogui.moveTo(x, y, duration=duration)
+         #     if action == "click":
+         #         pyautogui.click(x=x, y=y, clicks=n_click, interval=interval, button=button)
+         #     elif action == "double_click":
+         #         pyautogui.doubleClick(x=x, y=y, interval=interval, button=button)
+         #     elif action == 'triple_click':
+         #         pyautogui.tripleClick(x=x, y=y, interval=interval, button=button)
+         # else:
+         if action == "click":
+             pyautogui.moveTo(x_offset, y_offset, duration=duration)
+             time.sleep(wait)
+             pyautogui.click(x=x_offset, y=y_offset, clicks=n_click, interval=interval, button=button)
+         elif action == "double_click":
+             pyautogui.moveTo(x_offset, y_offset, duration=duration)
+             time.sleep(wait)
+             pyautogui.doubleClick(x=x_offset, y=y_offset, interval=interval, button=button)
+         elif action == 'triple_click':
+             pyautogui.moveTo(x_offset, y_offset, duration=duration)
+             time.sleep(wait)
+             pyautogui.tripleClick(x=x_offset, y=y_offset, interval=interval, button=button)
+
+     elif action in ["type", "write", "input"]:
+         pyautogui.moveTo(x_offset, y_offset, duration=duration)
+         time.sleep(wait)
+         if text is not None:
+             pyautogui.typewrite(text, interval=interval)
+         else:
+             raise ValueError("Text must be provided for the 'type' action.")
+
+     elif action == "drag":
+         if loc_type == "absolute":
+             pyautogui.dragTo(x_offset, y_offset, duration=duration, button=button)
+         else:
+             pyautogui.dragRel(x_offset, y_offset, duration=duration, button=button)
+
+     elif action in ["move"]:
+         if loc_type == "absolute":
+             pyautogui.moveTo(x_offset, y_offset, duration=duration)
+         else:
+             pyautogui.moveRel(x_offset, y_offset, duration=duration)
+
+     elif action == "scroll":
+         pyautogui.moveTo(x_offset, y_offset, duration=duration)
+         time.sleep(wait)
+         pyautogui.scroll(scroll_amount)
+
+     elif action in ["down", 'hold', 'press']:
+         pyautogui.moveTo(x_offset, y_offset, duration=duration)
+         time.sleep(wait)
+         pyautogui.mouseDown(x_offset, y_offset, button=button, duration=duration)
+
+     elif action in ['up', 'release']:
+         pyautogui.moveTo(x_offset, y_offset, duration=duration)
+         time.sleep(wait)
+         pyautogui.mouseUp(x_offset, y_offset, button=button, duration=duration)
+
+     else:
+         raise ValueError(f"Unsupported action: {action}")
+
+     # except pyautogui.ImageNotFoundException:
+     #     print(
+     #         "Image not found. Ensure the image is visible and parameters are correct."
+     #     )
+     # except Exception as e:
+     #     print(f"An error occurred: {e}")