py2ls 0.1.9.6__py3-none-any.whl → 0.1.9.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py2ls/.DS_Store
CHANGED
Binary file
py2ls/ips.py
CHANGED
@@ -160,10 +160,10 @@ def search(
     kind="text",
     output="df",
     verbose=False,
-    download=
+    download=False,
     dir_save=dir_save,
+    **kwargs,
 ):
-    from duckduckgo_search import DDGS

     if "te" in kind.lower():
         results = DDGS().text(query, max_results=limit)
@@ -173,8 +173,8 @@ def search(
         print(f'searching "{query}": got the results below\n{res}')
         if download:
             try:
-
-                    url=res.links.tolist(), dir_save=dir_save, verbose=verbose
+                downloader(
+                    url=res.links.tolist(), dir_save=dir_save, verbose=verbose, **kwargs
+                )
             except:
                 if verbose:
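With `**kwargs` now forwarded from `search()` to `downloader()`, download options can be passed straight through the search call. A minimal usage sketch; the query string, save directory, and the `n_try` keyword are illustrative assumptions rather than values taken from the diff:

import py2ls.ips as ips

ips.search(
    "sleep spindle detection",  # hypothetical query
    kind="text",
    download=True,
    dir_save="./downloads",     # hypothetical save directory
    verbose=True,
    n_try=3,                    # example keyword passed through **kwargs to downloader()
)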
@@ -2067,223 +2067,391 @@ def apply_filter(img, *args):
     return img.filter(supported_filters[filter_name])


- [old lines 2070-2286 removed; their content (mostly blank and commented-out lines) is not captured in this diff view]
+def imgsetss(
+    img,
+    sets=None,
+    show=True,
+    show_axis=False,
+    size=None,
+    dpi=100,
+    figsize=None,
+    auto=False,
+    filter_kws=None,
+):
+    """
+    Apply various enhancements and filters to an image using PIL's ImageEnhance and ImageFilter modules.
+
+    Args:
+        img (PIL.Image): The input image.
+        sets (dict): A dictionary specifying the enhancements, filters, and their parameters.
+        show (bool): Whether to display the enhanced image.
+        show_axis (bool): Whether to display axes on the image plot.
+        size (tuple): The size of the thumbnail, cover, contain, or fit operation.
+        dpi (int): Dots per inch for the displayed image.
+        figsize (tuple): The size of the figure for displaying the image.
+        auto (bool): Whether to automatically enhance the image based on its characteristics.
+
+    Returns:
+        PIL.Image: The enhanced image.
+
+    Supported enhancements and filters:
+        - "sharpness": Adjusts the sharpness of the image. Values > 1 increase sharpness, while values < 1 decrease sharpness.
+        - "contrast": Adjusts the contrast of the image. Values > 1 increase contrast, while values < 1 decrease contrast.
+        - "brightness": Adjusts the brightness of the image. Values > 1 increase brightness, while values < 1 decrease brightness.
+        - "color": Adjusts the color saturation of the image. Values > 1 increase saturation, while values < 1 decrease saturation.
+        - "rotate": Rotates the image by the specified angle.
+        - "crop" or "cut": Crops the image. The value should be a tuple specifying the crop box as (left, upper, right, lower).
+        - "size": Resizes the image to the specified dimensions.
+        - "thumbnail": Resizes the image to fit within the given size while preserving aspect ratio.
+        - "cover": Resizes and crops the image to fill the specified size.
+        - "contain": Resizes the image to fit within the specified size, adding borders if necessary.
+        - "fit": Resizes and pads the image to fit within the specified size.
+        - "filter": Applies various filters to the image (e.g., BLUR, CONTOUR, EDGE_ENHANCE).
+
+    Note:
+        The "color" and "enhance" enhancements are not implemented in this function.
+    """
+    supported_filters = [
+        "BLUR",
+        "CONTOUR",
+        "DETAIL",
+        "EDGE_ENHANCE",
+        "EDGE_ENHANCE_MORE",
+        "EMBOSS",
+        "FIND_EDGES",
+        "SHARPEN",
+        "SMOOTH",
+        "SMOOTH_MORE",
+        "MIN_FILTER",
+        "MAX_FILTER",
+        "MODE_FILTER",
+        "MULTIBAND_FILTER",
+        "GAUSSIAN_BLUR",
+        "BOX_BLUR",
+        "MEDIAN_FILTER",
+    ]
+    print(
+        "sets: a dict,'sharp:1.2','color','contrast:'auto' or 1.2','bright', 'crop: x_upperleft,y_upperleft, x_lowerright, y_lowerright','rotation','resize','rem or background'"
+    )
+    print(f"usage: filter_kws 'dict' below:")
+    pp([str(i).lower() for i in supported_filters])
+    print("\nlog:\n")
+
+    def confirm_rembg_models(model_name):
+        models_support = [
+            "u2net",
+            "u2netp",
+            "u2net_human_seg",
+            "u2net_cloth_seg",
+            "silueta",
+            "isnet-general-use",
+            "isnet-anime",
+            "sam",
+        ]
+        if model_name in models_support:
+            print(f"model_name: {model_name}")
+            return model_name
+        else:
+            print(
+                f"{model_name} cannot be found, check the name:{models_support}, default('isnet-general-use') has been used"
+            )
+            return "isnet-general-use"
+
+    def auto_enhance(img):
+        """
+        Automatically enhances the image based on its characteristics.
+        Args:
+            img (PIL.Image): The input image.
+        Returns:
+            dict: A dictionary containing the optimal enhancement values.
+        """
+        # Determine the bit depth based on the image mode
+        if img.mode in ["1", "L", "P", "RGB", "YCbCr", "LAB", "HSV"]:
+            # 8-bit depth per channel
+            bit_depth = 8
+        elif img.mode in ["RGBA", "CMYK"]:
+            # 8-bit depth per channel + alpha (RGBA) or additional channels (CMYK)
+            bit_depth = 8
+        elif img.mode in ["I", "F"]:
+            # 16-bit depth per channel (integer or floating-point)
+            bit_depth = 16
+        else:
+            raise ValueError("Unsupported image mode")
+        # Calculate the brightness and contrast for each channel
+        num_channels = len(img.getbands())
+        brightness_factors = []
+        contrast_factors = []
+        for channel in range(num_channels):
+            channel_histogram = img.split()[channel].histogram()
+            brightness = sum(i * w for i, w in enumerate(channel_histogram)) / sum(
+                channel_histogram
+            )
+            channel_min, channel_max = img.split()[channel].getextrema()
+            contrast = channel_max - channel_min
+            # Adjust calculations based on bit depth
+            normalization_factor = 2**bit_depth - 1  # Max value for the given bit depth
+            brightness_factor = (
+                1.0 + (brightness - normalization_factor / 2) / normalization_factor
+            )
+            contrast_factor = (
+                1.0 + (contrast - normalization_factor / 2) / normalization_factor
+            )
+            brightness_factors.append(brightness_factor)
+            contrast_factors.append(contrast_factor)
+        # Calculate the average brightness and contrast factors across channels
+        avg_brightness_factor = sum(brightness_factors) / num_channels
+        avg_contrast_factor = sum(contrast_factors) / num_channels
+        return {"brightness": avg_brightness_factor, "contrast": avg_contrast_factor}
+
+    # Load image if input is a file path
+    if isinstance(img, str):
+        img = load_img(img)
+    img_update = img.copy()
+    # Auto-enhance image if requested
+    if auto:
+        auto_params = auto_enhance(img_update)
+        sets.update(auto_params)
+    if sets is None:
+        sets = {}
+    for k, value in sets.items():
+        if "shar" in k.lower():
+            enhancer = ImageEnhance.Sharpness(img_update)
+            img_update = enhancer.enhance(value)
+        elif "col" in k.lower() and "bg" not in k.lower():
+            enhancer = ImageEnhance.Color(img_update)
+            img_update = enhancer.enhance(value)
+        elif "contr" in k.lower():
+            if value and isinstance(value, (float, int)):
+                enhancer = ImageEnhance.Contrast(img_update)
+                img_update = enhancer.enhance(value)
+            else:
+                print("autocontrasted")
+                img_update = ImageOps.autocontrast(img_update)
+        elif "bri" in k.lower():
+            enhancer = ImageEnhance.Brightness(img_update)
+            img_update = enhancer.enhance(value)
+        elif "cro" in k.lower() or "cut" in k.lower():
+            img_update = img_update.crop(value)
+        elif "rota" in k.lower():
+            img_update = img_update.rotate(value)
+        elif "si" in k.lower():
+            img_update = img_update.resize(value)
+        elif "thum" in k.lower():
+            img_update.thumbnail(value)
+        elif "cover" in k.lower():
+            img_update = ImageOps.cover(img_update, size=value)
+        elif "contain" in k.lower():
+            img_update = ImageOps.contain(img_update, size=value)
+        elif "fit" in k.lower():
+            img_update = ImageOps.fit(img_update, size=value)
+        elif "pad" in k.lower():
+            img_update = ImageOps.pad(img_update, size=value)
+        elif "rem" in k.lower() or "rm" in k.lower() or "back" in k.lower():
+            if value and isinstance(value, (int, float, list)):
+                print(
+                    'example usage: {"rm":[alpha_matting_background_threshold(20),alpha_matting_foreground_threshold(270),alpha_matting_erode_sive(11)]}'
+                )
+                print("https://github.com/danielgatis/rembg/blob/main/USAGE.md")
+                # ### Parameters:
+                # data (Union[bytes, PILImage, np.ndarray]): The input image data.
+                # alpha_matting (bool, optional): Flag indicating whether to use alpha matting. Defaults to False.
+                # alpha_matting_foreground_threshold (int, optional): Foreground threshold for alpha matting. Defaults to 240.
+                # alpha_matting_background_threshold (int, optional): Background threshold for alpha matting. Defaults to 10.
+                # alpha_matting_erode_size (int, optional): Erosion size for alpha matting. Defaults to 10.
+                # session (Optional[BaseSession], optional): A session object for the 'u2net' model. Defaults to None.
+                # only_mask (bool, optional): Flag indicating whether to return only the binary masks. Defaults to False.
+                # post_process_mask (bool, optional): Flag indicating whether to post-process the masks. Defaults to False.
+                # bgcolor (Optional[Tuple[int, int, int, int]], optional): Background color for the cutout image. Defaults to None.
+                # ###
+                if isinstance(value, int):
+                    value = [value]
+                if len(value) < 2:
+                    img_update = remove(
+                        img_update,
+                        alpha_matting=True,
+                        alpha_matting_background_threshold=value,
+                    )
+                elif 2 <= len(value) < 3:
+                    img_update = remove(
+                        img_update,
+                        alpha_matting=True,
+                        alpha_matting_background_threshold=value[0],
+                        alpha_matting_foreground_threshold=value[1],
+                    )
+                elif 3 <= len(value) < 4:
+                    img_update = remove(
+                        img_update,
+                        alpha_matting=True,
+                        alpha_matting_background_threshold=value[0],
+                        alpha_matting_foreground_threshold=value[1],
+                        alpha_matting_erode_size=value[2],
+                    )
+            if isinstance(value, tuple):  # replace the background color
+                if len(value) == 3:
+                    value += (255,)
+                img_update = remove(img_update, bgcolor=value)
+            if isinstance(value, str):
+                if confirm_rembg_models(value):
+                    img_update = remove(img_update, session=new_session(value))
+                else:
+                    img_update = remove(img_update)
+        elif "bgcolor" in k.lower():
+            if isinstance(value, list):
+                value = tuple(value)
+            if isinstance(value, tuple):  # replace the background color
+                if len(value) == 3:
+                    value += (255,)
+                img_update = remove(img_update, bgcolor=value)
+    if filter_kws:
+        for filter_name, filter_value in filter_kws.items():
+            img_update = apply_filter(img_update, filter_name, filter_value)
+    # Display the image if requested
+    if show:
+        if figsize is None:
+            plt.figure(dpi=dpi)
+        else:
+            plt.figure(figsize=figsize, dpi=dpi)
+        plt.imshow(img_update)
+        plt.axis("on") if show_axis else plt.axis("off")
+    return img_update
+
+
+from sklearn.decomposition import PCA
+from skimage import transform, feature, filters, measure
+from skimage.color import rgb2gray
+from scipy.fftpack import fftshift, fft2
+import numpy as np
+import cv2  # Used for template matching
+
+
+def crop_black_borders(image):
+    """Crop the black borders from a rotated image."""
+    # Convert the image to grayscale if it's not already
+    if image.ndim == 3:
+        gray_image = color.rgb2gray(image)
+    else:
+        gray_image = image
+
+    # Find all the non-black (non-zero) pixels
+    mask = gray_image > 0  # Mask for non-black pixels (assuming black is zero)
+    coords = np.column_stack(np.where(mask))
+
+    # Get the bounding box of non-black pixels
+    if coords.any():  # Check if there are any non-black pixels
+        y_min, x_min = coords.min(axis=0)
+        y_max, x_max = coords.max(axis=0)
+
+        # Crop the image to the bounding box
+        cropped_image = image[y_min : y_max + 1, x_min : x_max + 1]
+    else:
+        # If the image is completely black (which shouldn't happen), return the original image
+        cropped_image = image
+
+    return cropped_image
+
+
+def detect_angle(image, by="median", template=None):
+    """Detect the angle of rotation using various methods."""
+    # Convert to grayscale
+    gray_image = rgb2gray(image)
+
+    # Detect edges using Canny edge detector
+    edges = feature.canny(gray_image, sigma=2)
+
+    # Use Hough transform to detect lines
+    lines = transform.probabilistic_hough_line(edges)
+
+    if not lines and any(["me" in by, "pca" in by]):
+        print("No lines detected. Adjust the edge detection parameters.")
+        return 0
+
+    # Hough Transform-based angle detection (Median/Mean)
+    if "me" in by:
+        angles = []
+        for line in lines:
+            (x0, y0), (x1, y1) = line
+            angle = np.arctan2(y1 - y0, x1 - x0) * 180 / np.pi
+            if 80 < abs(angle) < 100:
+                angles.append(angle)
+        if not angles:
+            return 0
+        if "di" in by:
+            median_angle = np.median(angles)
+            rotation_angle = (
+                90 - median_angle if median_angle > 0 else -90 - median_angle
+            )
+
+            return rotation_angle
+        else:
+            mean_angle = np.mean(angles)
+            rotation_angle = 90 - mean_angle if mean_angle > 0 else -90 - mean_angle
+
+            return rotation_angle
+
+    # PCA-based angle detection
+    elif "pca" in by:
+        y, x = np.nonzero(edges)
+        if len(x) == 0:
+            return 0
+        pca = PCA(n_components=2)
+        pca.fit(np.vstack((x, y)).T)
+        angle = np.arctan2(pca.components_[0, 1], pca.components_[0, 0]) * 180 / np.pi
+        return angle
+
+    # Gradient Orientation-based angle detection
+    elif "gra" in by:
+        gx, gy = np.gradient(gray_image)
+        angles = np.arctan2(gy, gx) * 180 / np.pi
+        hist, bin_edges = np.histogram(angles, bins=360, range=(-180, 180))
+        return bin_edges[np.argmax(hist)]
+
+    # Template Matching-based angle detection
+    elif "temp" in by:
+        if template is None:
+            # Automatically extract a template from the center of the image
+            height, width = gray_image.shape
+            center_x, center_y = width // 2, height // 2
+            size = (
+                min(height, width) // 4
+            )  # Size of the template as a fraction of image size
+            template = gray_image[
+                center_y - size : center_y + size, center_x - size : center_x + size
+            ]
+        best_angle = None
+        best_corr = -1
+        for angle in range(0, 180, 1):  # Checking every degree
+            rotated_template = transform.rotate(template, angle)
+            res = cv2.matchTemplate(gray_image, rotated_template, cv2.TM_CCOEFF)
+            _, max_val, _, _ = cv2.minMaxLoc(res)
+            if max_val > best_corr:
+                best_corr = max_val
+                best_angle = angle
+        return best_angle
+
+    # Image Moments-based angle detection
+    elif "mo" in by:
+        moments = measure.moments_central(gray_image)
+        angle = (
+            0.5
+            * np.arctan2(2 * moments[1, 1], moments[0, 2] - moments[2, 0])
+            * 180
+            / np.pi
+        )
+        return angle
+
+    # Fourier Transform-based angle detection
+    elif "fft" in by:
+        f = fft2(gray_image)
+        fshift = fftshift(f)
+        magnitude_spectrum = np.log(np.abs(fshift) + 1)
+        rows, cols = magnitude_spectrum.shape
+        r, c = np.unravel_index(np.argmax(magnitude_spectrum), (rows, cols))
+        angle = np.arctan2(r - rows // 2, c - cols // 2) * 180 / np.pi
+        return angle
+
+    else:
+        print(f"Unknown method {by}")
+        return 0


 def imgsets(img, **kwargs):
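The new `imgsetss()` applies a chain of Pillow enhancements driven by a `sets` dict whose keys are matched by substring, and `detect_angle()` estimates a rotation angle from edges via Hough-line statistics ("median"/"mean"), PCA, gradient orientation, template matching, image moments, or an FFT peak. A minimal usage sketch, assuming both helpers are importable from `py2ls.ips` as the hunk above suggests, and with `photo.png` as a hypothetical file:

import numpy as np
from PIL import Image
from py2ls.ips import imgsetss, detect_angle

img = Image.open("photo.png")  # hypothetical input image

# substring-matched keys: "sharp" -> Sharpness, "contrast" -> Contrast/autocontrast, "rota" -> rotate
out = imgsetss(img, sets={"sharp": 1.2, "contrast": "auto", "rotate": 3}, show=False)

# estimate page skew from the median angle of detected Hough lines
angle = detect_angle(np.array(img.convert("RGB")), by="median")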
@@ -2444,7 +2612,11 @@ def imgsets(img, **kwargs):
         elif "cro" in k.lower() or "cut" in k.lower():
             img_update = img_update.crop(value)
         elif "rota" in k.lower():
+            if isinstance(value, str):
+                value = detect_angle(img_update, by=value)
+                print(f"rotated by {value}°")
             img_update = img_update.rotate(value)
+
         elif "si" in k.lower():
             img_update = img_update.resize(value)
         elif "thum" in k.lower():
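Inside `imgsets()`, a string value for the rotation key now routes through `detect_angle()` instead of being treated as a fixed angle. A short sketch (the `img` object is assumed to be a PIL image already loaded by the caller):

from py2ls.ips import imgsets

img_fixed = imgsets(img, rotate=15)        # numeric: rotate by 15 degrees as before
img_auto = imgsets(img, rotate="median")   # string: angle comes from detect_angle(..., by="median")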
py2ls/netfinder.py
CHANGED
@@ -204,11 +204,18 @@ def get_proxy():


 # proxies_glob=get_proxy()
-def get_soup(url,
-    _, soup_ = fetch_all(url,
+def get_soup(url, **kwargs):
+    _, soup_ = fetch_all(url, **kwargs)
     return soup_


+def get_cookies(url, login={"username": "your_username", "password": "your_password"}):
+    session = requests.Session()
+    response = session.post(url, login)
+    cookies_dict = session.cookies.get_dict()
+    return cookies_dict
+
+
 def fetch_all(
     url,
     parser="lxml",
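`get_cookies()` is a small helper: it POSTs the login payload to the given URL with a `requests.Session` and returns that session's cookies as a plain dict, which `fetch_all()` later replays into Selenium (see the hunk further down). A sketch with placeholder URL and credentials:

from py2ls.netfinder import get_cookies

cookies = get_cookies(
    "https://example.com/login",                         # hypothetical login endpoint
    login={"username": "alice", "password": "s3cret"},   # placeholder credentials
)
print(cookies)  # e.g. {"sessionid": "..."} depending on the site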
@@ -216,6 +223,8 @@ def fetch_all(
     by=By.TAG_NAME,
     timeout=10,
     retry=2,
+    wait=0,
+    scroll_try=3,
     login_url=None,
     username=None,
     password=None,
@@ -230,6 +239,7 @@ def fetch_all(
     javascript=True,  # Add JavaScript option
     disable_images=False,  # Add option to disable images
     iframe_name=None,
+    login_dict=None,
 ):  # Add option to handle iframe # lxml is faster, # parser="html.parser"
     try:
         # # Generate a random user-agent string
@@ -302,6 +312,23 @@ def fetch_all(

         driver_ = webdriver.Chrome(service=service, options=chrome_options)

+        # implicit wait
+        if 3 < wait < 5:
+            wait_ = random.uniform(3, 5)
+        elif 5 <= wait < 8:
+            wait_ = random.uniform(5, 8)
+        elif 8 <= wait < 12:
+            wait_ = random.uniform(8, 10)
+        else:
+            wait_ = 0
+        driver_.implicitly_wait(wait_)
+
+        if login_url and login_dict:
+            cookies = get_cookies(url=login_url, login=login_dict)
+            driver_.get(url)
+            for cookie_name, cookie_value in cookies.items():
+                driver_.add_cookie({"name": cookie_name, "value": cookie_value})
+
         if not javascript:
             driver_.execute_cdp_cmd(
                 "Emulation.setScriptExecutionDisabled", {"value": True}
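The implicit-wait block does not hand `wait` to Selenium directly; it maps it into a randomized bucket: 3 < wait < 5 draws from uniform(3, 5), 5 ≤ wait < 8 from uniform(5, 8), 8 ≤ wait < 12 from uniform(8, 10), and anything else (wait ≤ 3 or wait ≥ 12) becomes 0, i.e. no implicit wait. A standalone sketch of the same mapping:

import random

def bucketed_wait(wait: float) -> float:
    # mirrors the branches added to fetch_all(); the result is what gets
    # passed to driver_.implicitly_wait()
    if 3 < wait < 5:
        return random.uniform(3, 5)
    elif 5 <= wait < 8:
        return random.uniform(5, 8)
    elif 8 <= wait < 12:
        return random.uniform(8, 10)
    return 0  # wait <= 3 or wait >= 12 disables the implicit wait

print(bucketed_wait(6))  # a value between 5 and 8
print(bucketed_wait(2))  # 0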
@@ -330,10 +357,20 @@ def fetch_all(
         # WebDriverWait(driver, timeout).until(
         # EC.presence_of_element_located((by, where))
         # )
-
+
+        # set up polling
+        for attempt in range(scroll_try):
+            page_source = driver_.page_source
+            content = BeautifulSoup(page_source, "html.parser")
+            if content and content.find_all(by):
+                break
+            sleep(
+                random.uniform(2, 4)
+            )  # Wait for a random time before polling again
+
         driver_.quit()

-        content = BeautifulSoup(page_source, "html.parser")
+        # content = BeautifulSoup(page_source, "html.parser")
         if content:
             return "text/html", content
         else:
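Putting the new knobs together: `wait` sets the bucketed implicit wait, `scroll_try` bounds how often the rendered DOM is re-polled for the target tag, and `login_url`/`login_dict` replay session cookies obtained via `get_cookies()`. A hedged example; the URLs and credentials are placeholders, all other arguments keep their defaults, and whether the Selenium branch is actually taken depends on options this diff does not show:

from py2ls.netfinder import fetch_all

content_type, soup = fetch_all(
    "https://example.com/members/articles",   # hypothetical target page
    wait=6,                                    # implicit wait drawn from uniform(5, 8)
    scroll_try=3,                              # poll the rendered DOM up to 3 times
    login_url="https://example.com/login",     # hypothetical login endpoint
    login_dict={"username": "alice", "password": "s3cret"},
)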
@@ -627,13 +664,17 @@ def downloader(
     else:
         file_links = []
         print("No files detected")
-
+    if isinstance(file_links, str):
+        file_links_all = [file_links]
+    elif isinstance(file_links, list):
+        file_links_all.extend(file_links)
     if dir_save:
         if rm_folder:
             ips.rm_folder(dir_save)
         if verbose:
             print(f"\n... attempting to download to local\n")
         fnames = [file_link.split("/")[-1] for file_link in file_links_all]
+
         for idx, file_link in enumerate(file_links_all):
             headers = {"User-Agent": user_agent()}
             itry = 0  # Retry logic with exception handling
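With the normalization above, `downloader()` tolerates `file_links` being a single URL string as well as a list before it iterates `file_links_all`. From the caller's side only `url`, `dir_save`, and `verbose` are visible in this diff, so the sketch below sticks to those; the URL and output folder are placeholders, and whether a given link is detected as a downloadable file depends on logic outside this hunk:

from py2ls.netfinder import downloader

downloader(
    url="https://example.com/files/report.pdf",  # hypothetical direct file link
    dir_save="./downloads",                      # hypothetical output folder
    verbose=True,
)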
@@ -673,6 +714,7 @@ def downloader(
                     print(
                         f"Failed to download file: HTTP status code {response.status_code}"
                     )
+                    break
             except (ChunkedEncodingError, ConnectionError) as e:
                 print(f"Attempt {itry+1} failed: {e}. Retrying in a few seconds...")
                 # time.sleep(random.uniform(0, 2))  # Random sleep to mitigate server issues
@@ -683,13 +725,13 @@ def downloader(
         if itry == n_try:
             print(f"Failed to download {file_link} after {n_try} attempts.")

-    print(f"\n{len(fnames)} files were downloaded:")
+    # print(f"\n{len(fnames)} files were downloaded:")
     if verbose:
         if corrected_fname:
             pp(corrected_fname)
+            print(f"\n\nsaved @:\n{dir_save}")
         else:
             pp(fnames)
-    print(f"\n\nsaved @:\n{dir_save}")


 def find_img(url, driver="request", dir_save="images", rm_folder=False, verbose=True):
@@ -1303,6 +1345,7 @@ def search(
     verbose=False,
     download=False,
     dir_save=dir_save,
+    **kwargs,
 ):

     if "te" in kind.lower():
@@ -1313,7 +1356,9 @@ def search(
         print(f'searching "{query}": got the results below\n{res}')
         if download:
             try:
-                downloader(
+                downloader(
+                    url=res.links.tolist(), dir_save=dir_save, verbose=verbose, **kwargs
+                )
             except:
                 if verbose:
                     print(f"failed link")
py2ls-0.1.9.8.dist-info/RECORD
CHANGED
@@ -1,4 +1,4 @@
-py2ls/.DS_Store,sha256=
+py2ls/.DS_Store,sha256=BloZZz2vlFVfF-I3X7ZsqXusvqOawJMx7erKcnIP-b0,6148
 py2ls/.git/COMMIT_EDITMSG,sha256=5xj-jWMbrdOc9m7gSn-UcsAQ9FMNvWSbLWSsrOUIO5w,7
 py2ls/.git/FETCH_HEAD,sha256=6cJaQzb6VhkvNNm-KsABX6R28FNKZB8qMmqacDEP_dQ,100
 py2ls/.git/HEAD,sha256=KNJb-Cr0wOK3L1CVmyvrhZ4-YLljCl6MYD2tTdsrboA,21
@@ -173,14 +173,14 @@ py2ls/db2ls.py,sha256=MMfFX47aIPIyu7fU9aPvX9lbPRPYOpJ_VXwlnWk-8qo,13615
 py2ls/doc.py,sha256=xN3g1OWfoaGUhikbJ0NqbN5eKy1VZVvWwRlhHMgyVEc,4243
 py2ls/export_requirements.py,sha256=x2WgUF0jYKz9GfA1MVKN-MdsM-oQ8yUeC6Ua8oCymio,2325
 py2ls/freqanalysis.py,sha256=F4218VSPbgL5tnngh6xNCYuNnfR-F_QjECUUxrPYZss,32594
-py2ls/ips.py,sha256=
-py2ls/netfinder.py,sha256=
+py2ls/ips.py,sha256=gVHch6c3-FAv-vT6LlBI9gxS-SdHTDpIP4z2JH3EqGg,105424
+py2ls/netfinder.py,sha256=oo8Nyqe9Oi3TON7YS9TCs2RBUjPY3KY7772DrsNPkyU,50679
 py2ls/plot.py,sha256=Cpx0cZoU-TN-q3Awmk75DYZsN4nGpnB_dHh262l_-Is,86130
 py2ls/setuptools-70.1.0-py3-none-any.whl,sha256=2bi3cUVal8ip86s0SOvgspteEF8SKLukECi-EWmFomc,882588
 py2ls/sleep_events_detectors.py,sha256=bQA3HJqv5qnYKJJEIhCyhlDtkXQfIzqksnD0YRXso68,52145
 py2ls/stats.py,sha256=U2yeTYUkInI4JXtfhdSbSAzna_h8rh8MZmY31o51_EU,38169
 py2ls/translator.py,sha256=bc5FB-wqC4TtQz9gyCP1mE38HqNRJ_pmuRIgKnAlMzM,30581
 py2ls/wb_detector.py,sha256=7y6TmBUj9exCZeIgBAJ_9hwuhkDh1x_-yg4dvNY1_GQ,6284
-py2ls-0.1.9.
-py2ls-0.1.9.
-py2ls-0.1.9.
+py2ls-0.1.9.8.dist-info/METADATA,sha256=j6jzJhMF2pbmdLbqxP9p4GmvKXQvem6N_lhat4EalxE,20017
+py2ls-0.1.9.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+py2ls-0.1.9.8.dist-info/RECORD,,