stouputils-1.14.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stouputils/__init__.py +40 -0
- stouputils/__main__.py +86 -0
- stouputils/_deprecated.py +37 -0
- stouputils/all_doctests.py +160 -0
- stouputils/applications/__init__.py +22 -0
- stouputils/applications/automatic_docs.py +634 -0
- stouputils/applications/upscaler/__init__.py +39 -0
- stouputils/applications/upscaler/config.py +128 -0
- stouputils/applications/upscaler/image.py +247 -0
- stouputils/applications/upscaler/video.py +287 -0
- stouputils/archive.py +344 -0
- stouputils/backup.py +488 -0
- stouputils/collections.py +244 -0
- stouputils/continuous_delivery/__init__.py +27 -0
- stouputils/continuous_delivery/cd_utils.py +243 -0
- stouputils/continuous_delivery/github.py +522 -0
- stouputils/continuous_delivery/pypi.py +130 -0
- stouputils/continuous_delivery/pyproject.py +147 -0
- stouputils/continuous_delivery/stubs.py +86 -0
- stouputils/ctx.py +408 -0
- stouputils/data_science/config/get.py +51 -0
- stouputils/data_science/config/set.py +125 -0
- stouputils/data_science/data_processing/image/__init__.py +66 -0
- stouputils/data_science/data_processing/image/auto_contrast.py +79 -0
- stouputils/data_science/data_processing/image/axis_flip.py +58 -0
- stouputils/data_science/data_processing/image/bias_field_correction.py +74 -0
- stouputils/data_science/data_processing/image/binary_threshold.py +73 -0
- stouputils/data_science/data_processing/image/blur.py +59 -0
- stouputils/data_science/data_processing/image/brightness.py +54 -0
- stouputils/data_science/data_processing/image/canny.py +110 -0
- stouputils/data_science/data_processing/image/clahe.py +92 -0
- stouputils/data_science/data_processing/image/common.py +30 -0
- stouputils/data_science/data_processing/image/contrast.py +53 -0
- stouputils/data_science/data_processing/image/curvature_flow_filter.py +74 -0
- stouputils/data_science/data_processing/image/denoise.py +378 -0
- stouputils/data_science/data_processing/image/histogram_equalization.py +123 -0
- stouputils/data_science/data_processing/image/invert.py +64 -0
- stouputils/data_science/data_processing/image/laplacian.py +60 -0
- stouputils/data_science/data_processing/image/median_blur.py +52 -0
- stouputils/data_science/data_processing/image/noise.py +59 -0
- stouputils/data_science/data_processing/image/normalize.py +65 -0
- stouputils/data_science/data_processing/image/random_erase.py +66 -0
- stouputils/data_science/data_processing/image/resize.py +69 -0
- stouputils/data_science/data_processing/image/rotation.py +80 -0
- stouputils/data_science/data_processing/image/salt_pepper.py +68 -0
- stouputils/data_science/data_processing/image/sharpening.py +55 -0
- stouputils/data_science/data_processing/image/shearing.py +64 -0
- stouputils/data_science/data_processing/image/threshold.py +64 -0
- stouputils/data_science/data_processing/image/translation.py +71 -0
- stouputils/data_science/data_processing/image/zoom.py +83 -0
- stouputils/data_science/data_processing/image_augmentation.py +118 -0
- stouputils/data_science/data_processing/image_preprocess.py +183 -0
- stouputils/data_science/data_processing/prosthesis_detection.py +359 -0
- stouputils/data_science/data_processing/technique.py +481 -0
- stouputils/data_science/dataset/__init__.py +45 -0
- stouputils/data_science/dataset/dataset.py +292 -0
- stouputils/data_science/dataset/dataset_loader.py +135 -0
- stouputils/data_science/dataset/grouping_strategy.py +296 -0
- stouputils/data_science/dataset/image_loader.py +100 -0
- stouputils/data_science/dataset/xy_tuple.py +696 -0
- stouputils/data_science/metric_dictionnary.py +106 -0
- stouputils/data_science/metric_utils.py +847 -0
- stouputils/data_science/mlflow_utils.py +206 -0
- stouputils/data_science/models/abstract_model.py +149 -0
- stouputils/data_science/models/all.py +85 -0
- stouputils/data_science/models/base_keras.py +765 -0
- stouputils/data_science/models/keras/all.py +38 -0
- stouputils/data_science/models/keras/convnext.py +62 -0
- stouputils/data_science/models/keras/densenet.py +50 -0
- stouputils/data_science/models/keras/efficientnet.py +60 -0
- stouputils/data_science/models/keras/mobilenet.py +56 -0
- stouputils/data_science/models/keras/resnet.py +52 -0
- stouputils/data_science/models/keras/squeezenet.py +233 -0
- stouputils/data_science/models/keras/vgg.py +42 -0
- stouputils/data_science/models/keras/xception.py +38 -0
- stouputils/data_science/models/keras_utils/callbacks/__init__.py +20 -0
- stouputils/data_science/models/keras_utils/callbacks/colored_progress_bar.py +219 -0
- stouputils/data_science/models/keras_utils/callbacks/learning_rate_finder.py +148 -0
- stouputils/data_science/models/keras_utils/callbacks/model_checkpoint_v2.py +31 -0
- stouputils/data_science/models/keras_utils/callbacks/progressive_unfreezing.py +249 -0
- stouputils/data_science/models/keras_utils/callbacks/warmup_scheduler.py +66 -0
- stouputils/data_science/models/keras_utils/losses/__init__.py +12 -0
- stouputils/data_science/models/keras_utils/losses/next_generation_loss.py +56 -0
- stouputils/data_science/models/keras_utils/visualizations.py +416 -0
- stouputils/data_science/models/model_interface.py +939 -0
- stouputils/data_science/models/sandbox.py +116 -0
- stouputils/data_science/range_tuple.py +234 -0
- stouputils/data_science/scripts/augment_dataset.py +77 -0
- stouputils/data_science/scripts/exhaustive_process.py +133 -0
- stouputils/data_science/scripts/preprocess_dataset.py +70 -0
- stouputils/data_science/scripts/routine.py +168 -0
- stouputils/data_science/utils.py +285 -0
- stouputils/decorators.py +605 -0
- stouputils/image.py +441 -0
- stouputils/installer/__init__.py +18 -0
- stouputils/installer/common.py +67 -0
- stouputils/installer/downloader.py +101 -0
- stouputils/installer/linux.py +144 -0
- stouputils/installer/main.py +223 -0
- stouputils/installer/windows.py +136 -0
- stouputils/io.py +486 -0
- stouputils/parallel.py +483 -0
- stouputils/print.py +482 -0
- stouputils/py.typed +1 -0
- stouputils/stouputils/__init__.pyi +15 -0
- stouputils/stouputils/_deprecated.pyi +12 -0
- stouputils/stouputils/all_doctests.pyi +46 -0
- stouputils/stouputils/applications/__init__.pyi +2 -0
- stouputils/stouputils/applications/automatic_docs.pyi +106 -0
- stouputils/stouputils/applications/upscaler/__init__.pyi +3 -0
- stouputils/stouputils/applications/upscaler/config.pyi +18 -0
- stouputils/stouputils/applications/upscaler/image.pyi +109 -0
- stouputils/stouputils/applications/upscaler/video.pyi +60 -0
- stouputils/stouputils/archive.pyi +67 -0
- stouputils/stouputils/backup.pyi +109 -0
- stouputils/stouputils/collections.pyi +86 -0
- stouputils/stouputils/continuous_delivery/__init__.pyi +5 -0
- stouputils/stouputils/continuous_delivery/cd_utils.pyi +129 -0
- stouputils/stouputils/continuous_delivery/github.pyi +162 -0
- stouputils/stouputils/continuous_delivery/pypi.pyi +53 -0
- stouputils/stouputils/continuous_delivery/pyproject.pyi +67 -0
- stouputils/stouputils/continuous_delivery/stubs.pyi +39 -0
- stouputils/stouputils/ctx.pyi +211 -0
- stouputils/stouputils/decorators.pyi +252 -0
- stouputils/stouputils/image.pyi +172 -0
- stouputils/stouputils/installer/__init__.pyi +5 -0
- stouputils/stouputils/installer/common.pyi +39 -0
- stouputils/stouputils/installer/downloader.pyi +24 -0
- stouputils/stouputils/installer/linux.pyi +39 -0
- stouputils/stouputils/installer/main.pyi +57 -0
- stouputils/stouputils/installer/windows.pyi +31 -0
- stouputils/stouputils/io.pyi +213 -0
- stouputils/stouputils/parallel.pyi +216 -0
- stouputils/stouputils/print.pyi +136 -0
- stouputils/stouputils/version_pkg.pyi +15 -0
- stouputils/version_pkg.py +189 -0
- stouputils-1.14.0.dist-info/METADATA +178 -0
- stouputils-1.14.0.dist-info/RECORD +140 -0
- stouputils-1.14.0.dist-info/WHEEL +4 -0
- stouputils-1.14.0.dist-info/entry_points.txt +3 -0
stouputils/data_science/data_processing/prosthesis_detection.py
@@ -0,0 +1,359 @@

# pyright: reportUnknownMemberType=false
# pyright: reportUnknownVariableType=false
# pyright: reportUnknownArgumentType=false
# pyright: reportArgumentType=false
# pyright: reportCallIssue=false
# pyright: reportMissingTypeStubs=false

# Imports
from typing import Any

import cv2
import numpy as np
from numpy.typing import NDArray
from PIL import Image

from .image.canny import canny_image


def get_brightness_score(image: NDArray[Any], rect: Any, percentile: int = 95) -> float:
    """ Compute brightness score using high-percentile pixel intensity. """
    x, y, w, h = rect
    roi = image[y:y+h, x:x+w]

    # Use 95th percentile for brightness (high-density areas)
    high_intensity = np.percentile(roi, percentile)

    return float(high_intensity)

def get_contrast_score(image: NDArray[Any], rect: Any) -> float:
    """ Compute contrast score between object and surrounding background. """
    x, y, w, h = rect
    roi = image[y:y+h, x:x+w]

    # Define a slightly larger background region
    pad = max(w, h) // 10  # 10% padding
    x_bg, y_bg = max(0, x-pad), max(0, y-pad)
    w_bg, h_bg = min(image.shape[1] - x_bg, w + 2*pad), min(image.shape[0] - y_bg, h + 2*pad)

    background = image[y_bg:y_bg+h_bg, x_bg:x_bg+w_bg]

    # Compute contrast: difference between ROI and background median
    contrast = np.median(roi) - np.median(background)

    return float(contrast)

def get_corners_distance(rect: Any, image_shape: tuple[int, int]) -> float:
    """ Compute average distance between rectangle corners and image center. """
    x, y, w, h = rect
    # Get the 4 corners of the rectangle
    corners = [
        (x, y),          # Top-left
        (x + w, y),      # Top-right
        (x, y + h),      # Bottom-left
        (x + w, y + h)   # Bottom-right
    ]

    image_center_x = image_shape[1]/2
    image_center_y = image_shape[0]/2

    # Calculate distance from each corner to center
    distances = [
        np.sqrt((corner[0] - image_center_x)**2 + (corner[1] - image_center_y)**2)
        for corner in corners
    ]

    # Return average distance
    return sum(distances) / len(distances)

def get_box_overlap_ratio(box1: Any, box2: Any) -> float:
    """ Compute overlap ratio between two bounding boxes (intersection area over the smaller box's area). """
    x1, y1, w1, h1 = box1
    x2, y2, w2, h2 = box2
    intersection_area = max(0, min(x1+w1, x2+w2) - max(x1, x2)) * max(0, min(y1+h1, y2+h2) - max(y1, y2))
    return intersection_area / min(w1 * h1, w2 * h2)

def get_fracture_score(image: NDArray[Any], rect: Any, padding: int = 20) -> float:
    """ Compute fracture score based on bone fractures around the prosthesis. """
    x, y, w, h = rect

    # Add padding while ensuring we stay within image bounds
    x_pad = max(0, x - padding)
    y_pad = max(0, y - padding)
    w_pad = min(image.shape[1] - x_pad, w + 2*padding)
    h_pad = min(image.shape[0] - y_pad, h + 2*padding)

    # Extract padded ROI
    roi: NDArray[Any] = image[y_pad:y_pad+h_pad, x_pad:x_pad+w_pad]
    roi = cv2.normalize(roi, None, 0, 255, cv2.NORM_MINMAX)

    # Apply edge detection to find potential fracture lines
    edges = cv2.Canny(roi, 50, 150)

    # Count number of edge pixels
    edge_count = np.count_nonzero(edges)

    # Normalize by ROI area to get fracture score
    fracture_score = edge_count / (roi.shape[0] * roi.shape[1])
    return fracture_score


# Custom technique that segments the prosthesis from the image and zooms in on it
def prosthesis_segmentation(image: NDArray[Any], debug_level: int = 0) -> NDArray[Any]:
    """ Segment the prosthesis from the image and crop (zoom in) to its bounding box. """
    # Convert to RGB if needed
    image = np.array(Image.fromarray(image).convert("RGB"))

    # Convert to grayscale if needed
    gray: NDArray[Any] = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    gray = cv2.normalize(gray, None, 0, 255, cv2.NORM_MINMAX)

    # Apply Canny edge detection
    mask: NDArray[Any] = gray.copy()
    mask = cv2.GaussianBlur(mask, (5,5), 0)
    mask = canny_image(mask, 50 / 255, 150 / 255)

    # Small gaps in the edges can break the contours. Try closing them.
    kernel: NDArray[Any] = np.ones((7,7), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

    # Find contours
    contours: list[NDArray[Any]] = list(cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0])

    # Filter contours based on image size
    image_area: int = image.shape[0] * image.shape[1]
    min_area: int = int(0.05 * image_area)  # 5% of image
    max_area: int = int(0.60 * image_area)  # 60% of image

    # Filter contours based on area
    def get_area(rect: Any) -> int:
        return int(np.prod(rect[2:]))
    filtered_contours: list[NDArray[Any]] = [c for c in contours if min_area <= get_area(cv2.boundingRect(c)) <= max_area]

    # Apply median blur to the image (for better filtering)
    for _ in range(10):
        gray = cv2.medianBlur(gray, 15)

    ## STRICT FILTERS
    # Only keep contours whose height is greater than 60% of their width
    if True:
        def is_tall_enough(rect: Any) -> bool:
            return rect[3] > (rect[2] * 0.6)
        filtered_contours = [c for c in filtered_contours if is_tall_enough(cv2.boundingRect(c))]

    # Remove contours that are too thin (height > 4 * width)
    if True:
        def is_too_thin(rect: Any) -> bool:
            return rect[3] > rect[2] * 4
        filtered_contours = [c for c in filtered_contours if not is_too_thin(cv2.boundingRect(c))]

    # Only keep contours that are not touching two sides of the image
    # (excluding bottom unless the contour is wider than 30% of the image width)
    if True:
        def is_touching_two_sides(box: Any) -> bool:
            x, y, w, h = box
            OFFSET: int = 5
            touches_left: bool = x < OFFSET
            touches_right: bool = x + w >= image.shape[1] - OFFSET
            touches_top: bool = y < OFFSET
            touches_bottom: bool = (y + h >= image.shape[0] - OFFSET) if (w > 0.3 * image.shape[1]) else False
            sides_touched: int = sum([touches_left, touches_right, touches_top, touches_bottom])
            return sides_touched >= 2

        filtered_contours = [c for c in filtered_contours if not is_touching_two_sides(cv2.boundingRect(c))]

    # Only keep contours that are not too dark (brightness score > 100)
    if True:
        def is_bright_enough(rect: Any) -> bool:
            return get_brightness_score(gray, rect) > 100
        filtered_contours = [c for c in filtered_contours if is_bright_enough(cv2.boundingRect(c))]

    ## SOFT FILTERS (only applied if there is more than 1 contour)
    # Sort by brightness function
    def sort_by_brightness(c: Any) -> float:
        return get_brightness_score(gray, cv2.boundingRect(c))

    # Only keep contours that have more than 75% of the best contour's brightness
    if True and len(filtered_contours) > 1:
        if filtered_contours:
            best_contour = sorted(filtered_contours, key=sort_by_brightness, reverse=True)[0]
            filtered_contours = [
                c for c in filtered_contours
                if sort_by_brightness(c) > sort_by_brightness(best_contour) * 0.75
            ]

    # Remove contours that are too similar to each other
    if True and len(filtered_contours) > 1:
        def is_different(box1: Any, box2: Any) -> bool:
            return abs(box1[0] - box2[0]) > 10 or abs(box1[1] - box2[1]) > 10
        new_contours = []
        for c in filtered_contours:
            if all(is_different(cv2.boundingRect(c), cv2.boundingRect(other)) for other in new_contours):
                new_contours.append(c)
        filtered_contours = new_contours

    # If a contour's bounding box is at least 80% inside another contour's bounding box, remove the biggest one
    if True and len(filtered_contours) > 1:
        new_contours = []
        for c in sorted(filtered_contours, key=lambda c: get_area(cv2.boundingRect(c))):  # Sort by smallest area first
            if not any(
                get_box_overlap_ratio(cv2.boundingRect(c), cv2.boundingRect(other)) > 0.8
                for other in new_contours
            ):
                new_contours.append(c)
        filtered_contours = new_contours

    # If a contour's bounding box is at least 30% inside another contour's bounding box,
    # keep the one with the highest brightness score
    if True and len(filtered_contours) > 1:
        new_contours = []
        for c in sorted(filtered_contours, key=sort_by_brightness, reverse=True):  # Sort by highest brightness score
            if not any(
                get_box_overlap_ratio(cv2.boundingRect(c), cv2.boundingRect(other)) > 0.3
                for other in new_contours
            ):
                new_contours.append(c)
        filtered_contours = new_contours

    # If the 5th percentile is too dark, remove it
    if True and len(filtered_contours) > 1:
        new_contours = []
        for c in filtered_contours:
            percentile: int = 5
            threshold: int = 100
            if np.percentile(gray, percentile) < threshold:
                new_contours.append(c)
        filtered_contours = new_contours

    # Only keep contours that have more than 85% of the best contour's brightness
    if True and len(filtered_contours) > 1:
        if filtered_contours:
            best_contour = sorted(filtered_contours, key=sort_by_brightness, reverse=True)[0]
            filtered_contours = [
                c for c in filtered_contours
                if sort_by_brightness(c) > sort_by_brightness(best_contour) * 0.85
            ]

    # Now sort by prosthesis score
    scores: dict[int, float] = {
        id(c): get_fracture_score(gray, cv2.boundingRect(c)) +
        get_brightness_score(gray, cv2.boundingRect(c)) / 255
        for c in filtered_contours
    }
    if True and len(filtered_contours) > 1:
        filtered_contours = sorted(
            filtered_contours,
            key=lambda c: scores[id(c)],
            reverse=True  # Highest score first
        )

    # Get the distance from the supposed prosthesis to the image center. Then, remove the contours
    # that are much farther from the center (more than 50% of the image size) than the supposed prosthesis
    def get_distance_to_center(c: Any) -> float:
        x, y = cv2.boundingRect(c)[:2]
        return np.sqrt((x - image.shape[1]/2)**2 + (y - image.shape[0]/2)**2)
    if True and len(filtered_contours) > 1:
        distance = get_distance_to_center(filtered_contours[0])
        max_distance: float = max(image.shape[0], image.shape[1]) / 2
        filtered_contours = [c for c in filtered_contours if abs(get_distance_to_center(c) - distance) < max_distance]

    # If the scores are too similar and the contours are not centered (more than 5% of the image size away), merge them
    if True and len(filtered_contours) > 1:
        max_distance: float = max(image.shape[0], image.shape[1]) / 20
        score_diff: float = abs(scores[id(filtered_contours[0])] - scores[id(filtered_contours[1])])
        contours_not_centered: bool = all(get_distance_to_center(c) > max_distance for c in filtered_contours[:2])
        if score_diff < 0.2 and contours_not_centered:
            filtered_contours = [
                cv2.convexHull(np.concatenate([filtered_contours[0], filtered_contours[1]])),
                *filtered_contours[2:]
            ]

    # Normalize the image
    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX)

    # Debug mode (show the mask)
    if debug_level > 0:
        if debug_level > 2:
            image = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

        # Add bounding boxes to visualize detected regions
        for i, contour in enumerate(filtered_contours):
            x, y, w, h = cv2.boundingRect(contour)
            color = (0, 255, 0) if i == 0 else (255, 0, 0)  # Green for best match, blue for second
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)

    else:
        # Get the best contour's bounding rectangle and crop to it
        if len(filtered_contours) > 0:
            # Get bounding box of best contour
            x, y, w, h = cv2.boundingRect(filtered_contours[0])

            # Add padding
            padding: int = 10
            x = max(0, x - padding)
            y = max(0, y - padding)
            w = min(image.shape[1] - x, w + 2*padding)
            h = min(image.shape[0] - y, h + 2*padding)

            # Crop image to bounding box
            output: NDArray[Any] = image[y:y+h, x:x+w]
            return output

    # No prosthesis found, keep the original image
    return image


# Custom technique that only keeps the brightest parts of the image
def keep_bright_enough_parts(
    image: NDArray[Any],
    window_size: int = 101,
    invert: bool = False,
    debug_level: int = 0
) -> NDArray[Any]:
    """ Keep only the brightest parts of the image.

    For each pixel, keep it if the window around it is brighter than the image's 60th-percentile intensity
    (the 40th percentile is used when invert is True).

    Args:
        image (NDArray[Any]): Image to process.
        window_size (int): Size of the window to consider around each pixel.
        invert (bool): Instead of keeping the brightest parts, keep the darkest parts.
        debug_level (int): Debug level.

    Returns:
        NDArray[Any]: Processed image with only bright parts preserved.
    """
    new_image: NDArray[Any] = image.copy()

    # Create a mask for bright regions
    mask: NDArray[Any] = np.zeros_like(image, dtype=bool)
    image_brightness: float = float(np.percentile(image, 60 if not invert else 40))

    # Blur the image
    image = cv2.GaussianBlur(image, (window_size, window_size), 0)

    # Use vectorized operations instead of a pixel-by-pixel loop
    # (brightness scores for all pixels are computed at once)
    from scipy.ndimage import maximum_filter, minimum_filter

    # Compute the brightest (or darkest, when inverted) value in the window around each pixel
    if invert:
        avg_brightness: NDArray[Any] = minimum_filter(image.astype(float), size=window_size)
    else:
        avg_brightness: NDArray[Any] = maximum_filter(image.astype(float), size=window_size)

    # Create mask where brightness exceeds threshold
    if invert:
        mask = avg_brightness < image_brightness
    else:
        mask = avg_brightness > image_brightness

    # Apply mask to create output image
    new_image[~mask] = 0

    if debug_level > 0:
        return image
    else:
        return new_image
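For context, here is a minimal usage sketch of the two public functions added by this module. It is illustrative only and not part of the package: "xray.png" is a placeholder path, the image is assumed to be a single-channel uint8 radiograph loaded with OpenCV, and keep_bright_enough_parts imports scipy.ndimage at call time, so SciPy must be installed.

    # Illustrative usage sketch (not part of the package)
    import cv2

    from stouputils.data_science.data_processing.prosthesis_detection import (
        keep_bright_enough_parts,
        prosthesis_segmentation,
    )

    # Load a grayscale radiograph ("xray.png" is a placeholder path)
    gray = cv2.imread("xray.png", cv2.IMREAD_GRAYSCALE)

    # Crop the image around the most likely prosthesis contour
    # (returns an RGB crop, or the normalized full image if no suitable contour is found)
    cropped = prosthesis_segmentation(gray)

    # Zero out everything except the brightest regions of the original image
    bright_only = keep_bright_enough_parts(gray, window_size=101)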