radnn 0.0.7.2__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- radnn/__init__.py +7 -5
- radnn/core.py +44 -28
- radnn/data/__init__.py +8 -0
- radnn/data/data_feed.py +147 -0
- radnn/data/dataset_base.py +3 -5
- radnn/data/dataset_folder.py +55 -0
- radnn/data/image_dataset.py +0 -2
- radnn/data/image_dataset_files.py +175 -0
- radnn/data/preprocess/normalizer.py +7 -1
- radnn/data/preprocess/standardizer.py +9 -2
- radnn/data/sample_set.py +30 -17
- radnn/data/sequence_dataset.py +0 -2
- radnn/data/subset_type.py +45 -0
- radnn/data/tf_classification_data_feed.py +113 -0
- radnn/errors.py +29 -0
- radnn/evaluation/evaluate_classification.py +7 -3
- radnn/experiment/ml_experiment.py +29 -0
- radnn/experiment/ml_experiment_config.py +61 -32
- radnn/experiment/ml_experiment_env.py +6 -2
- radnn/experiment/ml_experiment_store.py +0 -1
- radnn/images/__init__.py +2 -0
- radnn/images/colors.py +28 -0
- radnn/images/image_processor.py +513 -0
- radnn/learn/learning_algorithm.py +4 -3
- radnn/ml_system.py +59 -18
- radnn/plots/plot_auto_multi_image.py +27 -17
- radnn/plots/plot_confusion_matrix.py +7 -4
- radnn/plots/plot_learning_curve.py +7 -3
- radnn/plots/plot_multi_scatter.py +7 -3
- radnn/plots/plot_roc.py +8 -4
- radnn/plots/plot_voronoi_2d.py +8 -5
- radnn/stats/__init__.py +1 -0
- radnn/stats/descriptive_stats.py +45 -0
- radnn/system/files/__init__.py +1 -0
- radnn/system/files/csvfile.py +8 -5
- radnn/system/files/filelist.py +40 -0
- radnn/system/files/fileobject.py +9 -4
- radnn/system/files/imgfile.py +8 -4
- radnn/system/files/jsonfile.py +8 -4
- radnn/system/files/picklefile.py +8 -4
- radnn/system/files/textfile.py +37 -7
- radnn/system/filestore.py +36 -18
- radnn/system/filesystem.py +8 -3
- radnn/system/hosts/colab_host.py +29 -0
- radnn/system/hosts/linux_host.py +29 -0
- radnn/system/hosts/windows_host.py +39 -1
- radnn/system/tee_logger.py +7 -3
- radnn/system/threads/__init__.py +5 -0
- radnn/system/threads/semaphore_lock.py +58 -0
- radnn/system/threads/thread_context.py +175 -0
- radnn/system/threads/thread_safe_queue.py +163 -0
- radnn/system/threads/thread_safe_string_collection.py +66 -0
- radnn/system/threads/thread_worker.py +68 -0
- radnn/utils.py +96 -2
- {radnn-0.0.7.2.dist-info → radnn-0.0.8.dist-info}/METADATA +1 -1
- radnn-0.0.8.dist-info/RECORD +70 -0
- radnn-0.0.7.2.dist-info/RECORD +0 -53
- {radnn-0.0.7.2.dist-info → radnn-0.0.8.dist-info}/LICENSE.txt +0 -0
- {radnn-0.0.7.2.dist-info → radnn-0.0.8.dist-info}/WHEEL +0 -0
- {radnn-0.0.7.2.dist-info → radnn-0.0.8.dist-info}/top_level.txt +0 -0
radnn/images/image_processor.py
ADDED

@@ -0,0 +1,513 @@
+import numpy as np
+import cv2
+from PIL import Image, ImageChops, ImageFilter, ImageDraw
+from .colors import color
+
+phi=(1.0+np.sqrt(5.0))/2.0
+
+class ImageProcessor(object):
+  # --------------------------------------------------------------------------------------------------------------------
+  def __init__(self, pixels=None, filename=None, image=None, bgcolor=None):
+    self.pixels = pixels
+    self.filename = filename
+    self.image = image
+    self.bgcolor = bgcolor
+    self._size = None
+
+    if self.pixels is not None:
+      if self.pixels is not None:
+        nShape = self.pixels.shape
+        if len(nShape) == 4:
+          self._size = nShape[1:4]
+        elif len(nShape) == 3:
+          self._size = nShape[1:3]
+        elif len(nShape) == 2:
+          self._size = nShape
+
+    if image is not None:
+      self.pixels = np.array(self.image)
+  # --------------------------------------------------------------------------------------------------------------------
+  @property
+  def size(self):
+    if self.image is not None:
+      return self.image.size
+    else:
+      return self._size
+  # --------------------------------------------------------------------------------------------------------------------
+  def load(self, filename=None):
+    if filename is not None:
+      self.filename = filename
+    self.image = Image.open(self.filename).convert("RGB")
+    self.pixels = np.array(self.image)
+    return self
+  # --------------------------------------------------------------------------------------------------------------------
+  def pad_square_with_edges(self, size=(227, 227)):
+    img = self.image
+    img_width, img_height = img.size
+
+    # Determine the scaling factor to maintain aspect ratio
+    if img_width > img_height:
+      bIsLandscape = True
+      ratio = img_height / img_width
+      new_width = size[0]
+      new_height = int(size[0] * ratio)
+    else:
+      bIsLandscape = False
+      ratio = img_width / img_height
+      new_width = int(size[1] * ratio)
+      new_height = size[1]
+
+    # Resize the image while maintaining aspect ratio
+    img = img.resize((new_width, new_height), Image.LANCZOS)
+
+    # Create a blank canvas for the final image
+    Result = np.zeros((size[1], size[0], 3), dtype=np.uint8)
+    img_array = np.array(img)
+
+    # Center the resized image on the new canvas
+    offset_x = (size[0] - new_width) // 2
+    offset_y = (size[1] - new_height) // 2
+    Result[offset_y:offset_y + new_height, offset_x:offset_x + new_width, :] = img_array
+
+    # Fill edges with repeated stripes and blur for a smooth effect
+    if bIsLandscape:
+      # Top and bottom padding
+      first_row = Result[offset_y, :, :]
+      last_row = Result[offset_y + new_height - 1, :, :]
+
+      top_rows = np.repeat(first_row.reshape(1, size[0], 3), offset_y, axis=0)
+      bottom_rows = np.repeat(last_row.reshape(1, size[0], 3), size[1] - offset_y - new_height, axis=0)
+
+      # Apply blur to soften edges
+      im_top = Image.fromarray(top_rows).filter(ImageFilter.BLUR)
+      im_bottom = Image.fromarray(bottom_rows).filter(ImageFilter.BLUR)
+
+      Result[0:offset_y, :, :] = np.array(im_top)
+      Result[offset_y + new_height:size[1], :, :] = np.array(im_bottom)
+    else:
+      # Left and right padding
+      first_col = Result[:, offset_x, :]
+      last_col = Result[:, offset_x + new_width - 1, :]
+
+      left_cols = np.repeat(first_col.reshape(size[1], 1, 3), offset_x, axis=1)
+      right_cols = np.repeat(last_col.reshape(size[1], 1, 3), size[0] - offset_x - new_width, axis=1)
+
+      # Apply blur to soften edges
+      im_left = Image.fromarray(left_cols).filter(ImageFilter.BLUR)
+      im_right = Image.fromarray(right_cols).filter(ImageFilter.BLUR)
+
+      Result[:, 0:offset_x, :] = np.array(im_left)
+      Result[:, offset_x + new_width:size[0], :] = np.array(im_right)
+
+    # Convert back to a PIL image
+    final_img = Image.fromarray(Result)
+    return ImageProcessor(image=final_img, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def _get_background_color(self, bgcolor):
+    if bgcolor is not None:
+      self.bgcolor = bgcolor
+    if self.bgcolor is None:
+      self.bgcolor = "black"
+    if isinstance(self.bgcolor, str):
+      nBGColor = color(self.bgcolor)
+    else:
+      nBGColor = self.bgcolor
+    return nBGColor
+  # --------------------------------------------------------------------------------------------------------------------
+  def roll(self, shift_x=0, shift_y=0, bgcolor=None):
+    nBGColor = self._get_background_color(bgcolor)
+    img = self.image
+
+    arr = np.array(img)
+
+    # Apply roll effect
+    arr = np.roll(arr, shift_x, axis=1)  # Shift horizontally
+    arr = np.roll(arr, shift_y, axis=0)  # Shift vertically
+
+    img_out = Image.fromarray(arr)
+
+
+    return ImageProcessor(image=img_out, filename=self.filename, bgcolor=self.bgcolor)
+
+  # --------------------------------------------------------------------------------------------------------------------
+  def zoom_center(self, scale=1.2, bgcolor=None):
+    nBGColor = self._get_background_color(bgcolor)
+    img = self.pixels
+    w, h = self.size
+
+    tx, ty = 0, 0
+    if scale < 1.0:
+      tx = int((w * (1.0 - scale)) // 2)
+      ty = int((h * (1.0 - scale)) // 2)
+    else:
+      tx = - int(w*(scale - 1.0) // 2)
+      ty = - int(w*(scale - 1.0) // 2)
+
+    # Transformation matrix for scaling and translation
+    M = np.array([[scale, 0, tx],
+                  [0, scale, ty]], dtype=np.float32)
+
+    # Apply affine warp
+    result = cv2.warpAffine(img, M, (w, h))
+
+    oImage = Image.fromarray(result)
+    oCropped = oImage.crop((tx, ty, w - tx - 1, h - ty - 1)).convert("RGBA")
+    oNewImageWithBackground = Image.new("RGBA", self.size, nBGColor)
+    oNewImageWithBackground.paste(oCropped, (tx, ty), oCropped)
+
+    return ImageProcessor(image=oNewImageWithBackground, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def zoom_pan(self, scale=1.2, tx=20, ty=20, bgcolor=None):
+    nBGColor = self._get_background_color(bgcolor)
+    img = self.pixels
+    w, h = self.size
+
+    # Transformation matrix for scaling and translation
+    M = np.array([[scale, 0, tx],
+                  [0, scale, ty]], dtype=np.float32)
+
+    # Apply affine warp
+    result = cv2.warpAffine(img, M, (w, h))
+
+    oImage = Image.fromarray(result)
+    oCropped = oImage.crop((tx, ty, w*scale - tx, h*scale - ty)).convert("RGBA")
+    oNewImageWithBackground = Image.new("RGBA", self.size, nBGColor)
+    oNewImageWithBackground.paste(oCropped, (tx, ty), oCropped)
+
+    return ImageProcessor(image=oNewImageWithBackground, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def wave_effect(self, amplitude=20, frequency=0.1):
+    img = self.pixels
+    w, h = self.size
+
+    # Create mapping arrays
+    map_x = np.zeros((h, w), dtype=np.float32)
+    map_y = np.zeros((h, w), dtype=np.float32)
+
+    for i in range(h):
+      for j in range(w):
+        offset_x = int(amplitude * np.sin(2 * np.pi * frequency * i))
+        map_x[i, j] = j + offset_x
+        map_y[i, j] = i
+
+    # Apply remapping
+    result = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)
+    pil_image = Image.fromarray(result)
+    return ImageProcessor(image=pil_image, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def rotate(self, degrees, is_original_scale=True, bgcolor=None):
+    nBGColor = self._get_background_color(bgcolor)
+    oImageRGBA = self.image.convert('RGBA')
+    oImageRotated = oImageRGBA.rotate(degrees, expand=True)
+    oBackground = Image.new('RGBA', oImageRotated.size, nBGColor)
+    oNewImage = Image.composite(oImageRotated, oBackground, oImageRotated).convert(self.image.mode)
+
+    if is_original_scale:
+      orig_w, orig_h = self.image.size
+      new_w, new_h = oNewImage.size
+      left = (new_w - orig_w) // 2
+      top = (new_h - orig_h) // 2
+      right = left + orig_w
+      bottom = top + orig_h
+
+      # Crop to original aspect ratio
+      oNewImage = oNewImage.crop((left, top, right, bottom))
+    else:
+      # Composite the rotated image onto the background using its alpha mask
+      oNewImage = Image.composite(oImageRotated, oBackground, oImageRotated).convert(self.image.mode)
+
+    return ImageProcessor(image=oNewImage, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def fit_to_size(self, size=(227, 227), bgcolor=None):
+    nBGColor = self._get_background_color(bgcolor)
+
+    img = self.image
+    img_width = float(self.size[0])
+    img_height = float(self.size[1])
+
+    if img_width > img_height:
+      bIsLandscape = True
+      ratio = img_height / img_width
+      new_width = size[0]
+      new_height = int(size[0] * ratio)
+    else:
+      bIsLandscape = False
+      ratio = img_width / img_height
+      new_width = int(size[0] * ratio)
+      new_height = size[0]
+
+    img = img.resize((new_width, new_height), Image.NONE).convert("RGBA")
+
+    nOffsetX = 0
+    nOffsetY = 0
+    if bIsLandscape:
+      nOffsetY = (size[1] - img.size[1]) // 2
+    else:
+      nOffsetX = (size[0] - img.size[0]) // 2
+
+    thumb = img.crop((0, 0, size[0], size[1]))
+
+    oMovedImage = ImageChops.offset(thumb, int(nOffsetX), int(nOffsetY))
+
+    oNewImageWithBackground = Image.new("RGBA", oMovedImage.size, nBGColor)
+    oNewImageWithBackground.paste(oMovedImage, (0, 0), oMovedImage)
+
+    return ImageProcessor(image=oNewImageWithBackground, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def crop_to_size(self, target_size=(227, 227)):
+    """
+    Resizes the image so that the smallest dimension matches the corresponding
+    target size dimension, then center crops it to the exact target size.
+
+    :param image: PIL Image to be processed.
+    :param target_size: Tuple (width, height) of the desired output size.
+    :return: Cropped PIL Image of the specified size.
+    """
+    target_w, target_h = target_size
+    img_w, img_h = self.size
+
+    # Determine scale factor to match the smallest dimension
+    scale = max(target_w / img_w, target_h / img_h)
+    new_size = (int(img_w * scale), int(img_h * scale))
+
+    # Resize while maintaining aspect ratio
+    oResizedImage = self.image.resize(new_size, Image.NONE)  # Image.LANCZOS
+
+    # Center crop to the exact target size
+    left = (oResizedImage.width - target_w) / 2
+    top = (oResizedImage.height - target_h) / 2
+    right = left + target_w
+    bottom = top + target_h
+
+    oCroppedImage = oResizedImage.crop((left, top, right, bottom))
+
+    return ImageProcessor(image=oCroppedImage, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def horizontal_wave_effect(self, amplitude=7, frequency=0.01, bgcolor=None):
+    nBGColor = self._get_background_color(bgcolor)
+
+    img = self.pixels
+    w, h = self.size
+
+    # Create mapping arrays
+    map_x = np.zeros((h, w), dtype=np.float32)
+    map_y = np.zeros((h, w), dtype=np.float32)
+
+    for i in range(h):
+      for j in range(w):
+        offset_y = int(amplitude * np.sin(2 * np.pi * frequency * j))  # Horizontal wave
+        map_x[i, j] = j
+        map_y[i, j] = i + offset_y  # Apply wave effect in Y direction
+
+    # Apply remapping
+    result = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=nBGColor)
+    oImage = Image.fromarray(result)
+    return ImageProcessor(image=oImage, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def drop_shadow(self, shrink=0.9, shadow_offset=(8, 8), shadow_blur=15,
+                  shadow_color=(32, 32, 32, 255), bgcolor=None):
+    """
+    Shrinks an image while keeping the final output dimensions the same by adding a drop shadow.
+
+    Parameters:
+      shrink_factor (float): The factor by which the image is shrunk (0 < shrink_factor ≤ 1).
+      shadow_offset (tuple): The (x, y) offset for the shadow.
+      shadow_blur (int): The blur radius for the shadow.
+      shadow_color (tuple): RGBA color of the shadow (default is semi-transparent black).
+
+    Returns:
+      ImageProcessor: A new instance with the processed image.
+    """
+    nBGColor = self._get_background_color(bgcolor)
+    assert shrink < 1.0, "The shrink factor should be less than 1.0"
+    img = self.pixels
+    w, h = self.size
+
+    # Compute new size for the shrunk image
+    new_w = int(w * shrink)
+    new_h = int(h * shrink)
+
+    # Place the shrunk image centered in the original dimensions
+    x_center = (w - new_w) // 2
+    y_center = (h - new_h) // 2
+
+
+    # Resize the image (shrink)
+    shrunk_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)
+
+    # Create transparent background
+    shadow_img = np.zeros((h, w, 4), dtype=np.uint8)
+    shadow_img[:,:,:4] = list(nBGColor)[:]
+
+    # Create shadow by filling an ellipse or rectangle (based on image shape)
+    shadow = np.full((new_h, new_w, 4), shadow_color, dtype=np.uint8)
+
+    # Blur the shadow
+    shadow = cv2.GaussianBlur(shadow, (shadow_blur, shadow_blur), 0)
+
+    # Position the shadow
+    x_offset, y_offset = x_center + shadow_offset[0], y_center + shadow_offset[1]
+    shadow_img[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = shadow
+
+    # Convert shrunk image to 4-channel RGBA if not already
+    if shrunk_img.shape[2] == 3:
+      shrunk_img = cv2.cvtColor(shrunk_img, cv2.COLOR_RGB2RGBA)
+
+    # Paste the shrunk image onto the shadow
+    shadow_img[y_center:y_center + new_h, x_center:x_center + new_w] = shrunk_img
+
+    # Convert back to PIL image
+    oImage = Image.fromarray(shadow_img)
+
+    return ImageProcessor(image=oImage, filename=self.filename, bgcolor=self.bgcolor)
+  # --------------------------------------------------------------------------------------------------------------------
+  def make_mbsf_augmented_square(self, size=(227, 227)):
+    '''
+    Used in MSBF paper
+    :param size:
+    :return:
+    '''
+    nSize = size
+
+    nHalfSize = (nSize[0] // 2, nSize[1] // 2)
+    nModX = nSize[0] % 2
+    nModY = nSize[1] % 2
+
+    img = self.image
+
+    img_width = float(self.size[0])
+    img_height = float(self.size[1])
+    nAspectRatio = img_width / img_height
+    if nAspectRatio > 1.0:
+      bIrregular = nAspectRatio > (phi * 0.9)
+      bIsTopBottomPadding = True
+    else:
+      bIrregular = nAspectRatio < (1.0 / (phi * 0.9))
+      bIsTopBottomPadding = False
+
+    # print("[%d,%d] AspectRatio:%.4f Irregular:%r" % (img_width, img_height, nAspectRatio, bIrregular))
+    nRatioWidth = 1.0
+    nRatioHeight = 1.0
+    if bIrregular:
+      if img_width > img_height:
+        nRatioHeight = img_height / img_width
+      else:
+        nRatioWidth = img_width / img_height
+    else:
+      if img_width > img_height:
+        nRatioWidth = img_width / img_height
+      else:
+        nRatioHeight = img_height / img_width
+
+    new_width = int(nSize[0] * nRatioWidth)
+    new_height = int(nSize[1] * nRatioHeight)
+
+    img = img.resize((new_width, new_height), Image.NONE)
+    # print("New Image Size", self.size)
+
+    if bIrregular:
+      thumb = img.crop((0, 0, nSize[0], nSize[1]))
+
+      offset_x = int(max((nSize[0] - self.size[0]) / 2, 0))
+      offset_y = int(max((nSize[1] - self.size[1]) / 2, 0))
+
+      img = ImageChops.offset(thumb, offset_x, offset_y)
+
+      Result = np.array(img)
+
+      # TODO: Fadding out by number of space size
+      if bIsTopBottomPadding:
+        space_size_top = offset_y
+        space_size_bottom = nSize[1] - new_height - offset_y
+        # print("top %i, bottom %i" % (space_size_top, space_size_bottom))
+
+        first_row = Result[offset_y + 1, :, :]
+        last_row = Result[offset_y + new_height - 1, :, :]
+        # first_row=np.repeat( np.mean(first_row, axis=0).reshape(1, Result.shape[2]), Result.shape[1], axis=0)
+        # last_row=np.repeat( np.mean(first_row, axis=0).reshape(1, Result.shape[2]), Result.shape[1], axis=0 )
+
+        top_rows = np.repeat(first_row.reshape(1, Result.shape[1], Result.shape[2]), space_size_top + 1, axis=0)
+        bottom_rows = np.repeat(last_row.reshape(1, Result.shape[1], Result.shape[2]), space_size_bottom, axis=0)
+
+        im1 = Image.fromarray(top_rows)
+        im1 = im1.filter(ImageFilter.BLUR)
+        top_rows = np.array(im1)
+
+        im2 = Image.fromarray(bottom_rows)
+        im2 = im2.filter(ImageFilter.BLUR)
+        bottom_rows = np.array(im2)
+
+        Result[0:offset_y + 1, :, :] = top_rows[:, :, :]
+        Result[offset_y + new_height:nSize[1], :, :] = bottom_rows[:, :, :]
+      else:
+
+        space_size_left = offset_x
+        space_size_right = nSize[0] - new_width - space_size_left
+        # print("left %i, right %i" % (space_size_left, space_size_left))
+
+        first_col = Result[:, offset_x + 1, :]
+        last_col = Result[:, offset_x + new_width - 1, :]
+
+        left_cols = np.repeat(first_col.reshape(Result.shape[0], 1, Result.shape[2]), space_size_left + 1, axis=1)
+        right_cols = np.repeat(last_col.reshape(Result.shape[0], 1, Result.shape[2]), space_size_right, axis=1)
+
+        im1 = Image.fromarray(left_cols)
+        im1 = im1.filter(ImageFilter.BLUR)
+        left_cols = np.array(im1)
+
+        im2 = Image.fromarray(right_cols)
+        im2 = im2.filter(ImageFilter.BLUR)
+        right_cols = np.array(im2)
+
+        Result[:, 0:offset_x + 1, :] = left_cols[:, :, :]
+        Result[:, offset_x + new_width:nSize[0], :] = right_cols[:, :, :]
+
+      img = Image.fromarray(Result)
+
+    # print("Base Image Size", self.size)
+    # plt.imshow(np.array(img))
+    # plt.show()
+
+
+    if nAspectRatio > 1.0:
+      nDiff = (self.size[0] - self.size[1]) // 2
+    else:
+      nDiff = (self.size[1] - self.size[0]) // 2
+    #
+    #
+    # if False:
+    #   a4im = Image.new('RGB',
+    #                    (595, 842),  # A4 at 72dpi
+    #                    (255, 255, 255))  # White
+    #   a4im.paste(img, img.getbbox())  # Not centered, top-left corner
+    #   plt.imshow(np.array(a4im))
+    #   plt.show()
+    nCenterX = self.size[0] // 2
+    nCenterY = self.size[1] // 2
+
+    nImgCropped = [None] * 3
+    if nDiff > 40:
+      nCropPositions = [0, -nDiff // 2, nDiff // 2]
+    else:
+      nCropPositions = [0]
+
+    for nIndex, nShiftPos in enumerate(nCropPositions):
+      nPosX = nCenterX
+      nPosY = nCenterY
+      if nAspectRatio > 1.0:
+        nPosX += nShiftPos
+      else:
+        nPosY += nShiftPos
+
+      nLeft = nPosX - nHalfSize[0]
+      nRight = nPosX + nHalfSize[0] + nModX
+
+      nTop = nPosY - nHalfSize[1]
+      nBottom = nPosY + nHalfSize[1] + nModY
+      nImgCropped[nIndex] = np.array(img.crop((nLeft, nTop, nRight, nBottom)))
+
+    if len(nCropPositions) == 1:
+      nImgCropped[1] = np.array(self.rotate(img, 12).pixels)
+      nImgCropped[2] = np.array(self.rotate(img, -12).pixels)
+
+    return nImgCropped
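Every transform above returns a new ImageProcessor wrapping the result, so calls chain without mutating the source object. A minimal usage sketch of the new class (the input path is a placeholder, and importing ImageProcessor through radnn.images assumes the re-export added to radnn/images/__init__.py in this release):

from radnn.images import ImageProcessor

# Placeholder input file; load() opens it as RGB via PIL.
proc = ImageProcessor().load("samples/photo.jpg")

# Each call returns a fresh ImageProcessor around the transformed image.
square = proc.pad_square_with_edges(size=(227, 227))
tilted = square.rotate(15, bgcolor="black")
tilted.image.save("photo_227_tilted.png")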
radnn/learn/learning_algorithm.py
CHANGED

@@ -1,6 +1,7 @@
-from radnn import is_tensorflow_installed
+from radnn import mlsys
 
-
+
+if mlsys.is_tensorflow_installed:
   from .keras_optimization_algorithm import KOptimizationAlgorithm
 
 class LearningAlgorithm(object):

@@ -29,7 +30,7 @@ class LearningAlgorithm(object):
     return oResult
   # -----------------------------------------------------------------------------------
   def prepare(self):
-    if is_tensorflow_installed:
+    if mlsys.is_tensorflow_installed:
       self._implementation = KOptimizationAlgorithm(self.config, self.is_verbose)
     return self
   # -----------------------------------------------------------------------------------
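The guarded import above is the pattern 0.0.8 uses to keep Keras-specific code out of the import path when TensorFlow is absent. A minimal sketch of how such availability flags are commonly computed (illustrative only; the actual detection logic in ml_system.py is not part of this diff excerpt):

import importlib.util

def _is_installed(module_name):
  # True when the module can be resolved, without actually importing it.
  return importlib.util.find_spec(module_name) is not None

is_tensorflow_installed = _is_installed("tensorflow")
is_torch_installed = _is_installed("torch")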
radnn/ml_system.py
CHANGED
@@ -1,12 +1,38 @@
+# ======================================================================================
+#
+# Rapid Deep Neural Networks
+#
+# Licensed under the MIT License
+# ______________________________________________________________________________________
+# ......................................................................................
+
+# Copyright (c) 2018-2025 Pantelis I. Kaplanoglou
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# .......................................................................................
 import os
 import random
 import numpy as np
 import importlib
 
 class MLSystem(object):
-  IS_USING_TENSORFLOW = False
-  IS_USING_TORCH = False
-
   # --------------------------------------------------------------------------------------
   _instance = None
   @classmethod

@@ -17,30 +43,36 @@ class MLSystem(object):
     return cls._instance
   # --------------------------------------------------------------------------------------
   @property
-  def
-  return self.is_tensorflow_installed and
+  def is_using_tensorflow(self):
+    return self.is_tensorflow_installed and self._is_using_tensorflow
+  # --------------------------------------------------------------------------------------
+  @is_using_tensorflow.setter
+  def is_using_tensorflow(self, value):
+    self._is_using_tensorflow = value
+    self._is_using_torch = not value
   # --------------------------------------------------------------------------------------
   @property
-  def
-  return self.is_torch_installed and
+  def is_using_torch(self):
+    return self.is_torch_installed and self.is_using_torch
+  # --------------------------------------------------------------------------------------
+  @is_using_torch.setter
+  def is_using_torch(self, value):
+    self._is_using_torch = value
+    self._is_using_tensorflow = not value
   # --------------------------------------------------------------------------------------
   def __init__(self):
     self._is_random_seed_initialized = False
     self._filesys = None
+    self._seed = None
     self.switches = dict()
     self.switches["IsDebuggable"] = False
 
     self.is_tensorflow_installed = False
     self.is_torch_installed = False
     self.is_opencv_installed = False
-
-
-    self.IS_USING_TENSORFLOW = False
-    self.IS_USING_TORCH = False
-  # --------------------------------------------------------------------------------------
-  def use_torch(self):
-    self.IS_USING_TORCH = True
-    self.IS_USING_TENSORFLOW = False
+
+    self._is_using_tensorflow = False
+    self.is_using_torch = False
   # --------------------------------------------------------------------------------------
   @property
   def filesys(self):

@@ -49,10 +81,17 @@ class MLSystem(object):
   @filesys.setter
   def filesys(self, value):
     self._filesys = value
+
+  # --------------------------------------------------------------------------------------
+  @property
+  def seed(self):
+    return self._seed
   # --------------------------------------------------------------------------------------
   # We are seeding the number generators to get some amount of determinism for the whole ML training process.
   # For Tensorflow it is not ensuring 100% deterministic reproduction of an experiment on the GPU.
-  def random_seed_all(self, seed, is_done_once=False):
+  def random_seed_all(self, seed, is_done_once=False, is_parallel_deterministic=False):
+    self._seed = seed
+
     bContinue = True
     if is_done_once:
       bContinue = (not self._is_random_seed_initialized)

@@ -61,12 +100,14 @@ class MLSystem(object):
       random.seed(seed)
       os.environ['PYTHONHASHSEED'] = str(seed)
       np.random.seed(seed)
-      if mlsys.
+      if mlsys.is_tensorflow_installed:
        import tensorflow as tf
        tf.compat.v1.reset_default_graph()
+        if is_parallel_deterministic:
+          tf.config.experimental.enable_op_determinism()  # Enable determinism for num_parallel_calls
        tf.random.set_seed(seed)
        tf.keras.utils.set_random_seed(seed)
-
+      if mlsys.is_torch_installed:
        import torch
        torch.manual_seed(seed)
        # GPU and multi-GPU
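A short usage sketch of the reworked MLSystem surface, assuming mlsys is the module-level singleton that radnn exports (as the learning_algorithm.py import above indicates):

from radnn import mlsys

# The new property setters keep the two backend flags mutually exclusive.
mlsys.is_using_tensorflow = True

# Seeds Python, NumPy, and whichever frameworks are installed; op determinism
# for parallel tf.data calls is opt-in via is_parallel_deterministic.
mlsys.random_seed_all(2025, is_done_once=True, is_parallel_deterministic=True)
print(mlsys.seed)  # -> 2025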