radnn 0.0.7.3__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- radnn/__init__.py +2 -1
- radnn/data/__init__.py +2 -0
- radnn/data/data_feed.py +5 -0
- radnn/data/dataset_folder.py +55 -0
- radnn/data/image_dataset_files.py +175 -0
- radnn/data/subset_type.py +8 -2
- radnn/data/tf_classification_data_feed.py +22 -6
- radnn/experiment/ml_experiment_config.py +54 -29
- radnn/images/__init__.py +2 -0
- radnn/images/colors.py +28 -0
- radnn/images/image_processor.py +513 -0
- radnn/ml_system.py +1 -0
- radnn/plots/plot_auto_multi_image.py +6 -5
- radnn/stats/__init__.py +1 -0
- radnn/stats/descriptive_stats.py +45 -0
- radnn/system/files/__init__.py +1 -0
- radnn/system/files/filelist.py +40 -0
- radnn/system/files/textfile.py +29 -3
- radnn/system/filestore.py +26 -10
- radnn/system/filesystem.py +1 -1
- radnn/system/hosts/windows_host.py +10 -0
- radnn/system/threads/__init__.py +5 -0
- radnn/system/threads/semaphore_lock.py +58 -0
- radnn/system/threads/thread_context.py +175 -0
- radnn/system/threads/thread_safe_queue.py +163 -0
- radnn/system/threads/thread_safe_string_collection.py +66 -0
- radnn/system/threads/thread_worker.py +68 -0
- radnn/utils.py +43 -0
- {radnn-0.0.7.3.dist-info → radnn-0.0.8.dist-info}/METADATA +1 -1
- {radnn-0.0.7.3.dist-info → radnn-0.0.8.dist-info}/RECORD +33 -19
- {radnn-0.0.7.3.dist-info → radnn-0.0.8.dist-info}/LICENSE.txt +0 -0
- {radnn-0.0.7.3.dist-info → radnn-0.0.8.dist-info}/WHEEL +0 -0
- {radnn-0.0.7.3.dist-info → radnn-0.0.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,513 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import cv2
|
|
3
|
+
from PIL import Image, ImageChops, ImageFilter, ImageDraw
|
|
4
|
+
from .colors import color
|
|
5
|
+
|
|
6
|
+
phi=(1.0+np.sqrt(5.0))/2.0
|
|
7
|
+
|
|
8
|
+
class ImageProcessor(object):
  """Chainable image-manipulation helper wrapping a PIL image and/or a numpy pixel array."""
  # --------------------------------------------------------------------------------------------------------------------
  def __init__(self, pixels=None, filename=None, image=None, bgcolor=None):
    """
    :param pixels:   Optional numpy array of pixel data (2D, 3D, or 4D batch).
    :param filename: Optional source file name (kept for provenance; not loaded here).
    :param image:    Optional PIL image; when given, `pixels` is derived from it.
    :param bgcolor:  Optional background color (name string or component tuple).
    """
    self.pixels = pixels
    self.filename = filename
    self.image = image
    self.bgcolor = bgcolor
    self._size = None

    # Fix: the original tested `self.pixels is not None` twice in a row; once suffices.
    if self.pixels is not None:
      nShape = self.pixels.shape
      if len(nShape) == 4:
        # Batch of images: drop the leading sample axis.
        self._size = nShape[1:4]
      elif len(nShape) == 3:
        # NOTE(review): keeps nShape[1:3] — for an HxWxC array that is (W, C),
        # which looks inconsistent with the 4D case; confirm intended semantics.
        self._size = nShape[1:3]
      elif len(nShape) == 2:
        self._size = nShape

    if image is not None:
      self.pixels = np.array(self.image)
|
|
29
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
30
|
+
@property
def size(self):
  """Image dimensions: PIL's (width, height) when a PIL image is attached, else the cached array-derived size."""
  has_pil_image = self.image is not None
  return self.image.size if has_pil_image else self._size
|
|
36
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
37
|
+
def load(self, filename=None):
  """Open `filename` (or the previously stored name) as RGB, caching both the PIL image and its pixel array; returns self for chaining."""
  if filename is not None:
    self.filename = filename
  opened = Image.open(self.filename)
  self.image = opened.convert("RGB")
  self.pixels = np.array(self.image)
  return self
|
|
43
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
44
|
+
def pad_square_with_edges(self, size=(227, 227)):
  """
  Resize to fit inside `size` while keeping aspect ratio, then fill the
  leftover bands by repeating (and blurring) the image's border rows/columns.
  Returns a new ImageProcessor; `self` is left untouched.
  """
  src = self.image
  src_w, src_h = src.size

  # Scale so the longer side matches the target, preserving aspect ratio.
  landscape = src_w > src_h
  if landscape:
    aspect = src_h / src_w
    fit_w, fit_h = size[0], int(size[0] * aspect)
  else:
    aspect = src_w / src_h
    fit_w, fit_h = int(size[1] * aspect), size[1]

  src = src.resize((fit_w, fit_h), Image.LANCZOS)

  # Blank canvas in the target dimensions with the resized image centered.
  canvas = np.zeros((size[1], size[0], 3), dtype=np.uint8)
  resized = np.array(src)
  off_x = (size[0] - fit_w) // 2
  off_y = (size[1] - fit_h) // 2
  canvas[off_y:off_y + fit_h, off_x:off_x + fit_w, :] = resized

  if landscape:
    # Repeat the first/last image rows into the top/bottom bands, then blur.
    edge_top = canvas[off_y, :, :]
    edge_bottom = canvas[off_y + fit_h - 1, :, :]
    band_top = np.repeat(edge_top.reshape(1, size[0], 3), off_y, axis=0)
    band_bottom = np.repeat(edge_bottom.reshape(1, size[0], 3), size[1] - off_y - fit_h, axis=0)
    canvas[0:off_y, :, :] = np.array(Image.fromarray(band_top).filter(ImageFilter.BLUR))
    canvas[off_y + fit_h:size[1], :, :] = np.array(Image.fromarray(band_bottom).filter(ImageFilter.BLUR))
  else:
    # Same idea for the left/right bands.
    edge_left = canvas[:, off_x, :]
    edge_right = canvas[:, off_x + fit_w - 1, :]
    band_left = np.repeat(edge_left.reshape(size[1], 1, 3), off_x, axis=1)
    band_right = np.repeat(edge_right.reshape(size[1], 1, 3), size[0] - off_x - fit_w, axis=1)
    canvas[:, 0:off_x, :] = np.array(Image.fromarray(band_left).filter(ImageFilter.BLUR))
    canvas[:, off_x + fit_w:size[0], :] = np.array(Image.fromarray(band_right).filter(ImageFilter.BLUR))

  return ImageProcessor(image=Image.fromarray(canvas), filename=self.filename, bgcolor=self.bgcolor)
|
|
105
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
106
|
+
def _get_background_color(self, bgcolor):
  """Resolve the effective background color: an explicit argument overrides the stored one, a missing value defaults to "black", and color names are resolved via color()."""
  if bgcolor is not None:
    self.bgcolor = bgcolor
  if self.bgcolor is None:
    self.bgcolor = "black"
  return color(self.bgcolor) if isinstance(self.bgcolor, str) else self.bgcolor
|
|
116
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
117
|
+
def roll(self, shift_x=0, shift_y=0, bgcolor=None):
  """Cyclically shift the image by (shift_x, shift_y); content wraps around the edges. Returns a new ImageProcessor."""
  self._get_background_color(bgcolor)  # kept for its side effect of updating self.bgcolor
  shifted = np.array(self.image)
  shifted = np.roll(np.roll(shifted, shift_x, axis=1), shift_y, axis=0)
  return ImageProcessor(image=Image.fromarray(shifted), filename=self.filename, bgcolor=self.bgcolor)
|
|
131
|
+
|
|
132
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
133
|
+
def zoom_center(self, scale=1.2, bgcolor=None):
  """
  Zoom around the image center by `scale`, compositing the result over the
  background color. Returns a new ImageProcessor.

  :param scale:   Zoom factor (<1 shrinks, >1 enlarges).
  :param bgcolor: Optional background color override.
  """
  nBGColor = self._get_background_color(bgcolor)
  img = self.pixels
  w, h = self.size

  # Translation that keeps the scaled content centered.
  if scale < 1.0:
    tx = int((w * (1.0 - scale)) // 2)
    ty = int((h * (1.0 - scale)) // 2)
  else:
    tx = -int(w * (scale - 1.0) // 2)
    # Fix: the original derived ty from the width (w), mis-centering non-square images.
    ty = -int(h * (scale - 1.0) // 2)

  # Affine transform: uniform scale plus centering translation.
  M = np.array([[scale, 0, tx],
                [0, scale, ty]], dtype=np.float32)
  result = cv2.warpAffine(img, M, (w, h))

  oImage = Image.fromarray(result)
  oCropped = oImage.crop((tx, ty, w - tx - 1, h - ty - 1)).convert("RGBA")
  oNewImageWithBackground = Image.new("RGBA", self.size, nBGColor)
  oNewImageWithBackground.paste(oCropped, (tx, ty), oCropped)

  return ImageProcessor(image=oNewImageWithBackground, filename=self.filename, bgcolor=self.bgcolor)
|
|
159
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
160
|
+
def zoom_pan(self, scale=1.2, tx=20, ty=20, bgcolor=None):
  """Zoom by `scale` and pan by (tx, ty), compositing over the background color. Returns a new ImageProcessor."""
  fill = self._get_background_color(bgcolor)
  w, h = self.size

  # Affine transform: uniform scale plus the requested pan offsets.
  warp = np.array([[scale, 0, tx],
                   [0, scale, ty]], dtype=np.float32)
  warped = cv2.warpAffine(self.pixels, warp, (w, h))

  cropped = Image.fromarray(warped).crop((tx, ty, w * scale - tx, h * scale - ty)).convert("RGBA")
  backdrop = Image.new("RGBA", self.size, fill)
  backdrop.paste(cropped, (tx, ty), cropped)

  return ImageProcessor(image=backdrop, filename=self.filename, bgcolor=self.bgcolor)
|
|
178
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
179
|
+
def wave_effect(self, amplitude=20, frequency=0.1):
  """
  Apply a sine-wave displacement: each row i is shifted horizontally by
  int(amplitude * sin(2*pi*frequency*i)). Returns a new ImageProcessor.

  :param amplitude: Maximum horizontal displacement in pixels.
  :param frequency: Wave frequency (cycles per row).
  """
  img = self.pixels
  w, h = self.size

  # Vectorized remap tables. The original filled map_x/map_y with a Python
  # double loop over every pixel — O(h*w) interpreter iterations for the
  # same values; int32 truncation matches the original int() cast.
  rows = np.arange(h, dtype=np.float32)
  cols = np.arange(w, dtype=np.float32)
  row_offsets = (amplitude * np.sin(2 * np.pi * frequency * rows)).astype(np.int32).astype(np.float32)
  map_x = cols[None, :] + row_offsets[:, None]
  map_y = np.broadcast_to(rows[:, None], (h, w)).copy()

  result = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)
  return ImageProcessor(image=Image.fromarray(result), filename=self.filename, bgcolor=self.bgcolor)
|
|
197
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
198
|
+
def rotate(self, degrees, is_original_scale=True, bgcolor=None):
  """
  Rotate counter-clockwise by `degrees`, compositing over the background
  color. With is_original_scale=True the result is center-cropped back to
  the original dimensions; otherwise the expanded rotated canvas is kept.
  Returns a new ImageProcessor.
  """
  nBGColor = self._get_background_color(bgcolor)
  oImageRGBA = self.image.convert('RGBA')
  oImageRotated = oImageRGBA.rotate(degrees, expand=True)
  oBackground = Image.new('RGBA', oImageRotated.size, nBGColor)
  # Composite the rotated image onto the background using its alpha mask.
  # (Fix: the original recomputed this identical composite a second time in
  # the else-branch; once is enough.)
  oNewImage = Image.composite(oImageRotated, oBackground, oImageRotated).convert(self.image.mode)

  if is_original_scale:
    orig_w, orig_h = self.image.size
    new_w, new_h = oNewImage.size
    left = (new_w - orig_w) // 2
    top = (new_h - orig_h) // 2
    # Crop back to the original dimensions, centered.
    oNewImage = oNewImage.crop((left, top, left + orig_w, top + orig_h))

  return ImageProcessor(image=oNewImage, filename=self.filename, bgcolor=self.bgcolor)
|
|
220
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
221
|
+
def fit_to_size(self, size=(227, 227), bgcolor=None):
  """
  Resize to fit inside `size` (aspect ratio preserved), center the result
  on a canvas of the background color, and return a new ImageProcessor.
  NOTE(review): the portrait branch scales against size[0] rather than
  size[1] — preserved as-is; confirm intended for non-square targets.
  """
  fill = self._get_background_color(bgcolor)

  src = self.image
  src_w = float(self.size[0])
  src_h = float(self.size[1])

  landscape = src_w > src_h
  if landscape:
    fit_w = size[0]
    fit_h = int(size[0] * (src_h / src_w))
  else:
    fit_w = int(size[0] * (src_w / src_h))
    fit_h = size[0]

  src = src.resize((fit_w, fit_h), Image.NONE).convert("RGBA")

  # Offsets that center the fitted image along the padded axis.
  shift_x, shift_y = 0, 0
  if landscape:
    shift_y = (size[1] - src.size[1]) // 2
  else:
    shift_x = (size[0] - src.size[0]) // 2

  thumb = src.crop((0, 0, size[0], size[1]))
  moved = ImageChops.offset(thumb, int(shift_x), int(shift_y))

  backdrop = Image.new("RGBA", moved.size, fill)
  backdrop.paste(moved, (0, 0), moved)

  return ImageProcessor(image=backdrop, filename=self.filename, bgcolor=self.bgcolor)
|
|
256
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
257
|
+
def crop_to_size(self, target_size=(227, 227)):
  """
  Scale so the smaller dimension covers the target, then center-crop to
  exactly `target_size`.

  :param target_size: Tuple (width, height) of the desired output size.
  :return: New ImageProcessor holding the cropped image.
  """
  want_w, want_h = target_size
  have_w, have_h = self.size

  # Cover-scale: the larger of the two ratios guarantees both dims reach target.
  factor = max(want_w / have_w, want_h / have_h)
  scaled = self.image.resize((int(have_w * factor), int(have_h * factor)), Image.NONE)  # Image.LANCZOS

  # Center crop to the exact target size.
  x0 = (scaled.width - want_w) / 2
  y0 = (scaled.height - want_h) / 2
  boxed = scaled.crop((x0, y0, x0 + want_w, y0 + want_h))

  return ImageProcessor(image=boxed, filename=self.filename, bgcolor=self.bgcolor)
|
|
285
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
286
|
+
def horizontal_wave_effect(self, amplitude=7, frequency=0.01, bgcolor=None):
  """
  Apply a horizontal sine wave: each column j is shifted vertically by
  int(amplitude * sin(2*pi*frequency*j)); uncovered pixels take the
  background color. Returns a new ImageProcessor.
  """
  nBGColor = self._get_background_color(bgcolor)
  img = self.pixels
  w, h = self.size

  # Vectorized remap tables (replaces the original per-pixel Python double
  # loop with numpy broadcasting; int32 truncation matches int()).
  rows = np.arange(h, dtype=np.float32)
  cols = np.arange(w, dtype=np.float32)
  col_offsets = (amplitude * np.sin(2 * np.pi * frequency * cols)).astype(np.int32).astype(np.float32)
  map_x = np.broadcast_to(cols[None, :], (h, w)).copy()
  map_y = rows[:, None] + col_offsets[None, :]

  # NOTE(review): borderValue is the resolved background color — confirm it
  # carries the component count cv2 expects for this image's channel count.
  result = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=nBGColor)
  return ImageProcessor(image=Image.fromarray(result), filename=self.filename, bgcolor=self.bgcolor)
|
|
306
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
307
|
+
def drop_shadow(self, shrink=0.9, shadow_offset=(8, 8), shadow_blur=15,
                shadow_color=(32, 32, 32, 255), bgcolor=None):
  """
  Shrink the image by `shrink` while keeping the output dimensions, drawing
  a blurred drop shadow behind it over the background color.

  Parameters:
    shrink (float): Shrink factor, 0 < shrink < 1.
    shadow_offset (tuple): (x, y) displacement of the shadow from the image.
    shadow_blur (int): Gaussian blur kernel size for the shadow.
    shadow_color (tuple): RGBA color of the shadow.
    bgcolor: Optional background color override.

  Returns:
    ImageProcessor: A new instance with the processed image.
  """
  nBGColor = self._get_background_color(bgcolor)
  assert shrink < 1.0, "The shrink factor should be less than 1.0"
  img = self.pixels
  w, h = self.size

  # Size and centered position of the shrunk image inside the original frame.
  new_w = int(w * shrink)
  new_h = int(h * shrink)
  x_center = (w - new_w) // 2
  y_center = (h - new_h) // 2

  shrunk_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)

  # RGBA canvas pre-filled with the background color.
  # NOTE(review): assumes the resolved color has 4 components — confirm.
  shadow_img = np.zeros((h, w, 4), dtype=np.uint8)
  shadow_img[:, :, :4] = list(nBGColor)[:]

  # Solid shadow rectangle in the shadow color, then blurred.
  shadow = np.full((new_h, new_w, 4), shadow_color, dtype=np.uint8)

  # Fix: cv2.GaussianBlur requires an odd kernel size; an even `shadow_blur`
  # made the original raise. Round up to the next odd value.
  k = shadow_blur if shadow_blur % 2 == 1 else shadow_blur + 1
  shadow = cv2.GaussianBlur(shadow, (k, k), 0)

  # Place the shadow displaced by shadow_offset.
  x_offset, y_offset = x_center + shadow_offset[0], y_center + shadow_offset[1]
  shadow_img[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = shadow

  # Paste the shrunk image (as RGBA) on top of the shadow.
  if shrunk_img.shape[2] == 3:
    shrunk_img = cv2.cvtColor(shrunk_img, cv2.COLOR_RGB2RGBA)
  shadow_img[y_center:y_center + new_h, x_center:x_center + new_w] = shrunk_img

  return ImageProcessor(image=Image.fromarray(shadow_img), filename=self.filename, bgcolor=self.bgcolor)
|
|
363
|
+
# --------------------------------------------------------------------------------------------------------------------
|
|
364
|
+
def make_mbsf_augmented_square(self, size=(227, 227)):
  """
  Produce (up to) three square crops used for MBSF-paper augmentation:
  shifted center crops for elongated images, or a center crop plus +/-12
  degree rotations when the aspect ratio is close to square.

  :param size: Target (width, height) of each square crop.
  :return: List of 3 numpy arrays (entries may remain None).
  """
  nSize = size
  nHalfSize = (nSize[0] // 2, nSize[1] // 2)
  nModX = nSize[0] % 2
  nModY = nSize[1] % 2

  img = self.image
  img_width = float(self.size[0])
  img_height = float(self.size[1])
  nAspectRatio = img_width / img_height

  # "Irregular" images are markedly elongated (beyond ~0.9*phi).
  if nAspectRatio > 1.0:
    bIrregular = nAspectRatio > (phi * 0.9)
    bIsTopBottomPadding = True
  else:
    bIrregular = nAspectRatio < (1.0 / (phi * 0.9))
    bIsTopBottomPadding = False

  nRatioWidth = 1.0
  nRatioHeight = 1.0
  if bIrregular:
    if img_width > img_height:
      nRatioHeight = img_height / img_width
    else:
      nRatioWidth = img_width / img_height
  else:
    if img_width > img_height:
      nRatioWidth = img_width / img_height
    else:
      nRatioHeight = img_height / img_width

  new_width = int(nSize[0] * nRatioWidth)
  new_height = int(nSize[1] * nRatioHeight)
  img = img.resize((new_width, new_height), Image.NONE)

  if bIrregular:
    # Center the resized image on a square canvas and fill the empty bands
    # with blurred copies of the edge rows/columns.
    thumb = img.crop((0, 0, nSize[0], nSize[1]))
    offset_x = int(max((nSize[0] - self.size[0]) / 2, 0))
    offset_y = int(max((nSize[1] - self.size[1]) / 2, 0))
    img = ImageChops.offset(thumb, offset_x, offset_y)
    Result = np.array(img)

    # TODO: Fading out by number of space size
    if bIsTopBottomPadding:
      space_size_top = offset_y
      space_size_bottom = nSize[1] - new_height - offset_y

      first_row = Result[offset_y + 1, :, :]
      last_row = Result[offset_y + new_height - 1, :, :]
      top_rows = np.repeat(first_row.reshape(1, Result.shape[1], Result.shape[2]), space_size_top + 1, axis=0)
      bottom_rows = np.repeat(last_row.reshape(1, Result.shape[1], Result.shape[2]), space_size_bottom, axis=0)

      top_rows = np.array(Image.fromarray(top_rows).filter(ImageFilter.BLUR))
      bottom_rows = np.array(Image.fromarray(bottom_rows).filter(ImageFilter.BLUR))

      Result[0:offset_y + 1, :, :] = top_rows[:, :, :]
      Result[offset_y + new_height:nSize[1], :, :] = bottom_rows[:, :, :]
    else:
      space_size_left = offset_x
      space_size_right = nSize[0] - new_width - space_size_left

      first_col = Result[:, offset_x + 1, :]
      last_col = Result[:, offset_x + new_width - 1, :]
      left_cols = np.repeat(first_col.reshape(Result.shape[0], 1, Result.shape[2]), space_size_left + 1, axis=1)
      right_cols = np.repeat(last_col.reshape(Result.shape[0], 1, Result.shape[2]), space_size_right, axis=1)

      left_cols = np.array(Image.fromarray(left_cols).filter(ImageFilter.BLUR))
      right_cols = np.array(Image.fromarray(right_cols).filter(ImageFilter.BLUR))

      Result[:, 0:offset_x + 1, :] = left_cols[:, :, :]
      Result[:, offset_x + new_width:nSize[0], :] = right_cols[:, :, :]

    img = Image.fromarray(Result)

  if nAspectRatio > 1.0:
    nDiff = (self.size[0] - self.size[1]) // 2
  else:
    nDiff = (self.size[1] - self.size[0]) // 2

  nCenterX = self.size[0] // 2
  nCenterY = self.size[1] // 2

  nImgCropped = [None] * 3
  # Strongly elongated images get three shifted crops, others a single center crop.
  if nDiff > 40:
    nCropPositions = [0, -nDiff // 2, nDiff // 2]
  else:
    nCropPositions = [0]

  for nIndex, nShiftPos in enumerate(nCropPositions):
    nPosX = nCenterX
    nPosY = nCenterY
    if nAspectRatio > 1.0:
      nPosX += nShiftPos
    else:
      nPosY += nShiftPos

    nLeft = nPosX - nHalfSize[0]
    nRight = nPosX + nHalfSize[0] + nModX
    nTop = nPosY - nHalfSize[1]
    nBottom = nPosY + nHalfSize[1] + nModY
    nImgCropped[nIndex] = np.array(img.crop((nLeft, nTop, nRight, nBottom)))

  if len(nCropPositions) == 1:
    # Fix: the original called self.rotate(img, 12), passing the PIL image as
    # the `degrees` argument — a TypeError at runtime. rotate() operates on
    # self.image; NOTE(review) confirm that is the intended rotation source.
    nImgCropped[1] = np.array(self.rotate(12).pixels)
    nImgCropped[2] = np.array(self.rotate(-12).pixels)

  return nImgCropped
|
radnn/ml_system.py
CHANGED
|
@@ -106,6 +106,7 @@ class MLSystem(object):
|
|
|
106
106
|
if is_parallel_deterministic:
|
|
107
107
|
tf.config.experimental.enable_op_determinism() # Enable determinism for num_parallel_calls
|
|
108
108
|
tf.random.set_seed(seed)
|
|
109
|
+
tf.keras.utils.set_random_seed(seed)
|
|
109
110
|
if mlsys.is_torch_installed:
|
|
110
111
|
import torch
|
|
111
112
|
torch.manual_seed(seed)
|
|
@@ -80,13 +80,14 @@ class AutoMultiImagePlot(object):
|
|
|
80
80
|
for nRowIndex, oRowColumns in enumerate(self.rows):
|
|
81
81
|
if len(oRowColumns) > 0:
|
|
82
82
|
sRowTitle = self.row_titles[nRowIndex]
|
|
83
|
-
|
|
84
|
-
nIncr = nImageCount //
|
|
83
|
+
nRowImageCount = len(oRowColumns)
|
|
84
|
+
#nIncr = nImageCount // nRowColumnCount
|
|
85
|
+
nIncr = 1
|
|
85
86
|
nImageIndex = 0
|
|
86
87
|
for nColIndex in range(nColumns):
|
|
87
|
-
bMustPlot =
|
|
88
|
-
if (nIncr == 0) and (nColIndex > 0):
|
|
89
|
-
|
|
88
|
+
bMustPlot = nColIndex < nRowImageCount
|
|
89
|
+
#if (nIncr == 0) and (nColIndex > 0):
|
|
90
|
+
# bMustPlot = False
|
|
90
91
|
|
|
91
92
|
if bMustPlot:
|
|
92
93
|
dImage = oRowColumns[nImageIndex]
|
radnn/stats/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .descriptive_stats import DescriptiveStats
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
class DescriptiveStats(object):
  """Computes min/max, mean/std, quartiles and Tukey fences for a 1-D numeric sample."""
  INNER_FENCE_RATIO = 1.5
  OUTER_FENCE_RATIO = 3.0

  # --------------------------------------------------------------------------------------------------------------------
  def __init__(self, data):
    """
    :param data: numpy array of numbers; a 2-D row/column vector is flattened first.
    """
    # Flatten 2-D arrays that are effectively 1-D (a single row or column).
    if (data.ndim == 2) and (np.prod(data.shape) == np.max(data.shape)):
      data = data.reshape(-1)

    self.min = np.min(data)
    self.max = np.max(data)

    self.mean = np.mean(data)
    self.std = np.std(data)

    self.q1 = np.percentile(data, q=25)
    self.median = np.median(data)
    self.q2 = self.median
    self.q3 = np.percentile(data, q=75)
    self.iq_range = self.q3 - self.q1

    # Tukey fences: values beyond these are mild (inner) / extreme (outer) outliers.
    self.inner_fence_low = self.q1 - type(self).INNER_FENCE_RATIO * self.iq_range
    self.inner_fence_high = self.q3 + type(self).INNER_FENCE_RATIO * self.iq_range

    self.outer_fence_low = self.q1 - type(self).OUTER_FENCE_RATIO * self.iq_range
    self.outer_fence_high = self.q3 + type(self).OUTER_FENCE_RATIO * self.iq_range

  # --------------------------------------------------------------------------------------------------------------------
  def outliers(self, data, is_using_outer_fence=False, dtype=np.float32):
    """
    Return the values of `data` lying outside the inner (default) or outer fences.

    :param data: iterable / array of numbers to screen against the fences.
    :param is_using_outer_fence: use the 3.0*IQR (extreme) fences instead of 1.5*IQR.
    :param dtype: dtype of the returned numpy array.
    :return: numpy array of outlying values, input order preserved.
    """
    if is_using_outer_fence:
      low, high = self.outer_fence_low, self.outer_fence_high
    else:
      low, high = self.inner_fence_low, self.inner_fence_high
    # Vectorized mask instead of the original Python-level list comprehension.
    nData = np.asarray(data)
    return nData[(nData < low) | (nData > high)].astype(dtype)

  # --------------------------------------------------------------------------------------------------------------------
  def __str__(self):
    # Fix: the original formatted the mean with "%6f" (field width 6, default
    # precision) instead of "%.6f" like every other statistic.
    sResult = "range=[%.6f,%.6f] mean=%.6f std=%.6f iqrange=%.6f Q1=%.6f median (Q2)=%.6f Q3=%.6f max (Q4)=%.6f" % (
      self.min, self.max, self.mean, self.std, self.iq_range,
      self.q1, self.median, self.q3, self.max)
    return sResult
  # --------------------------------------------------------------------------------------------------------------------
|
radnn/system/files/__init__.py
CHANGED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
# ======================================================================================================================
|
|
5
|
+
# ======================================================================================================================
class FileListFullPathIterator(object):
  """Iterator yielding each FileList entry joined onto the list's parent folder path."""
  # --------------------------------------------------------------------------------------------------------------------
  def __init__(self, filelist):
    self.filelist = filelist   # the FileList being walked
    self.index = 0             # next position to yield
  # --------------------------------------------------------------------------------------------------------------------
  def __iter__(self):
    return self
  # --------------------------------------------------------------------------------------------------------------------
  def __next__(self):
    if self.index >= len(self.filelist):
      raise StopIteration
    current = os.path.join(self.filelist.parent_folder_path, self.filelist[self.index])
    self.index += 1
    return current
  # --------------------------------------------------------------------------------------------------------------------
# ======================================================================================================================


# ======================================================================================================================
class FileList(list):
  """A list of file names that knows its parent folder, so it can yield full paths."""
  # --------------------------------------------------------------------------------------------------------------------
  def __init__(self, parent_folder_path=None):
    self.parent_folder_path = parent_folder_path  # folder the names are relative to (may be None)
  # --------------------------------------------------------------------------------------------------------------------
  @property
  def full_paths(self):
    """Iterator over joined folder+name paths, or None when no parent folder is set."""
    return FileListFullPathIterator(self) if self.parent_folder_path is not None else None
  # --------------------------------------------------------------------------------------------------------------------
# ======================================================================================================================
|