cellects 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +155 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +31 -0
  7. cellects/core/cellects_threads.py +1451 -0
  8. cellects/core/motion_analysis.py +2010 -0
  9. cellects/core/one_image_analysis.py +1061 -0
  10. cellects/core/one_video_per_blob.py +540 -0
  11. cellects/core/program_organizer.py +1316 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +790 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +2066 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/image_segmentation.py +706 -0
  29. cellects/image_analysis/morphological_operations.py +1635 -0
  30. cellects/image_analysis/network_functions.py +1757 -0
  31. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  32. cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
  33. cellects/image_analysis/shape_descriptors.py +1016 -0
  34. cellects/utils/__init__.py +0 -0
  35. cellects/utils/decorators.py +14 -0
  36. cellects/utils/formulas.py +637 -0
  37. cellects/utils/load_display_save.py +1054 -0
  38. cellects/utils/utilitarian.py +490 -0
  39. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  40. cellects-0.1.2.dist-info/METADATA +132 -0
  41. cellects-0.1.2.dist-info/RECORD +44 -0
  42. cellects-0.1.2.dist-info/WHEEL +5 -0
  43. cellects-0.1.2.dist-info/entry_points.txt +2 -0
  44. cellects-0.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,706 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ This module contains functions for basic operations of image segmentation.
4
+ It starts from converting bgr images into grayscale, filtering these grayscale images,
5
+ and various way of splitting these grayscale pixels into two categories (i.e. methods of thresholding)
6
+ """
7
+ import numpy as np
8
+ import cv2
9
+ from tqdm import tqdm
10
+ from numba.typed import Dict
11
+ from cellects.utils.decorators import njit
12
+ from numpy.typing import NDArray
13
+ from typing import Tuple
14
+ from cellects.utils.utilitarian import less_along_first_axis, greater_along_first_axis, translate_dict
15
+ from cellects.utils.formulas import bracket_to_uint8_image_contrast
16
+ from cellects.image_analysis.morphological_operations import get_largest_connected_component
17
+ from skimage.measure import perimeter
18
+ from scipy.optimize import minimize
19
+ from skimage.filters import (threshold_otsu, gaussian, butterworth, farid, frangi, hessian, laplace, median, meijering,
20
+ prewitt, roberts, sato, scharr, sobel)
21
+
22
+
23
# Registry of the filters selectable in the GUI, mapping each filter name to its
# tunable parameters. Each parameter entry is of the form
#   {'ParamN': {'Name': <label shown to the user>, 'Minimum': ..., 'Maximum': ..., 'Default': ...}}
# and filters that take no parameter map to {'': {}}. The leading "" key stands
# for "no filter selected". Keys must match the names handled by apply_filter().
filter_dict = {"": {'': {}},
               "Gaussian": {'Param1': {'Name': 'Sigma:', 'Minimum': 0., 'Maximum': 1000., 'Default': 1.}},
               "Median": {'': {}},
               "Butterworth": {'Param1': {'Name': 'Cutoff fr:', 'Minimum': 0., 'Maximum': .5, 'Default': .005},
                               'Param2': {'Name': 'Order:', 'Minimum': 0., 'Maximum': 1000., 'Default': 2.}},
               "Frangi": {'Param1': {'Name': 'Sigma min:', 'Minimum': 0., 'Maximum': 1000., 'Default': .5},
                          'Param2': {'Name': 'Sigma max:', 'Minimum': 0., 'Maximum': 1000., 'Default': 5.}},
               "Sato": {'Param1': {'Name': 'Sigma min:', 'Minimum': 0., 'Maximum': 1000., 'Default': .5},
                        'Param2': {'Name': 'Sigma max:', 'Minimum': 0., 'Maximum': 1000., 'Default': 5.}},
               "Meijering": {'Param1': {'Name': 'Sigma min:', 'Minimum': 0., 'Maximum': 1000., 'Default': 1.},
                             'Param2': {'Name': 'Sigma max:', 'Minimum': 0., 'Maximum': 1000., 'Default': 10.}},
               "Hessian": {'Param1': {'Name': 'Sigma min:', 'Minimum': 0., 'Maximum': 1000., 'Default': 1.},
                           'Param2': {'Name': 'Sigma max:', 'Minimum': 0., 'Maximum': 1000., 'Default': 10.}},
               "Laplace": {'Param1': {'Name': 'Ksize:', 'Minimum': 0., 'Maximum': 100., 'Default': 3}},
               "Sharpen": {'': {}},
               "Mexican hat": {'': {}},
               "Farid": {'': {}},
               "Prewitt": {'': {}},
               "Scharr": {'': {}},
               "Sobel": {'': {}},
               }
44
+
45
+
46
def apply_filter(image: NDArray, filter_type: str, param, rescale_to_uint8=False) -> NDArray:
    """
    Apply the filter named by `filter_type` to an image.

    Supported names: "Gaussian", "Median", "Butterworth", "Frangi", "Sato",
    "Meijering", "Hessian", "Laplace", "Sharpen", "Mexican hat", "Farid",
    "Prewitt", "Roberts", "Scharr" and "Sobel". Sharpen and Mexican hat are
    cv2.filter2D convolutions with fixed kernels; every other filter comes from
    skimage.filters. An unrecognized `filter_type` leaves the image untouched.

    Parameters
    ----------
    image : NDArray
        Input image.
    filter_type : str
        Name of the filter to apply (see list above).
    param : list or tuple
        Filter-specific parameters; how many entries are read depends on
        `filter_type` (e.g. sigma for "Gaussian", [cutoff, order] for
        "Butterworth", [sigma_min, sigma_max] for the ridge filters).
    rescale_to_uint8 : bool, optional
        When True and the result is not already uint8, bracket the output
        contrast into uint8 range. Default is False.

    Returns
    -------
    NDArray
        The filtered (and optionally uint8-rescaled) image.

    Examples
    --------
    >>> image = np.zeros((3, 3))
    >>> image[1, 1] = 1
    >>> filtered_image = apply_filter(image, "Gaussian", [1.0])
    >>> print(filtered_image)
    [[0.05855018 0.09653293 0.05855018]
     [0.09653293 0.15915589 0.09653293]
     [0.05855018 0.09653293 0.05855018]]
    """
    # Dispatch table: each entry is a zero-setup callable taking the image.
    # `param` is captured by the lambdas and only read for the chosen filter.
    dispatch = {
        "Gaussian": lambda im: gaussian(im, sigma=param[0]),
        "Median": lambda im: median(im),
        "Butterworth": lambda im: butterworth(im, cutoff_frequency_ratio=param[0], order=param[1]),
        "Frangi": lambda im: frangi(im, sigmas=np.linspace(param[0], param[1], num=3)),
        "Sato": lambda im: sato(im, sigmas=np.linspace(param[0], param[1], num=3)),
        "Meijering": lambda im: meijering(im, sigmas=np.linspace(param[0], param[1], num=3)),
        "Hessian": lambda im: hessian(im, sigmas=np.linspace(param[0], param[1], num=3)),
        "Laplace": lambda im: laplace(im, ksize=int(param[0])),
        "Sharpen": lambda im: cv2.filter2D(im, -1, np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])),
        "Mexican hat": lambda im: cv2.filter2D(im, -1, np.array(
            [[0, 0, -1, 0, 0], [0, -1, -2, -1, 0], [-1, -2, 16, -2, -1], [0, -1, -2, -1, 0], [0, 0, -1, 0, 0]])),
        "Farid": farid,
        "Prewitt": prewitt,
        "Roberts": roberts,
        "Scharr": scharr,
        "Sobel": sobel,
    }
    handler = dispatch.get(filter_type)
    if handler is not None:
        image = handler(image)
    if rescale_to_uint8 and image.dtype != np.uint8:
        image = bracket_to_uint8_image_contrast(image)
    return image
151
+
152
+
153
def get_color_spaces(bgr_image: NDArray[np.uint8], space_names: list="") -> Dict:
    """
    Convert a BGR image into various color spaces.

    Converts the input BGR image to the requested color spaces and returns
    them as a numba typed Dict of float64 arrays. When `space_names` is empty,
    all default spaces (lab, hsv, luv, hls, yuv) are produced. Any 'logical'
    entry is ignored (it is a combination keyword, not a color space).

    Fixed: the original implementation popped 'logical' out of the caller's
    list in place; this version never mutates `space_names`.

    Parameters
    ----------
    bgr_image : ndarray of uint8
        Input image in BGR color space.
    space_names : list of str, optional
        Color spaces to convert to. Defaults to "" (i.e. all of them).

    Returns
    -------
    out : Dict
        Typed dictionary keyed by color-space name ('bgr' is always present),
        with float64 converted images as values.

    Examples
    --------
    >>> bgr_image = np.zeros((5, 5, 3), dtype=np.uint8)
    >>> c_spaces = get_color_spaces(bgr_image, ['lab', 'hsv'])
    >>> print(list(c_spaces.keys()))
    ['bgr', 'lab', 'hsv']
    """
    # Work on a filtered copy so the caller's list is left untouched.
    wanted = [name for name in space_names if name != 'logical']
    # Insertion order (lab, hsv, luv, hls, yuv) matches the historical output order.
    conversion_codes = (('lab', cv2.COLOR_BGR2LAB),
                        ('hsv', cv2.COLOR_BGR2HSV),
                        ('luv', cv2.COLOR_BGR2LUV),
                        ('hls', cv2.COLOR_BGR2HLS),
                        ('yuv', cv2.COLOR_BGR2YUV))
    c_spaces = Dict()
    c_spaces['bgr'] = bgr_image.astype(np.float64)
    for name, code in conversion_codes:
        # Empty request means "convert to every default space".
        if len(wanted) == 0 or name in wanted:
            c_spaces[name] = cv2.cvtColor(bgr_image, code).astype(np.float64)
    return c_spaces
203
+
204
+
205
@njit()
def combine_color_spaces(c_space_dict: Dict, all_c_spaces: Dict, subtract_background: NDArray=None) -> NDArray:
    """
    Combine color-space channels into a single greyscale image.

    Sums, for every color space in `c_space_dict`, its three channels weighted
    by the given coefficients, then normalizes the result into [0, 255]
    (float64, ready for uint8 conversion). When `subtract_background` is given,
    the combined image is first normalized, then the background is subtracted
    (in whichever direction keeps values meaningful), and the result is
    normalized again.

    Fixed: reuses the already-computed `max_im` instead of calling
    `np.max(image)` a second time, and uses the iterated `channels` value
    instead of re-indexing the dictionary.

    Parameters
    ----------
    c_space_dict : Dict
        Maps color-space name -> length-3 array of channel coefficients.
    all_c_spaces : Dict
        All available converted color spaces; must contain every key of
        `c_space_dict` plus 'bgr' (used for the output shape).
    subtract_background : NDArray, optional
        Background image to subtract. Defaults to None.

    Returns
    -------
    out : NDArray
        Combined, normalized float64 image.

    Examples
    --------
    >>> c_space_dict = Dict()
    >>> c_space_dict['hsv'] = np.array((0, 1, 1))
    >>> all_c_spaces = Dict()
    >>> all_c_spaces['bgr'] = np.random.rand(5, 5, 3)
    >>> all_c_spaces['hsv'] = np.random.rand(5, 5, 3)
    >>> result = combine_color_spaces(c_space_dict, all_c_spaces)
    >>> print(result.shape)
    (5, 5)
    """
    image = np.zeros((all_c_spaces['bgr'].shape[0], all_c_spaces['bgr'].shape[1]), dtype=np.float64)
    for space, channels in c_space_dict.items():
        image += channels[0] * all_c_spaces[space][:, :, 0] + \
                 channels[1] * all_c_spaces[space][:, :, 1] + \
                 channels[2] * all_c_spaces[space][:, :, 2]
    if subtract_background is not None:
        # Shift so the smallest value becomes 0, then bracket into [0, 255]
        # before comparing with / subtracting the background.
        image -= np.min(image)
        max_im = np.max(image)
        if max_im != 0:
            image = 255 * (image / max_im)
        # Subtract in the direction that keeps the foreground positive.
        if image.sum() > subtract_background.sum():
            image -= subtract_background
        else:
            image = subtract_background - image
    # Final normalization: min at 0, values bracketed into [0, 255].
    image -= np.min(image)
    max_im = np.max(image)
    if max_im != 0:
        image = 255 * (image / max_im)
    return image
263
+
264
+
265
def generate_color_space_combination(bgr_image: NDArray[np.uint8], c_spaces: list, first_dict: Dict, second_dict: Dict={}, background: NDArray=None, background2: NDArray=None, convert_to_uint8: bool=False) -> NDArray[np.uint8]:
    """
    Generate one (or two) greyscale images from color-space combinations.

    Converts `bgr_image` into the color spaces listed in `c_spaces`, then
    combines them using the coefficients of `first_dict` (and, when provided,
    `second_dict` for a second greyscale image).

    Fixed: the fallback that re-typed `first_dict` through `translate_dict`
    was guarded by a bare `except:`; it now catches `Exception` only, so
    KeyboardInterrupt/SystemExit are no longer swallowed.

    Parameters
    ----------
    bgr_image : ndarray of uint8
        The input image in BGR color space.
    c_spaces : list
        Color spaces to convert to before combining.
    first_dict : Dict
        Color-space -> coefficients mapping for the first greyscale image.
        May be a plain dict; it is translated to a typed Dict on demand.
    second_dict : Dict, optional
        Same mapping for the second greyscale image. Empty means "skip".
    background : ndarray, optional
        Background subtracted from the first image. Default is None.
    background2 : ndarray, optional
        Background subtracted from the second image. Default is None.
    convert_to_uint8 : bool, optional
        Whether to bracket the outputs to uint8. Default is False.

    Returns
    -------
    out : tuple of ndarray
        (first greyscale image, second greyscale image or None).

    Examples
    --------
    >>> bgr_image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> c_spaces = ['bgr', 'hsv']
    >>> first_dict = Dict()
    >>> first_dict['bgr'] = np.array((0, 1, 1))
    >>> second_dict = Dict()
    >>> second_dict['hsv'] = np.array((0, 0, 1))
    >>> greyscale_image1, greyscale_image2 = generate_color_space_combination(bgr_image, c_spaces, first_dict, second_dict)
    >>> print(greyscale_image1.shape)
    (100, 100)
    """
    all_c_spaces = get_color_spaces(bgr_image, c_spaces)
    try:
        greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
    except Exception:
        # Presumably a plain dict was passed where the njit'ed combiner needs
        # a numba typed Dict; translate and retry — TODO confirm against translate_dict.
        first_dict = translate_dict(first_dict)
        greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
    if convert_to_uint8:
        greyscale_image = bracket_to_uint8_image_contrast(greyscale_image)
    greyscale_image2 = None
    if len(second_dict) > 0:
        greyscale_image2 = combine_color_spaces(second_dict, all_c_spaces, background2)
        if convert_to_uint8:
            greyscale_image2 = bracket_to_uint8_image_contrast(greyscale_image2)
    return greyscale_image, greyscale_image2
321
+
322
+
323
@njit()
def get_otsu_threshold(image: NDArray):
    """
    Compute Otsu's threshold for a greyscale image.

    Builds a 256-bin histogram of the image, then picks the bin center that
    maximizes the inter-class variance between the two pixel populations it
    separates.

    Parameters
    ----------
    image : NDArray
        Input greyscale image.

    Returns
    -------
    int or float
        The Otsu threshold (a bin-center value).
    """
    n_bins = 256
    hist, edges = np.histogram(image, bins=n_bins)
    # Bin centers, used both for class means and as candidate thresholds.
    mids = (edges[:-1] + edges[1:]) / 2.

    # Class probabilities w0(t), w1(t) as cumulative histogram counts.
    weight_below = np.cumsum(hist)
    weight_above = np.cumsum(hist[::-1])[::-1]

    # Class means mu0(t), mu1(t).
    mean_below = np.cumsum(hist * mids) / weight_below
    mean_above = (np.cumsum((hist * mids)[::-1]) / weight_above[::-1])[::-1]

    # Inter-class variance for every candidate split point.
    between_class_variance = weight_below[:-1] * weight_above[1:] * (mean_below[:-1] - mean_above[1:]) ** 2

    best_split = np.argmax(between_class_variance)
    return mids[:-1][best_split]
366
+
367
+
368
@njit()
def otsu_thresholding(image: NDArray) -> NDArray[np.uint8]:
    """
    Binarize a greyscale image with Otsu's threshold.

    Thresholds the image at the Otsu value, then returns whichever of the two
    resulting classes is the smaller one as the foreground (value 1), so the
    foreground is always the minority class.

    Parameters
    ----------
    image : ndarray
        Input greyscale image with any kind of value.

    Returns
    -------
    out : ndarray of uint8
        Binary image with values 0 or 1.

    Examples
    --------
    >>> image = np.array([10, 20, 30])
    >>> result = otsu_thresholding(image)
    >>> print(result)
    [1 0 0]
    """
    otsu_value = get_otsu_threshold(image)
    above = image > otsu_value
    below = np.logical_not(above)
    # Keep the minority class as the foreground.
    if above.sum() < below.sum():
        return above.astype(np.uint8)
    return below.astype(np.uint8)
402
+
403
+
404
@njit()
def segment_with_lum_value(converted_video: NDArray, basic_bckgrnd_values: NDArray, l_threshold, lighter_background: bool) -> Tuple[NDArray, NDArray]:
    """
    Segment video frames against a drift-compensated luminance threshold.

    The per-frame threshold is `l_threshold` shifted by the background drift
    relative to the last frame. When every per-frame threshold lies in
    [0, 255], each frame is compared against its own threshold; otherwise the
    function falls back to a single clamped global threshold.

    Fixed: in the lighter-background fallback, the comparison is now
    `converted_video < l_threshold` (cell darker than background), consistent
    with `less_along_first_axis` used on the main path; the original used `>`
    in both branches.

    Parameters
    ----------
    converted_video : ndarray
        The input video frames (time is the first axis).
    basic_bckgrnd_values : ndarray
        One background luminance value per frame.
    l_threshold : int or float
        The reference luminance threshold.
    lighter_background : bool
        When True, the cell is darker than the background, so pixels BELOW
        the threshold are foreground; when False, pixels ABOVE it are.

    Returns
    -------
    segmentation : ndarray
        Binary segmentation of every frame.
    l_threshold_over_time : ndarray
        The per-frame threshold actually derived from the background drift.

    Examples
    --------
    >>> converted_video = np.array([[[100, 120], [130, 140]], [[160, 170], [180, 200]]], dtype=np.uint8)
    >>> basic_bckgrnd_values = np.array([100, 120])
    >>> lighter_background = False
    >>> l_threshold = 130
    >>> segmentation, threshold_over_time = segment_with_lum_value(converted_video, basic_bckgrnd_values, l_threshold, lighter_background)
    >>> print(segmentation)
    [[[0 1]
      [1 1]]
     [[1 1]
      [1 1]]]

    """
    # Per-frame threshold: shift by the background drift relative to the last frame.
    l_threshold_over_time = l_threshold - (basic_bckgrnd_values[-1] - basic_bckgrnd_values)
    if lighter_background:
        if np.all(np.logical_and(0 <= l_threshold_over_time, l_threshold_over_time <= 255)):
            segmentation = less_along_first_axis(converted_video, l_threshold_over_time)
        else:
            # Drifted thresholds out of range: fall back to one clamped global threshold.
            segmentation = np.zeros_like(converted_video)
            if l_threshold > 255:
                l_threshold = 255
            segmentation += converted_video < l_threshold
    else:
        if np.all(np.logical_and(0 <= l_threshold_over_time, l_threshold_over_time <= 255)):
            segmentation = greater_along_first_axis(converted_video, l_threshold_over_time)
        else:
            segmentation = np.zeros_like(converted_video)
            if l_threshold > 255:
                l_threshold = 255
            segmentation += converted_video > l_threshold
    return segmentation, l_threshold_over_time
469
+
470
+
471
def _network_perimeter(threshold, img: NDArray):
    """
    Objective function: negative perimeter of the thresholded image.

    Binarizes `img` at `threshold` and returns minus the perimeter of the
    resulting mask, so that minimizing this value maximizes the perimeter.

    Parameters
    ----------
    threshold : float
        Threshold applied to the image.
    img : ndarray
        Input greyscale image.

    Returns
    -------
    out : float
        Negative perimeter of `img > threshold`.

    Examples
    --------
    >>> img = np.array([[1, 2, 1, 1], [1, 3, 4, 1], [2, 4, 3, 1], [2, 1, 2, 1]])
    >>> _network_perimeter(threshold=2.5, img=img)
    -4
    """
    mask = img > threshold
    return -perimeter(mask)
499
+
500
+
501
+ def rolling_window_segmentation(greyscale_image: NDArray, possibly_filled_pixels: NDArray, patch_size: tuple=(10, 10)) -> NDArray[np.uint8]:
502
+ """
503
+ Perform rolling window segmentation on a greyscale image, using potentially filled pixels and a specified patch size.
504
+
505
+ The function divides the input greyscale image into overlapping patches defined by `patch_size`,
506
+ and applies Otsu's thresholding method to each patch. The thresholds can be optionally
507
+ refined using a minimization algorithm.
508
+
509
+ Parameters
510
+ ----------
511
+ greyscale_image : ndarray of uint8
512
+ The input greyscale image to segment.
513
+ possibly_filled_pixels : ndarray of uint8
514
+ An array indicating which pixels are possibly filled.
515
+ patch_size : tuple, optional
516
+ The dimensions of the patches to segment. Default is (10, 10).
517
+ Must be superior to (1, 1).
518
+
519
+ Returns
520
+ -------
521
+ output : ndarray of uint8
522
+ The segmented binary image where the network is marked as True.
523
+
524
+ Examples
525
+ --------
526
+ >>> greyscale_image = np.array([[1, 2, 1, 1], [1, 3, 4, 1], [2, 4, 3, 1], [2, 1, 2, 1]])
527
+ >>> possibly_filled_pixels = greyscale_image > 1
528
+ >>> patch_size = (2, 2)
529
+ >>> result = rolling_window_segmentation(greyscale_image, possibly_filled_pixels, patch_size)
530
+ >>> print(result)
531
+ [[0 1 0 0]
532
+ [0 1 1 0]
533
+ [0 1 1 0]
534
+ [0 0 1 0]]
535
+ """
536
+ patch_centers = [
537
+ np.floor(np.linspace(
538
+ p // 2, s - p // 2, int(np.ceil(s / (p // 2))) - 1
539
+ )).astype(int)
540
+ for s, p in zip(greyscale_image.shape, patch_size)
541
+ ]
542
+ patch_centers = np.transpose(np.meshgrid(*patch_centers), (1, 2, 0)).reshape((-1, 2))
543
+
544
+ patch_slices = [
545
+ tuple(slice(c - p // 2, c + p // 2, 1)
546
+ for c, p in zip(p_c, patch_size)) for p_c in patch_centers
547
+ ]
548
+ maximize_parameter = False
549
+
550
+ network_patches = []
551
+ patch_thresholds = []
552
+ # for patch in tqdm(patch_slices):
553
+ for patch in patch_slices:
554
+ v = greyscale_image[patch] * possibly_filled_pixels[patch]
555
+ if v.max() > 0 and np.ptp(v) > 0.5:
556
+ t = threshold_otsu(v)
557
+
558
+ if maximize_parameter:
559
+ res = minimize(_network_perimeter, x0=t, args=(v,), method='Nelder-Mead')
560
+ t = res.x[0]
561
+
562
+ network_patches.append(v > t)
563
+ patch_thresholds.append(t)
564
+ else:
565
+ network_patches.append(np.zeros_like(v))
566
+ patch_thresholds.append(0)
567
+
568
+ network_img = np.zeros(greyscale_image.shape, dtype=np.float64)
569
+ count_img = np.zeros_like(greyscale_image)
570
+ for patch, network_patch, t in zip(patch_slices, network_patches, patch_thresholds):
571
+ network_img[patch] += network_patch
572
+ count_img[patch] += np.ones_like(network_patch)
573
+
574
+ # Safe in-place division: zeros remain where count_img == 0
575
+ np.divide(network_img, count_img, out=network_img, where=count_img != 0)
576
+
577
+ return (network_img > 0.5).astype(np.uint8)
578
+
579
+ def binary_quality_index(binary_img: NDArray[np.uint8]) -> float:
580
+ """
581
+ Calculate the binary quality index for a binary image.
582
+
583
+ The binary quality index is computed based on the perimeter of the largest
584
+ connected component in the binary image, normalized by the total number of
585
+ pixels.
586
+
587
+ Parameters
588
+ ----------
589
+ binary_img : ndarray of uint8
590
+ Input binary image array.
591
+
592
+ Returns
593
+ -------
594
+ out : float
595
+ The binary quality index value.
596
+ """
597
+ if np.any(binary_img):
598
+ # SD = ShapeDescriptors(binary_img, ["euler_number"])
599
+ # index = - SD.descriptors['euler_number']
600
+ size, largest_cc = get_largest_connected_component(binary_img)
601
+ index = np.square(perimeter(largest_cc)) / binary_img.sum()
602
+ # index = (largest_cc.sum() * perimeter(largest_cc)) / binary_img.sum()
603
+ else:
604
+ index = 0.
605
+ return index
606
+
607
+
608
def find_threshold_given_mask(greyscale: NDArray[np.uint8], mask: np.uint8, min_threshold: np.uint8=0) -> np.uint8:
    """
    Binary-search the threshold separating a masked region from the rest.

    Region A is where `mask` is positive, region B where it is zero. The
    search finds the lowest threshold (not below `min_threshold`) for which
    more pixels of region A than of region B exceed it.

    Parameters
    ----------
    greyscale : ndarray of uint8
        The greyscale image.
    mask : ndarray of uint8
        Binary mask: positive values define region A, zeros region B.
    min_threshold : uint8, optional
        Lower bound of the search. Defaults to 0.

    Returns
    -------
    out : uint8
        The threshold found. Degenerate cases: 255 when region A is empty,
        `min_threshold` when region B is empty, and the image mean when no
        pixel of either region exceeds the candidate threshold.

    Examples
    --------
    >>> greyscale = np.array([[255, 128, 54], [0, 64, 20]], dtype=np.uint8)
    >>> mask = np.array([[1, 1, 0], [0, 0, 0]], dtype=np.uint8)
    >>> find_threshold_given_mask(greyscale, mask)
    54
    """
    region_a = greyscale[mask > 0]
    # No masked pixels at all: return the maximal threshold.
    if len(region_a) == 0:
        return np.uint8(255)
    region_b = greyscale[mask == 0]
    # Everything is masked: the lower bound already separates the regions.
    if len(region_b) == 0:
        return min_threshold

    low, high = min_threshold, 255
    best_thresh = low
    while 0 <= low <= high:
        mid = (low + high) // 2
        count_a, count_b = _get_counts_jit(mid, region_a, region_b)
        if count_a > count_b:
            # This threshold works; remember it and try a lower one.
            best_thresh = mid
            high = mid - 1
        elif count_a == 0 and count_b == 0:
            # Nothing exceeds `mid` anywhere: fall back to the image mean.
            best_thresh = greyscale.mean()
            break
        else:
            # Region B still dominates; a higher threshold is needed.
            low = mid + 1
    return best_thresh
663
+
664
+
665
@njit()
def _get_counts_jit(thresh: np.uint8, region_a: NDArray[np.uint8], region_b: NDArray[np.uint8]) -> Tuple[int, int]:
    """
    Count, in each region, the pixels strictly above a threshold (JIT-compiled).

    Parameters
    ----------
    thresh : uint8
        The threshold value to compare against.
    region_a : ndarray of uint8
        First region of pixel values.
    region_b : ndarray of uint8
        Second region of pixel values.

    Returns
    -------
    out : tuple of int, int
        (pixels of region_a above thresh, pixels of region_b above thresh).

    Examples
    --------
    >>> import numpy as np
    >>> region_a = np.array([1, 250, 3], dtype=np.uint8)
    >>> region_b = np.array([4, 250, 6], dtype=np.uint8)
    >>> thresh = np.uint8(100)
    >>> _get_counts_jit(thresh, region_a, region_b)
    (1, 1)
    """
    n_above_a = 0
    n_above_b = 0
    # Explicit loops keep this trivially compilable by numba's nopython mode.
    for pixel in region_a:
        if pixel > thresh:
            n_above_a += 1
    for pixel in region_b:
        if pixel > thresh:
            n_above_b += 1
    return n_above_a, n_above_b