cellects-0.1.2-py3-none-any.whl → cellects-0.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cellects/__main__.py +65 -25
  2. cellects/config/all_vars_dict.py +18 -17
  3. cellects/core/cellects_threads.py +1034 -396
  4. cellects/core/motion_analysis.py +1664 -2010
  5. cellects/core/one_image_analysis.py +1082 -1061
  6. cellects/core/program_organizer.py +1687 -1316
  7. cellects/core/script_based_run.py +80 -76
  8. cellects/gui/advanced_parameters.py +390 -330
  9. cellects/gui/cellects.py +102 -91
  10. cellects/gui/custom_widgets.py +16 -33
  11. cellects/gui/first_window.py +226 -104
  12. cellects/gui/if_several_folders_window.py +117 -68
  13. cellects/gui/image_analysis_window.py +866 -454
  14. cellects/gui/required_output.py +104 -57
  15. cellects/gui/ui_strings.py +840 -0
  16. cellects/gui/video_analysis_window.py +333 -155
  17. cellects/image_analysis/cell_leaving_detection.py +64 -4
  18. cellects/image_analysis/image_segmentation.py +451 -22
  19. cellects/image_analysis/morphological_operations.py +2166 -1635
  20. cellects/image_analysis/network_functions.py +616 -253
  21. cellects/image_analysis/one_image_analysis_threads.py +94 -153
  22. cellects/image_analysis/oscillations_functions.py +131 -0
  23. cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
  24. cellects/image_analysis/shape_descriptors.py +517 -466
  25. cellects/utils/formulas.py +169 -6
  26. cellects/utils/load_display_save.py +362 -109
  27. cellects/utils/utilitarian.py +86 -9
  28. cellects-0.2.6.dist-info/LICENSE +675 -0
  29. cellects-0.2.6.dist-info/METADATA +829 -0
  30. cellects-0.2.6.dist-info/RECORD +44 -0
  31. cellects/core/one_video_per_blob.py +0 -540
  32. cellects/image_analysis/cluster_flux_study.py +0 -102
  33. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  34. cellects-0.1.2.dist-info/METADATA +0 -132
  35. cellects-0.1.2.dist-info/RECORD +0 -44
  36. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
  37. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
  38. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
cellects/image_analysis/cell_leaving_detection.py
@@ -5,10 +5,70 @@ This function considers the pixel intensity curve of each covered pixel and asse
  """
  import cv2
  import numpy as np
+ from numpy.typing import NDArray
+ from typing import Tuple
  from cellects.image_analysis.morphological_operations import cross_33
 
 
- def cell_leaving_detection(new_shape, covering_intensity, previous_binary, greyscale_image, fading_coefficient, lighter_background, several_blob_per_arena, erodila_disk, protect_from_fading=None, add_to_fading=None):
+ def cell_leaving_detection(new_shape: NDArray[np.uint8], covering_intensity:NDArray, previous_binary: NDArray[np.uint8], greyscale_image: NDArray, fading_coefficient: float, lighter_background: bool, several_blob_per_arena: bool, erodila_disk: NDArray[np.uint8], protect_from_fading: NDArray=None, add_to_fading: NDArray=None) -> Tuple[NDArray[np.uint8], NDArray]:
+ """
+ Perform cell leaving detection based on shape changes and intensity variations.
+
+ Checks for fading pixels by considering the internal contour of a previous binary
+ image, applies erosion and subtraction operations, and updates the shape based on
+ fading detection. It handles cases where the background is lighter or darker and
+ ensures that detected fading regions do not fragment the shape into multiple components,
+ unless specified otherwise.
+
+ Parameters
+ ----------
+ new_shape : NDArray[np.uint8]
+ The current shape to be updated based on fading detection.
+
+ covering_intensity : NDArray
+ Intensity values used to determine if pixels are fading.
+ Should have the same dimensions as new_shape.
+
+ previous_binary : NDArray[np.uint8]
+ Binary representation of the shape at the previous time step.
+ Should have the same dimensions as new_shape.
+
+ greyscale_image : NDArray
+ Greyscale image used for intensity comparison.
+ Should have the same dimensions as new_shape.
+
+ fading_coefficient : float
+ A coefficient to determine fading thresholds based on covering intensity.
+ Should be between 0 and 1.
+
+ lighter_background : bool
+ Flag indicating if the background is lighter.
+ True if background is lighter, False otherwise.
+
+ several_blob_per_arena : bool
+ Flag indicating if multiple blobs per arena are allowed.
+ True to allow fragmentation, False otherwise.
+
+ erodila_disk : NDArray[np.uint8]
+ Disk used for erosion operations.
+ Should be a valid structuring element.
+
+ protect_from_fading : NDArray, optional
+ An optional array to prevent certain pixels from being marked as fading.
+ Should have the same dimensions as new_shape.
+
+ add_to_fading : NDArray, optional
+ An optional array to mark additional pixels as fading.
+ Should have the same dimensions as new_shape.
+
+ Returns
+ -------
+ new_shape : NDArray[np.uint8]
+ Updated shape after applying fading detection and morphological operations.
+
+ covering_intensity : NDArray
+ Updated intensity values.
+ """
  # To look for fading pixels, only consider the internal contour of the shape at t-1
  fading = cv2.erode(previous_binary, erodila_disk)
  fading = previous_binary - fading
@@ -21,14 +81,14 @@ def cell_leaving_detection(new_shape, covering_intensity, previous_binary, greys
  add_to_fading_coord = np.nonzero(add_to_fading)
  fading[add_to_fading_coord] = 1
  if lighter_background:
- covering_intensity[add_to_fading_coord] = 1 / (1 - fading_coefficient) # 0.9 * covering_intensity[add_to_fading_coord] #
+ covering_intensity[add_to_fading_coord] = 1 # 0.9 * covering_intensity[add_to_fading_coord] #
  else:
  covering_intensity[add_to_fading_coord] = 255 # 1.1 * covering_intensity[add_to_fading_coord]
  # With a lighter background, fading them if their intensity gets higher than the covering intensity
  if lighter_background:
- fading = fading * np.greater((greyscale_image), (1 - fading_coefficient) * covering_intensity).astype(np.uint8)
+ fading = fading * np.greater(greyscale_image, (1 - fading_coefficient) * covering_intensity).astype(np.uint8)
  else:
- fading = fading * np.less((greyscale_image), (1 + fading_coefficient) * covering_intensity).astype(np.uint8)
+ fading = fading * np.less(greyscale_image, (1 + fading_coefficient) * covering_intensity).astype(np.uint8)
 
  if np.any(fading):
  fading = cv2.morphologyEx(fading, cv2.MORPH_CLOSE, cross_33, iterations=1)
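
The annotated signature above makes the calling convention explicit. A minimal sketch of a call, assuming same-shape uint8 masks and a small elliptical structuring element (the array sizes and fading coefficient below are illustrative, not Cellects defaults):

    import cv2
    import numpy as np
    from cellects.image_analysis.cell_leaving_detection import cell_leaving_detection

    # Illustrative inputs: a square blob at t-1, unchanged at t, on a lighter background
    previous_binary = np.zeros((64, 64), np.uint8)
    previous_binary[16:48, 16:48] = 1
    new_shape = previous_binary.copy()
    greyscale_image = np.full((64, 64), 200, np.uint8)
    covering_intensity = np.full((64, 64), 180, np.float64)
    erodila_disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    new_shape, covering_intensity = cell_leaving_detection(
        new_shape, covering_intensity, previous_binary, greyscale_image,
        fading_coefficient=0.1, lighter_background=True,
        several_blob_per_arena=False, erodila_disk=erodila_disk)

With these inputs the contour pixels read 200 on the greyscale frame, above the 0.9 × 180 fading threshold, so they are candidates for removal from new_shape.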
cellects/image_analysis/image_segmentation.py
@@ -1,8 +1,21 @@
  #!/usr/bin/env python3
- """
- This module contains functions for basic operations of image segmentation.
- It starts from converting bgr images into grayscale, filtering these grayscale images,
- and various way of splitting these grayscale pixels into two categories (i.e. methods of thresholding)
+ """Module for image segmentation operations including filtering, color space conversion, thresholding, and quality assessment.
+
+ This module provides tools to process images through grayscale conversion, apply various filters (e.g., Gaussian, Median, Butterworth), perform thresholding methods like Otsu's algorithm, combine color spaces for enhanced segmentation, and evaluate binary image quality. Key functionalities include dynamic background subtraction, rolling window segmentation with localized thresholds, and optimization of segmentation masks using shape descriptors.
+
+ Functions
+ apply_filter : Apply skimage or OpenCV-based filters to grayscale images.
+ get_color_spaces : Convert BGR images into specified color space representations (e.g., LAB, HSV).
+ combine_color_spaces : Merge multiple color channels with coefficients to produce a segmented image.
+ generate_color_space_combination : Create custom grayscale combinations using two sets of channel weights and backgrounds.
+ otsu_thresholding : Binarize an image using histogram-based Otsu thresholding.
+ segment_with_lum_value : Segment video frames using luminance thresholds adjusted for background variation.
+ rolling_window_segmentation : Apply localized Otsu thresholding across overlapping patches to improve segmentation accuracy.
+ binary_quality_index : Calculate a quality metric based on perimeter and connected components in binary images.
+ find_threshold_given_mask : Binary search optimization to determine optimal threshold between masked regions.
+
+ Notes
+ Uses Numba's @njit decorator for JIT compilation of performance-critical functions like combine_color_spaces and _get_counts_jit.
  """
  import numpy as np
  import cv2
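
A rough sketch of the pipeline this docstring describes, going from a BGR image to a binary mask (the channel weights are illustrative; under 0.2.6 the combination helper returns four values, as the later hunks show):

    import numpy as np
    from cellects.image_analysis.image_segmentation import (
        generate_color_space_combination, otsu_thresholding)

    bgr_image = np.random.randint(0, 255, (100, 100, 3), np.uint8)
    # Weight only the third bgr channel (the choice of weights is illustrative)
    first_dict = {'bgr': np.array([0, 0, 1], np.uint8)}
    greyscale, _, _, _ = generate_color_space_combination(
        bgr_image, ['bgr'], first_dict, convert_to_uint8=True)
    binary = otsu_thresholding(greyscale)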
@@ -11,7 +24,7 @@ from numba.typed import Dict
  from cellects.utils.decorators import njit
  from numpy.typing import NDArray
  from typing import Tuple
- from cellects.utils.utilitarian import less_along_first_axis, greater_along_first_axis, translate_dict
+ from cellects.utils.utilitarian import less_along_first_axis, greater_along_first_axis, translate_dict, split_dict
  from cellects.utils.formulas import bracket_to_uint8_image_contrast
  from cellects.image_analysis.morphological_operations import get_largest_connected_component
  from skimage.measure import perimeter
@@ -254,7 +267,7 @@ def combine_color_spaces(c_space_dict: Dict, all_c_spaces: Dict, subtract_backgr
  image = subtract_background - image
  # add (resp. subtract) the most negative (resp. smallest) value to the whole matrix to get a min = 0
  image -= np.min(image)
- # Make analysable this image by bracketing its values between 0 and 255 and converting it to uint8
+ # Make analysable this image by bracketing its values between 0 and 255
  max_im = np.max(image)
  if max_im != 0:
  image = 255 * (image / max_im)
@@ -262,7 +275,7 @@
  # c_space_dict=first_dict; all_c_spaces=self.all_c_spaces; subtract_background=background
 
 
- def generate_color_space_combination(bgr_image: NDArray[np.uint8], c_spaces: list, first_dict: Dict, second_dict: Dict={}, background: NDArray=None, background2: NDArray=None, convert_to_uint8: bool=False) -> NDArray[np.uint8]:
+ def generate_color_space_combination(bgr_image: NDArray[np.uint8], c_spaces: list, first_dict: Dict, second_dict: Dict={}, background: NDArray=None, background2: NDArray=None, convert_to_uint8: bool=False, all_c_spaces: dict={}) -> NDArray[np.uint8]:
  """
  Generate color space combinations for an input image.
 
@@ -304,20 +317,85 @@ def generate_color_space_combination(bgr_image: NDArray[np.uint8], c_spaces: lis
  >>> print(greyscale_image1.shape)
  (100, 100)
  """
- all_c_spaces = get_color_spaces(bgr_image, c_spaces)
- try:
- greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
- except:
- first_dict = translate_dict(first_dict)
- greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
+ greyscale_image2 = None
+ first_pc_vector = None
+ if "PCA" in c_spaces:
+ greyscale_image, var_ratio, first_pc_vector = extract_first_pc(bgr_image)
+ else:
+ if len(all_c_spaces) == 0:
+ all_c_spaces = get_color_spaces(bgr_image, c_spaces)
+ try:
+ greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
+ except:
+ first_dict = translate_dict(first_dict)
+ greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
+ if len(second_dict) > 0:
+ greyscale_image2 = combine_color_spaces(second_dict, all_c_spaces, background2)
+
  if convert_to_uint8:
  greyscale_image = bracket_to_uint8_image_contrast(greyscale_image)
- greyscale_image2 = None
- if len(second_dict) > 0:
- greyscale_image2 = combine_color_spaces(second_dict, all_c_spaces, background2)
- if convert_to_uint8:
+ if greyscale_image2 is not None and len(second_dict) > 0:
  greyscale_image2 = bracket_to_uint8_image_contrast(greyscale_image2)
- return greyscale_image, greyscale_image2
+ return greyscale_image, greyscale_image2, all_c_spaces, first_pc_vector
+
+
+ @njit()
+ def get_window_allowed_for_segmentation(im_shape: Tuple, mask:NDArray[np.uint8]=None, padding: int=0) -> Tuple[int, int, int, int]:
+ """
+ Get the allowed window for segmentation within an image.
+
+ This function calculates a bounding box (min_y, max_y, min_x, max_x) around
+ a segmentation mask or the entire image if no mask is provided.
+
+ Parameters
+ ----------
+ im_shape : Tuple[int, int]
+ The shape of the image (height, width).
+ mask : NDArray[np.uint8], optional
+ The binary mask for segmentation. Default is `None`.
+ padding : int, optional
+ Additional padding around the bounding box. Default is 0.
+
+ Returns
+ -------
+ Tuple[int, int, int, int]
+ A tuple containing the bounding box coordinates (min_y, max_y,
+ min_x, max_x).
+
+ Notes
+ -----
+ This function uses NumPy operations to determine the bounding box.
+ If `mask` is `None`, the full image dimensions are used.
+
+ Examples
+ --------
+ >>> im_shape = (500, 400)
+ >>> mask = np.zeros((500, 400), dtype=np.uint8)
+ >>> mask[200:300, 200:300] = 1
+ >>> result = get_window_allowed_for_segmentation(im_shape, mask, padding=10)
+ >>> print(result)
+ (190, 310, 190, 310)
+
+ >>> result = get_window_allowed_for_segmentation(im_shape)
+ >>> print(result)
+ (0, 500, 0, 400)
+ """
+ if mask is None or mask.sum() == 0:
+ min_y = 0
+ min_x = 0
+ max_y = im_shape[0]
+ max_x = im_shape[1]
+ else:
+ y, x = np.nonzero(mask)
+ min_y = np.min(y)
+ min_y = np.max((min_y - padding, 0))
+ min_x = np.min(x)
+ min_x = np.max((min_x - padding, 0))
+ max_y = np.max(y)
+ max_y = np.min((max_y + padding + 1, mask.shape[0]))
+ max_x = np.max(x)
+ max_x = np.min((max_x + padding + 1, mask.shape[0]))
+ return min_y, max_y, min_x, max_x
 
 
  @njit()
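
Note the changed contract: where 0.1.2 returned a (greyscale_image, greyscale_image2) pair, 0.2.6 returns four values, and the color-space dictionary it computed can be passed back in to skip re-converting the same image. A hedged sketch of that reuse (variable names and channel weights are illustrative):

    import numpy as np
    from cellects.image_analysis.image_segmentation import generate_color_space_combination

    bgr_image = np.random.randint(0, 255, (100, 100, 3), np.uint8)
    mix_a = {'bgr': np.array([0, 0, 1], np.uint8)}  # illustrative channel weights
    mix_b = {'bgr': np.array([1, 0, 0], np.uint8)}

    # The first call converts the image and returns the color spaces it computed
    grey_a, _, all_c_spaces, _ = generate_color_space_combination(bgr_image, ['bgr'], mix_a)
    # Passing all_c_spaces back avoids re-running get_color_spaces on the same image
    grey_b, _, _, _ = generate_color_space_combination(
        bgr_image, ['bgr'], mix_b, all_c_spaces=all_c_spaces)

Because a non-empty all_c_spaces suppresses the conversion entirely, the cache should only be reused for the same frame, not across frames.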
@@ -352,9 +430,16 @@ def get_otsu_threshold(image: NDArray):
  weight2 = np.cumsum(hist[::-1])[::-1]
 
  # Get the class means mu0(t)
- mean1 = np.cumsum(hist * bin_mids) / weight1
+ if weight1.all():
+ mean1 = np.cumsum(hist * bin_mids) / weight1
+ else:
+ mean1 = np.zeros_like(bin_mids)
+
  # Get the class means mu1(t)
- mean2 = (np.cumsum((hist * bin_mids)[::-1]) / weight2[::-1])[::-1]
+ if weight2.all():
+ mean2 = (np.cumsum((hist * bin_mids)[::-1]) / weight2[::-1])[::-1]
+ else:
+ mean2 = np.zeros_like(bin_mids)
 
  inter_class_variance = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
 
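
The new guards cover degenerate histograms: on a (near-)constant image one of the cumulative class weights contains zeros, so the unguarded divisions in 0.1.2 produced NaN/inf class means; 0.2.6 falls back to zeroed means instead. A small sketch of the case, assuming get_otsu_threshold is called directly:

    import numpy as np
    from cellects.image_analysis.image_segmentation import get_otsu_threshold

    flat = np.full((32, 32), 7, np.uint8)  # a single grey level: one class is empty
    threshold = get_otsu_threshold(flat)   # returns a threshold instead of NaN-driven output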
@@ -401,7 +486,6 @@ def otsu_thresholding(image: NDArray) -> NDArray[np.uint8]:
  return binary_image2.astype(np.uint8)
 
 
- @njit()
  def segment_with_lum_value(converted_video: NDArray, basic_bckgrnd_values: NDArray, l_threshold, lighter_background: bool) -> Tuple[NDArray, NDArray]:
  """
  Segment video frames based on luminance threshold.
@@ -468,6 +552,197 @@ def segment_with_lum_value(converted_video: NDArray, basic_bckgrnd_values: NDArr
  return segmentation, l_threshold_over_time
 
 
+
+ def kmeans(greyscale: NDArray, greyscale2: NDArray=None, kmeans_clust_nb: int=2,
+ biomask: NDArray[np.uint8]=None, backmask: NDArray[np.uint8]=None, logical: str='None',
+ bio_label=None, bio_label2=None, previous_binary_image: NDArray[np.uint8]=None):
+ """
+
+ Perform K-means clustering on a greyscale image to generate binary images.
+
+ Extended Description
+ --------------------
+ This function applies the K-means algorithm to a greyscale image or pair of images to segment them into binary images. It supports optional masks and previous segmentation labels for refining the clustering.
+
+ Parameters
+ ----------
+ greyscale : NDArray
+ The input greyscale image to segment.
+ greyscale2 : NDArray, optional
+ A second greyscale image for logical operations. Default is `None`.
+ kmeans_clust_nb : int, optional
+ Number of clusters for K-means. Default is `2`.
+ biomask : NDArray[np.uint8], optional
+ Mask for selecting biological objects. Default is `None`.
+ backmask : NDArray[np.uint8], optional
+ Mask for selecting background regions. Default is `None`.
+ logical : str, optional
+ Logical operation flag to enable processing of the second image. Default is `'None'`.
+ bio_label : int, optional
+ Label for biological objects in the first segmentation. Default is `None`.
+ bio_label2 : int, optional
+ Label for biological objects in the second segmentation. Default is `None`.
+ previous_binary_image : NDArray[np.uint8], optional
+ Previous binary image for refinement. Default is `None`.
+
+ Other Parameters
+ ----------------
+ **greyscale2, logical, bio_label2**: Optional parameters for processing a second image with logical operations.
+
+ Returns
+ -------
+ tuple
+ A tuple containing:
+ - `binary_image`: Binary image derived from the first input.
+ - `binary_image2`: Binary image for the second input if processed, else `None`.
+ - `new_bio_label`: New biological label for the first segmentation.
+ - `new_bio_label2`: New biological label for the second segmentation, if applicable.
+
+ Notes
+ -----
+ - The function performs K-means clustering with random centers.
+ - If `logical` is not `'None'`, both images are processed.
+ - Default clustering uses 2 clusters, modify `kmeans_clust_nb` for different needs.
+
+ """
+ new_bio_label = None
+ new_bio_label2 = None
+ binary_image2 = None
+ image = greyscale.reshape((-1, 1))
+ image = np.float32(image)
+ criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
+ compactness, label, center = cv2.kmeans(image, kmeans_clust_nb, None, criteria, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
+ kmeans_image = np.uint8(label.flatten().reshape(greyscale.shape[:2]))
+ sum_per_label = np.zeros(kmeans_clust_nb)
+ binary_image = np.zeros(greyscale.shape[:2], np.uint8)
+ if previous_binary_image is not None:
+ binary_images = []
+ image_scores = np.zeros(kmeans_clust_nb, np.uint64)
+ for i in range(kmeans_clust_nb):
+ binary_image_i = np.zeros(greyscale.shape[:2], np.uint8)
+ binary_image_i[kmeans_image == i] = 1
+ image_scores[i] = (binary_image_i * previous_binary_image).sum()
+ binary_images.append(binary_image_i)
+ binary_image[kmeans_image == np.argmax(image_scores)] = 1
+ elif bio_label is not None:
+ binary_image[kmeans_image == bio_label] = 1
+ new_bio_label = bio_label
+ else:
+ if biomask is not None:
+ all_labels = kmeans_image[biomask[0], biomask[1]]
+ for i in range(kmeans_clust_nb):
+ sum_per_label[i] = (all_labels == i).sum()
+ new_bio_label = np.argsort(sum_per_label)[1]
+ elif backmask is not None:
+ all_labels = kmeans_image[backmask[0], backmask[1]]
+ for i in range(kmeans_clust_nb):
+ sum_per_label[i] = (all_labels == i).sum()
+ new_bio_label = np.argsort(sum_per_label)[-2]
+ else:
+ for i in range(kmeans_clust_nb):
+ sum_per_label[i] = (kmeans_image == i).sum()
+ new_bio_label = np.argsort(sum_per_label)[-2]
+ binary_image[np.nonzero(np.isin(kmeans_image, new_bio_label))] = 1
+
+ if logical != 'None' and greyscale is not None:
+ image = greyscale2.reshape((-1, 1))
+ image = np.float32(image)
+ criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
+ compactness, label, center = cv2.kmeans(image, kmeans_clust_nb, None, criteria, attempts=10,
+ flags=cv2.KMEANS_RANDOM_CENTERS)
+ kmeans_image = np.uint8(label.flatten().reshape(greyscale.shape[:2]))
+ sum_per_label = np.zeros(kmeans_clust_nb)
+ binary_image2 = np.zeros(greyscale.shape[:2], np.uint8)
+ if previous_binary_image is not None:
+ binary_images = []
+ image_scores = np.zeros(kmeans_clust_nb, np.uint64)
+ for i in range(kmeans_clust_nb):
+ binary_image_i = np.zeros(greyscale.shape[:2], np.uint8)
+ binary_image_i[kmeans_image == i] = 1
+ image_scores[i] = (binary_image_i * previous_binary_image).sum()
+ binary_images.append(binary_image_i)
+ binary_image2[kmeans_image == np.argmax(image_scores)] = 1
+ elif bio_label2 is not None:
+ binary_image2[kmeans_image == bio_label2] = 1
+ new_bio_label2 = bio_label2
+ else:
+ if biomask is not None:
+ all_labels = kmeans_image[biomask[0], biomask[1]]
+ for i in range(kmeans_clust_nb):
+ sum_per_label[i] = (all_labels == i).sum()
+ new_bio_label2 = np.argsort(sum_per_label)[1]
+ elif backmask is not None:
+ all_labels = kmeans_image[backmask[0], backmask[1]]
+ for i in range(kmeans_clust_nb):
+ sum_per_label[i] = (all_labels == i).sum()
+ new_bio_label2 = np.argsort(sum_per_label)[-2]
+ else:
+ for i in range(kmeans_clust_nb):
+ sum_per_label[i] = (kmeans_image == i).sum()
+ new_bio_label2 = np.argsort(sum_per_label)[-2]
+ binary_image2[kmeans_image == new_bio_label2] = 1
+ return binary_image, binary_image2, new_bio_label, new_bio_label2
+
+
+ def windowed_thresholding(image:NDArray, lighter_background: bool=None, side_length: int=None, step: int=None, min_int_var: float=None):
+ """
+ Perform grid segmentation on the image.
+
+ This method applies a sliding window approach to segment the image into
+ a grid-like pattern based on intensity variations and optionally uses a mask.
+ The segmented regions are stored in `self.binary_image`.
+
+ Args:
+ lighter_background (bool): If True, areas lighter than the Otsu threshold are considered;
+ otherwise, darker areas are considered.
+ side_length (int, optional): The size of each grid square. Default is None.
+ step (int, optional): The step size for the sliding window. Default is None.
+ min_int_var (int, optional): Threshold for intensity variation within a grid.
+ Default is 20.
+ mask (NDArray, optional): A binary mask to restrict the segmentation area. Default is None.
+ """
+ if lighter_background is None:
+ binary_image = otsu_thresholding(image)
+ lighter_background = binary_image.sum() > (binary_image.size / 2)
+ if min_int_var is None:
+ min_int_var = np.ptp(image).astype(np.float64) * 0.1
+ if side_length is None:
+ side_length = int(np.min(image.shape) // 10)
+ if step is None:
+ step = side_length // 2
+ grid_image = np.zeros(image.shape, np.uint64)
+ homogeneities = np.zeros(image.shape, np.uint64)
+ mask = np.ones(image.shape, np.uint64)
+ for to_add in np.arange(0, side_length, step):
+ y_windows = np.arange(0, image.shape[0], side_length)
+ x_windows = np.arange(0, image.shape[1], side_length)
+ y_windows += to_add
+ x_windows += to_add
+ for y_start in y_windows:
+ if y_start < image.shape[0]:
+ y_end = y_start + side_length
+ if y_end < image.shape[0]:
+ for x_start in x_windows:
+ if x_start < image.shape[1]:
+ x_end = x_start + side_length
+ if x_end < image.shape[1]:
+ if np.any(mask[y_start:y_end, x_start:x_end]):
+ potential_detection = image[y_start:y_end, x_start:x_end]
+ if np.any(potential_detection):
+ if np.ptp(potential_detection[np.nonzero(potential_detection)]) < min_int_var:
+ homogeneities[y_start:y_end, x_start:x_end] += 1
+ threshold = get_otsu_threshold(potential_detection)
+ if lighter_background:
+ net_coord = np.nonzero(potential_detection < threshold)
+ else:
+ net_coord = np.nonzero(potential_detection > threshold)
+ grid_image[y_start + net_coord[0], x_start + net_coord[1]] += 1
+
+ binary_image = (grid_image >= (side_length // step)).astype(np.uint8)
+ binary_image[homogeneities >= (((side_length // step) // 2) + 1)] = 0
+ return binary_image
+
+
  def _network_perimeter(threshold, img: NDArray):
  """
  Calculate the negative perimeter of a binary image created from an input image based on a threshold.
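
A hedged sketch of calling the two new segmentation entry points on a synthetic frame (the image and cluster count are illustrative; kmeans also accepts bio/background masks and labels, per its docstring):

    import numpy as np
    from cellects.image_analysis.image_segmentation import kmeans, windowed_thresholding

    # Synthetic frame: a bright blob on a dark background
    greyscale = np.zeros((100, 100), np.uint8)
    greyscale[40:60, 40:60] = 200

    binary, binary2, bio_label, bio_label2 = kmeans(greyscale, kmeans_clust_nb=2)
    # On a later frame, the previous mask steers which cluster counts as biological
    binary_next, _, _, _ = kmeans(greyscale, previous_binary_image=binary)

    # Overlapping local Otsu windows; every parameter has a data-driven default
    binary_windows = windowed_thresholding(greyscale)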
@@ -703,4 +978,158 @@ def _get_counts_jit(thresh: np.uint8, region_a: NDArray[np.uint8], region_b: NDA
  for val in region_b:
  if val > thresh:
  count_b += 1
- return count_a, count_b
+ return count_a, count_b
+
+
+ def extract_first_pc(bgr_image: np.ndarray, standardize: bool=True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ """
+
+ Extract the first principal component from a BGR image.
+
+ Parameters
+ ----------
+ bgr_image : numpy.ndarray
+ A 3D or 2D array representing the BGR image. Expected shape is either
+ (height, width, 3) or (3, height, width).
+ standardize : bool, optional
+ If True, standardizes the image pixel values by subtracting the mean and
+ dividing by the standard deviation before computing the principal
+ components. Default is True.
+
+ Returns
+ -------
+ numpy.ndarray
+ The first principal component image, reshaped to the original image height and width.
+ float
+ The explained variance ratio of the first principal component.
+ numpy.ndarray
+ The first principal component vector.
+
+ Notes
+ -----
+ The principal component analysis (PCA) is performed using Singular Value Decomposition (SVD).
+ Standardization helps in scenarios where the pixel values have different scales.
+ Pixels with zero standard deviation are handled by setting their standardization
+ denominator to 1.0 to avoid division by zero.
+
+ Examples
+ --------
+ >>> bgr_image = np.random.rand(100, 200, 3) # Example BGR image
+ >>> first_pc_image, explained_variance_ratio, first_pc_vector = extract_first_pc(bgr_image)
+ >>> print(first_pc_image.shape)
+ (100, 200)
+ >>> print(explained_variance_ratio)
+ 0.339
+ >>> print(first_pc_vector.shape)
+ (3,)
+ """
+ height, width, channels = bgr_image.shape
+ pixels = bgr_image.reshape(-1, channels) # Flatten to Nx3 matrix
+
+ if standardize:
+ mean = np.mean(pixels, axis=0)
+ std = np.std(pixels, axis=0)
+ std[std == 0] = 1.0 # Avoid division by zero
+ pixels_scaled = (pixels - mean) / std
+ else:
+ pixels_scaled = pixels
+
+ # Perform SVD on standardized data to get principal components
+ U, d, Vt = np.linalg.svd(pixels_scaled, full_matrices=False)
+
+ # First PC is the projection of each pixel onto first right singular vector (Vt[0])
+ first_pc_vector = Vt[0] # Shape: (3,)
+ eigen = d ** 2
+ total_variance = np.sum(eigen)
+
+ explained_variance_ratio = np.zeros_like(eigen)
+ np.divide(eigen, total_variance, out=explained_variance_ratio, where=total_variance != 0)
+
+ # Compute first principal component scores
+ first_pc_scores = U[:, 0] * d[0]
+
+ # Reshape to image shape and threshold negative values
+ first_pc_image = first_pc_scores.reshape(height, width)
+ # first_pc_thresholded = np.maximum(first_pc_image, 0)
+
+ return first_pc_image, explained_variance_ratio[0], first_pc_vector
+
+
+ def convert_subtract_and_filter_video(video: NDArray, color_space_combination: dict, background: NDArray=None,
+ background2: NDArray=None, lose_accuracy_to_save_memory:bool=False,
+ filter_spec: dict=None) -> Tuple[NDArray, NDArray]:
+ """
+ Convert a video to grayscale, subtract the background, and apply filters.
+
+ Parameters
+ ----------
+ video : NDArray
+ The input video as a 4D NumPy array.
+ color_space_combination : dict
+ A dictionary containing the combinations of color space transformations.
+ background : NDArray, optional
+ The first background image for subtraction. If `None`, no subtraction is performed.
+ background2 : NDArray, optional
+ The second background image for subtraction. If `None`, no subtraction is performed.
+ lose_accuracy_to_save_memory : bool
+ Flag to reduce accuracy and save memory by using `uint8` instead of `float64`.
+ filter_spec : dict
+ A dictionary containing the specifications for filters to apply.
+
+ Returns
+ -------
+ Tuple[NDArray, NDArray]
+ A tuple containing:
+ - `converted_video`: The converted grayscale video.
+ - `converted_video2`: The second converted grayscale video if logical operation is not 'None'.
+
+ Notes
+ -----
+ - The function reduces accuracy of the converted video when `lose_accuracy_to_save_memory` is set to True.
+ - If `color_space_combination['logical']` is not 'None', a second converted video will be created.
+ - This function uses the `generate_color_space_combination` and `apply_filter` functions internally.
+ """
+
+ converted_video2 = None
+ if len(video.shape) == 3:
+ converted_video = video
+ else:
+ if lose_accuracy_to_save_memory:
+ array_type = np.uint8
+ else:
+ array_type = np.float64
+ first_dict, second_dict, c_spaces = split_dict(color_space_combination)
+ if 'PCA' in first_dict:
+ greyscale_image, var_ratio, first_pc_vector = extract_first_pc(video[0])
+ first_dict = Dict()
+ first_dict['bgr'] = bracket_to_uint8_image_contrast(first_pc_vector)
+ c_spaces = ['bgr']
+ if 'PCA' in second_dict:
+ greyscale_image, var_ratio, first_pc_vector = extract_first_pc(video[0])
+ second_dict = Dict()
+ second_dict['bgr'] = bracket_to_uint8_image_contrast(first_pc_vector)
+ c_spaces = ['bgr']
+
+ converted_video = np.zeros(video.shape[:3], dtype=array_type)
+ if color_space_combination['logical'] != 'None':
+ converted_video2 = converted_video.copy()
+ for im_i in range(video.shape[0]):
+ if im_i == 0 and background is not None:
+ # when doing background subtraction, the first and the second image are equal
+ image_i = video[1, ...]
+ else:
+ image_i = video[im_i, ...]
+ results = generate_color_space_combination(image_i, c_spaces, first_dict, second_dict, background,
+ background2, lose_accuracy_to_save_memory)
+ greyscale_image, greyscale_image2, all_c_spaces, first_pc_vector = results
+ if filter_spec is not None and filter_spec['filter1_type'] != "":
+ greyscale_image = apply_filter(greyscale_image, filter_spec['filter1_type'],
+ filter_spec['filter1_param'],lose_accuracy_to_save_memory)
+ if greyscale_image2 is not None and filter_spec['filter2_type'] != "":
+ greyscale_image2 = apply_filter(greyscale_image2,
+ filter_spec['filter2_type'], filter_spec['filter2_param'],
+ lose_accuracy_to_save_memory)
+ converted_video[im_i, ...] = greyscale_image
+ if color_space_combination['logical'] != 'None':
+ converted_video2[im_i, ...] = greyscale_image2
+ return converted_video, converted_video2
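
Finally, a hedged sketch of driving the new video helper. The 'logical', 'filter1_type', 'filter1_param', 'filter2_type' and 'filter2_param' keys follow what the function body reads; the exact layout split_dict expects for the color-space part is an assumption here:

    import numpy as np
    from cellects.image_analysis.image_segmentation import convert_subtract_and_filter_video

    video = np.random.randint(0, 255, (10, 100, 100, 3), np.uint8)  # (frames, h, w, bgr)

    # Assumed layout: channel weights plus the 'logical' flag read by the function
    color_space_combination = {'bgr': np.array([0, 0, 1], np.uint8), 'logical': 'None'}
    filter_spec = {'filter1_type': '', 'filter1_param': None,
                   'filter2_type': '', 'filter2_param': None}

    converted, converted2 = convert_subtract_and_filter_video(
        video, color_space_combination, filter_spec=filter_spec)
    # converted2 stays None because 'logical' is 'None'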