cellects 0.1.3-py3-none-any.whl → 0.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cellects/__main__.py +65 -25
  2. cellects/config/all_vars_dict.py +18 -17
  3. cellects/core/cellects_threads.py +1034 -396
  4. cellects/core/motion_analysis.py +1664 -2010
  5. cellects/core/one_image_analysis.py +1082 -1061
  6. cellects/core/program_organizer.py +1687 -1316
  7. cellects/core/script_based_run.py +80 -76
  8. cellects/gui/advanced_parameters.py +365 -326
  9. cellects/gui/cellects.py +102 -91
  10. cellects/gui/custom_widgets.py +4 -3
  11. cellects/gui/first_window.py +226 -104
  12. cellects/gui/if_several_folders_window.py +117 -68
  13. cellects/gui/image_analysis_window.py +841 -450
  14. cellects/gui/required_output.py +100 -56
  15. cellects/gui/ui_strings.py +840 -0
  16. cellects/gui/video_analysis_window.py +317 -135
  17. cellects/image_analysis/cell_leaving_detection.py +64 -4
  18. cellects/image_analysis/image_segmentation.py +451 -22
  19. cellects/image_analysis/morphological_operations.py +2166 -1635
  20. cellects/image_analysis/network_functions.py +616 -253
  21. cellects/image_analysis/one_image_analysis_threads.py +94 -153
  22. cellects/image_analysis/oscillations_functions.py +131 -0
  23. cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
  24. cellects/image_analysis/shape_descriptors.py +517 -466
  25. cellects/utils/formulas.py +169 -6
  26. cellects/utils/load_display_save.py +362 -105
  27. cellects/utils/utilitarian.py +86 -9
  28. cellects-0.2.6.dist-info/LICENSE +675 -0
  29. cellects-0.2.6.dist-info/METADATA +829 -0
  30. cellects-0.2.6.dist-info/RECORD +44 -0
  31. cellects/core/one_video_per_blob.py +0 -540
  32. cellects/image_analysis/cluster_flux_study.py +0 -102
  33. cellects-0.1.3.dist-info/LICENSE.odt +0 -0
  34. cellects-0.1.3.dist-info/METADATA +0 -176
  35. cellects-0.1.3.dist-info/RECORD +0 -44
  36. {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
  37. {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
  38. {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
cellects/image_analysis/one_image_analysis_threads.py
@@ -1,24 +1,57 @@
 #!/usr/bin/env python3
-"""
-This script contains 2 classes used by the OneImageAnalysis class
-They are threads to process the first image and save the selected combinations simultaneously
+"""Module containing classes for image processing and saving selected color space combinations.
+
+This module provides two thread-based components for analyzing images and storing results:
+
+ProcessFirstImage handles initial segmentation, thresholding, clustering, and shape validation
+SaveCombinationThread stores processed features in parent objects asynchronously
+The processing pipeline includes Otsu thresholding, k-means clustering, connected component analysis,
+and geometric filtering based on size/shape constraints.
+
+Classes
+ProcessFirstImage : Processes image data with segmentation techniques and validates shapes.
+SaveCombinationThread : Thread to save combination results while maintaining UI responsiveness.
+
+Functions (in ProcessFirstImage)
+shape_selection : Filters shapes by size thresholds and geometric criteria.
+kmeans : Performs clustering-based image segmentation into specified number of clusters.
+process_binary_image : Validates detected shapes against area constraints and spot count targets.
+
+Notes
+Uses threading.Thread for background operations to maintain application responsiveness during processing.
 """
 import threading
 import logging
-from copy import deepcopy
 import numpy as np
 import cv2
+from numpy.typing import NDArray
+from typing import Tuple
 from cellects.image_analysis.image_segmentation import otsu_thresholding, combine_color_spaces
+from cellects.image_analysis.morphological_operations import shape_selection
 
 
 class ProcessFirstImage:
+    """
+    A class for processing lists.
+    """
     def __init__(self, l):
+        """
+        Arguments:
+        list : list
+
+        """
         self.start_processing(l)
 
-    def start_processing(self, l):
+    def start_processing(self, l: list):
         """
-        Wil process the first image according to rules and parameters in l
-        :param l: list containing the necessary data to process the first image
+
+        Start the processing based on given list input.
+
+        The method processes the provided list to perform various operations
+        on the image data. It sets up several attributes and performs different
+        image processing tasks like Otsu thresholding or k-means clustering.
+
+        The method does not return any value.
         """
         self.parent = l[0]
         get_one_channel_result = l[1]
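The new module docstring above names the full pipeline: Otsu thresholding, k-means clustering, connected-component analysis, and geometric filtering. A minimal, self-contained sketch of that flow using plain OpenCV/NumPy rather than Cellects' own otsu_thresholding helper; the function name, the min_area default, and the uint8 grayscale input are assumptions for illustration:

import cv2
import numpy as np

def segment_and_count(gray, min_area=10):
    # 'gray' is assumed to be a single-channel uint8 image.
    # Otsu picks the global threshold that best separates foreground from background.
    _, binary = cv2.threshold(gray, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Label 8-connected components and keep only those of at least min_area pixels.
    nb, labels, stats, centroids = cv2.connectedComponentsWithStats(binary, connectivity=8)
    keep = np.flatnonzero(stats[1:, cv2.CC_STAT_AREA] >= min_area) + 1  # label 0 is background
    validated = np.isin(labels, keep).astype(np.uint8)
    return validated, len(keep)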
@@ -26,10 +59,11 @@ class ProcessFirstImage:
         self.all_c_spaces = self.parent.all_c_spaces
         self.several_blob_per_arena = l[4]
         self.sample_number = l[5]
-        self.spot_size = l[6]
-        kmeans_clust_nb = l[7]
-        self.biomask = l[8]
-        self.backmask = l[9]
+        self.horizontal_size = l[6]
+        self.spot_shape = l[7]
+        kmeans_clust_nb = l[8]
+        self.biomask = l[9]
+        self.backmask = l[10]
         if get_one_channel_result:
             self.csc_dict = l[3]
             self.image = combine_color_spaces(self.csc_dict, self.all_c_spaces)
@@ -43,14 +77,15 @@ class ProcessFirstImage:
             self.unaltered_concomp_nb, shapes = cv2.connectedComponents(self.binary_image)
             if 1 < self.unaltered_concomp_nb < 10000:
                 self.total_area = np.sum(self.binary_image)
-                if 100 < self.total_area < self.binary_image.size * 0.75:
+                inf_lim = np.min((100, np.ceil(self.binary_image.size / 1000)))
+                if inf_lim < self.total_area < self.binary_image.size * 0.9:
                     self.process_binary_image()
                     self.parent.save_combination_features(self)
         # except RuntimeWarning:
-        #     logging.info("Make sure that scaling and spot size are correct")
+        #     Make sure that scaling and spot size are correct
         if combine_channels:
             i = l[3]
-            possibilities = l[10]
+            possibilities = l[11]
             saved_color_space_list = self.parent.saved_color_space_list
             combination_features = self.parent.combination_features
             self.csc_dict = saved_color_space_list[i]
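The replaced threshold makes the lower area bound scale with image size instead of a fixed 100-pixel floor, and loosens the upper bound from 75% to 90% of the frame. Worked numbers for the new bounds, assuming a hypothetical 1000×1000 image:

import numpy as np

size = 1000 * 1000                             # pixels in a 1000x1000 frame
inf_lim = np.min((100, np.ceil(size / 1000)))  # min(100, 1000) -> 100.0
sup_lim = size * 0.9                           # 900000.0 pixels
# For a small 100x100 frame the floor drops: min(100, ceil(10000 / 1000)) -> 10.0
# A candidate segmentation is kept only if inf_lim < total_area < sup_lim.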
@@ -58,7 +93,7 @@ class ProcessFirstImage:
             previous_sum = combination_features[i, 5]
             for j in possibilities[::-1]:
                 csc_dict2 = saved_color_space_list[j]
-                csc_dict = deepcopy(self.csc_dict)
+                csc_dict = self.csc_dict.copy()
                 keys = list(csc_dict.keys())
 
                 k2 = list(csc_dict2.keys())[0]
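The switch from deepcopy to dict.copy() here is behavior-preserving only if csc_dict is a flat dict of immutable values (an assumption: color-space names mapped to scalar channel weights); a shallow copy then already isolates the new mapping:

# Hypothetical flat color-space dict; keys and values are illustrative.
d1 = {"hsv": 1, "lab": 2}
d2 = d1.copy()           # shallow copy: new dict object, shared immutable values
d2["hsv"] = 5
assert d1["hsv"] == 1    # rebinding a key in the copy leaves the original intact
# If the values were mutable (e.g. NumPy weight arrays), the copies would share them.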
@@ -80,79 +115,27 @@ class ProcessFirstImage:
                 if previous_shape_number >= self.shape_number and self.total_area > previous_sum * 0.9:
                     previous_shape_number = self.shape_number
                     previous_sum = self.total_area
-                    self.csc_dict = deepcopy(csc_dict)
+                    self.csc_dict = csc_dict.copy()
             self.unaltered_concomp_nb = combination_features[i, 3]
             self.parent.save_combination_features(self)
-            logging.info(str(saved_color_space_list[i]) + "-->" + str(self.csc_dict ))
 
-    def shape_selection(self, horizontal_size, shape, confint, do_not_delete=None):
-        """
-        This method use the statistics of the connected components of a binary image to make shape selection
-        :param horizontal_size: the average horizontal size of one shape in pixels
-        :param shape: the geometry of the shape: circle or rectangle
-        :param confint: confidence interval for horizontal size and shape detection
-        :param do_not_delete: binary image with 1 in area drawn by the user as "Cell"
-        :return: A binary matrix of the resulting validated shapes and the number of shapes detected
+    def kmeans(self, cluster_number: int, biomask: NDArray[np.uint8]=None, backmask: NDArray[np.uint8]=None, bio_label=None):
         """
-        # counter+=1;horizontal_size = self.spot_size; shape = self.parent.spot_shapes[counter];confint = self.parent.spot_size_confints[::-1][counter]
-        # stats columns contain in that order:
-        # - x leftmost coordinate of boundingbox
-        # - y topmost coordinate of boundingbox
-        # - The horizontal size of the bounding box.
-        # - The vertical size of the bounding box.
-        # - The total area (in pixels) of the connected component.
-
-        # First, remove each stain which horizontal size varies too much from reference
-        size_interval = [horizontal_size * (1 - confint), horizontal_size * (1 + confint)]
-        cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 2] < size_interval[0], self.stats[:, 2] > size_interval[1]))
 
-        if do_not_delete is None:
-            self.shapes2[np.isin(self.shapes2, cc_to_remove)] = 0
-        else:
-            self.shapes2[np.logical_and(np.isin(self.shapes2, cc_to_remove), np.logical_not(np.isin(self.shapes2, do_not_delete)))] = 0
+        Perform k-means clustering on the image to segment it into a specified number of clusters.
 
-        # Second, determine the shape of each stain to only keep the ones corresponding to the reference shape
-        shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
-        shapes[self.shapes2 > 0] = 1
-        nb_components, self.shapes2, self.stats, self.centroids = cv2.connectedComponentsWithStats(shapes,
-                                                                                                   connectivity=8)
-        if nb_components > 1:
-            if shape == 'circle':
-                surf_interval = [np.pi * np.square(horizontal_size // 2) * (1 - confint), np.pi * np.square(horizontal_size // 2) * (1 + confint)]
-                cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
-            elif shape == 'rectangle':
-                # If the smaller side is the horizontal one, use the user provided horizontal side
-                if np.argmin((np.mean(self.stats[1:, 2]), np.mean(self.stats[1:, 3]))) == 0:
-                    surf_interval = [np.square(horizontal_size) * (1 - confint), np.square(horizontal_size) * (1 + confint)]
-                    cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
-                # If the smaller side is the vertical one, use the median vertical length shape
-                else:
-                    surf_interval = [np.square(np.median(self.stats[1:, 3])) * (1 - confint), np.square(np.median(self.stats[1:, 3])) * (1 + confint)]
-                    cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
-            else:
-                logging.info("Original blob shape not well written")
+        Args:
+            cluster_number (int): The desired number of clusters.
+            biomask (NDArray[np.uint8]): Optional mask for biological regions. Default is None.
+            backmask (NDArray[np.uint8]): Optional mask for background regions. Default is None.
+            bio_label (int): The label assigned to the biological region. Default is None.
 
-            if do_not_delete is None:
-                self.shapes2[np.isin(self.shapes2, cc_to_remove)] = 0
-            else:
-                self.shapes2[np.logical_and(np.isin(self.shapes2, cc_to_remove),
-                                            np.logical_not(np.isin(self.shapes2, do_not_delete)))] = 0
-            # There was only that before:
-            shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
-            shapes[np.nonzero(self.shapes2)] = 1
+        Returns:
+            None
 
-        nb_components, self.shapes2, self.stats, self.centroids = cv2.connectedComponentsWithStats(shapes, connectivity=8)
-        self.validated_shapes = shapes
-        self.shape_number = nb_components - 1
+        Note:
+            This method modifies the `binary_image` and `bio_label` attributes of the instance.
 
-    def kmeans(self, cluster_number, biomask=None, backmask=None, bio_label=None):
-        """
-        Use of Kmeans to detect the Cell(s) after having segmented the grayscale image into two or more categories
-        :param cluster_number: the number of categories to find
-        :param biomask: the mask of pixels marked as Cell(s) by the user
-        :param backmask: the mask of pixels marked as Background by the user
-        :param bio_label:
-        :return:
         """
         image = self.image.reshape((-1, 1))
         image = np.float32(image)
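The method body following the rewritten docstring flattens the image and casts it to float32, the layout OpenCV's k-means expects. A condensed sketch of that segmentation step, assuming cv2.kmeans on grayscale intensities; picking the darkest cluster as the specimen is an illustrative rule, not necessarily the one Cellects applies when biomask/backmask hints are given:

import cv2
import numpy as np

def kmeans_segment(gray, k=2):
    samples = np.float32(gray.reshape(-1, 1))            # one sample per pixel
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centers = cv2.kmeans(samples, k, None, criteria, 3, cv2.KMEANS_PP_CENTERS)
    labels = labels.reshape(gray.shape)                  # back to image layout
    bio = int(np.argmin(centers))                        # darkest cluster = specimen (assumption)
    return (labels == bio).astype(np.uint8)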
@@ -181,94 +164,52 @@ class ProcessFirstImage:
             self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
         self.binary_image[np.nonzero(kmeans_image == self.bio_label)] = 1
 
-    def process_binary_image(self, use_bio_and_back_masks=False):
+    def process_binary_image(self):
         """
-        Process the binary image to get the final validated shapes
-        Starts by computin connected components, then remove the background pixels marked by the user,
-        then, if there are not several blob per arena, select spot according to their sizes
-        :param use_bio_and_back_masks: if true, will use the cell(s) and background matked by the user
-        :return:
-        """
-        self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
-            self.binary_image, connectivity=8)
-        do_not_delete = None
-        if use_bio_and_back_masks:
-            if self.backmask is not None:
-                if np.any(self.shapes[self.backmask]):
-                    self.shapes[np.isin(self.shapes, np.unique(self.shapes[self.backmask]))] = 0
-                    self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
-                        (self.shapes > 0).astype(np.uint8), connectivity=8)
-                    self.shape_number -= 1
-            if self.biomask is not None:
-                if np.any(self.shapes[self.biomask]):
-                    do_not_delete = np.unique(self.shapes[self.biomask])
-                    do_not_delete = do_not_delete[do_not_delete != 0]
-        if not self.several_blob_per_arena and self.spot_size is not None:
-            counter = 0
-            self.shapes2 = deepcopy(self.shapes)
-            while self.shape_number != self.sample_number and counter < len(self.parent.spot_size_confints):
-                self.shape_selection(horizontal_size=self.spot_size, shape=self.parent.spot_shapes[counter],
-                                     confint=self.parent.spot_size_confints[counter], do_not_delete=do_not_delete)
-                logging.info(f"Shape selection algorithm found {self.shape_number} disconnected shapes")
-                counter += 1
-            if self.shape_number == self.sample_number:
-                self.shapes = self.shapes2
-        if self.shape_number == self.sample_number:
-            self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
-            self.validated_shapes[self.shapes > 0] = 1
-        else:
-            max_size = self.binary_image.size * 0.75
-            min_size = 10
-            cc_to_remove = np.argwhere(np.logical_or(self.stats[1:, 4] < min_size, self.stats[1:, 4] > max_size)) + 1
-            self.shapes[np.isin(self.shapes, cc_to_remove)] = 0
-            self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
-            self.validated_shapes[self.shapes > 0] = 1
-            self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
-                self.validated_shapes,
-                connectivity=8)
-            if not self.several_blob_per_arena and self.sample_number is not None and self.shape_number > self.sample_number:
-                # Sort shapes by size and compare the largest with the second largest
-                # If the difference is too large, remove that largest shape.
-                cc_to_remove = np.array([], dtype=np.uint8)
-                to_remove = np.array([], dtype=np.uint8)
-                self.stats = self.stats[1:, :]
-                while self.stats.shape[0] > self.sample_number and to_remove is not None:
-                    # 1) rank by height
-                    sorted_height = np.argsort(self.stats[:, 2])
-                    # and only consider the number of shapes we want to detect
-                    standard_error = np.std(self.stats[sorted_height, 2][-self.sample_number:])
-                    differences = np.diff(self.stats[sorted_height, 2])
-                    # Look for very big changes from one height to the next
-                    if differences.any() and np.max(differences) > 2 * standard_error:
-                        # Within these, remove shapes that are too large
-                        to_remove = sorted_height[np.argmax(differences)]
-                        cc_to_remove = np.append(cc_to_remove, to_remove + 1)
-                        self.stats = np.delete(self.stats, to_remove, 0)
+        Process the binary image to identify and validate shapes.
 
-                    else:
-                        to_remove = None
-                self.shapes[np.isin(self.shapes, cc_to_remove)] = 0
-                self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
-                self.validated_shapes[self.shapes > 0] = 1
-                self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
-                    self.validated_shapes,
-                    connectivity=8)
+        This method processes a binary image to detect connected components,
+        validate their sizes, and handle bio and back masks if specified.
+        It ensures that the number of validated shapes matches the expected
+        sample number or applies additional filtering if necessary.
 
-        self.shape_number -= 1
+        """
+        shapes_features = shape_selection(self.binary_image, true_shape_number=self.sample_number, horizontal_size=self.horizontal_size,
+                                          spot_shape=self.spot_shape, several_blob_per_arena=self.several_blob_per_arena,
+                                          bio_mask=self.biomask, back_mask=self.backmask)
+        self.validated_shapes, self.shape_number, self.stats, self.centroids = shapes_features
 
 
 class SaveCombinationThread(threading.Thread):
+    """
+    SaveCombinationThread
+
+    This class represents a thread for saving combinations.
+
+    """
     def __init__(self, parent=None):
+        """
+        **Args:**
+
+        - `parent`: The parent object that initiated the thread. This is an optional argument and defaults to 'None'.
+
+        """
         # super(SaveCombinationThread, self).__init__()
         threading.Thread.__init__(self)
         self.parent = parent
 
     def run(self):
         """
-        Save the current process_i data into the combination_features list
-        :return:
+        Runs the color space combination process and saves the results.
+
+        This method performs several tasks to save intermediate and final
+        results of the color space combination process. It logs messages,
+        updates lists with valid shapes, converts images to a specific format,
+        and updates combination features with various statistics. The method
+        also handles biomask and backmask calculations if they are not None.
+        Finally, it increments the saved color space number counter.
         """
-        logging.info(f"Saving results from the color space combination: {self.process_i.csc_dict}. {self.process_i.shape_number} distinct spots detected.")
+        logging.info(f"Saving results from the color space combination: {self.process_i.csc_dict}. {self.process_i.shape_number} distinct specimen(s) detected.")
         self.parent.saved_images_list.append(self.process_i.validated_shapes)
         self.parent.converted_images_list.append(np.round(self.process_i.image).astype(np.uint8))
         self.parent.saved_color_space_list.append(self.process_i.csc_dict)
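SaveCombinationThread follows the classic Thread-subclass pattern: run() executes in the background once start() is called, appending results to the parent's lists so the interface stays responsive. The skeleton, with hypothetical parent/results names:

import threading

class SaveResultThread(threading.Thread):
    def __init__(self, parent):
        threading.Thread.__init__(self)
        self.parent = parent

    def run(self):
        # Runs on the worker thread after .start(); the caller is not blocked.
        # list.append is atomic under CPython's GIL, so no lock is needed here.
        self.parent.results.append("done")

# usage: SaveResultThread(parent).start()  # parent.results fills in asynchronously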
cellects/image_analysis/oscillations_functions.py (new file)
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+"""Analyze oscillating clusters in 2D video data through flux tracking.
+
+This module implements a class to track cluster dynamics by analyzing pixel flux changes over time.
+The core functionality updates cluster identifiers, tracks periods of activity, and archives final data for completed clusters based on morphological analysis and contour boundaries.
+
+Classes
+ClusterFluxStudy : Updates flux information and tracks oscillating clusters in 2D space
+
+Functions
+update_flux : Processes flux changes to update cluster tracking and archive completed clusters
+
+Notes
+Uses cv2.connectedComponentsWithStats and custom distance calculations for boundary analysis
+Maintains cumulative pixel data for active clusters during time-lapse processing
+"""
+import cv2
+import numpy as np
+from numpy.typing import NDArray
+from typing import Tuple
+import logging
+from cellects.image_analysis.morphological_operations import cross_33, get_minimal_distance_between_2_shapes, cc, get_contours, CompareNeighborsWithValue
+from cellects.utils.utilitarian import smallest_memory_array, PercentAndTimeTracker
+from psutil import virtual_memory
+
+
+def detect_oscillations_dynamics(converted_video: NDArray, binary: NDArray[np.uint8], arena_label: int,
+                                 starting_time: int, expected_oscillation_period: int,
+                                 time_interval: int, minimal_oscillating_cluster_size:int,
+                                 min_ram_free: float=1., lose_accuracy_to_save_memory: bool=False,
+                                 save_coord_thickening_slimming: bool=True):
+    """
+    Detects oscillatory dynamics in a labeled arena from processed video data
+
+    Parameters
+    ----------
+    converted_video : NDArray
+        Processed intensity values of the input video as 3D/4D array (t,y,x[,c])
+    binary : NDArray[np.uint8]
+        Binary segmentation mask with 1 for active region and 0 otherwise
+    arena_label : int
+        Label identifier for the specific arena being analyzed in binary mask
+    starting_time : int
+        Timepoint index to start oscillation analysis from (earlier frames are ignored)
+    expected_oscillation_period : int
+        Expected average period of oscillations in seconds
+    time_interval : int
+        Sampling interval between consecutive video frames in seconds
+    minimal_oscillating_cluster_size : int
+        Minimum number of pixels required for a cluster to be considered an oscillation feature
+    min_ram_free : float, optional (default=1.0)
+        Minimum free RAM in GB that must remain available during processing
+    lose_accuracy_to_save_memory : bool, optional (default=False)
+        If True, uses low-precision calculations to reduce memory usage at the cost of accuracy
+    save_coord_thickening_slimming : bool, optional (default=True)
+        If True, saves detected cluster coordinates as .npy files
+
+    Returns
+    -------
+    NDArray[np.int8]
+        3D array where each pixel is labeled with 1=influx region, 2=efflux region, or 0=no oscillation
+
+    Notes
+    -----
+    - Processes video data by calculating intensity gradients to detect directional oscillations
+    - Memory-intensive operations use float16 when available RAM would otherwise be exceeded
+    - Saves coordinate arrays if requested, which may consume significant disk space for large datasets
+    """
+    logging.info(f"Arena n°{arena_label}. Starting oscillation analysis.")
+    dims = converted_video.shape
+    oscillations_video = None
+    if dims[0] > 1:
+        period_in_frame_nb = int(expected_oscillation_period / time_interval)
+        if period_in_frame_nb < 2:
+            period_in_frame_nb = 2
+        necessary_memory = dims[0] * dims[1] * dims[2] * 64 * 4 * 1.16415e-10
+        available_memory = (virtual_memory().available >> 30) - min_ram_free
+        if len(dims) == 4:
+            converted_video = converted_video[:, :, :, 0]
+        average_intensities = np.mean(converted_video, (1, 2))
+        if lose_accuracy_to_save_memory or (necessary_memory > available_memory):
+            oscillations_video = np.zeros(dims, dtype=np.float16)
+            for cy in np.arange(dims[1]):
+                for cx in np.arange(dims[2]):
+                    oscillations_video[:, cy, cx] = np.round(
+                        np.gradient(converted_video[:, cy, cx, ...] / average_intensities, period_in_frame_nb), 3).astype(np.float16)
+        else:
+            oscillations_video = np.gradient(converted_video / average_intensities[:, None, None], period_in_frame_nb, axis=0)
+        oscillations_video = np.sign(oscillations_video)
+        oscillations_video = oscillations_video.astype(np.int8)
+        oscillations_video[binary == 0] = 0
+
+        for t in np.arange(starting_time, dims[0]):
+            oscillations_image = np.zeros(dims[1:], np.uint8)
+            # Add in or ef if a pixel has at least 4 neighbor in or ef
+            neigh_comp = CompareNeighborsWithValue(oscillations_video[t, :, :], connectivity=8, data_type=np.int8)
+            neigh_comp.is_inf(0, and_itself=False)
+            neigh_comp.is_sup(0, and_itself=False)
+            # Not verified if influx is really influx (resp efflux)
+            influx = neigh_comp.sup_neighbor_nb
+            efflux = neigh_comp.inf_neighbor_nb
+
+            # Only keep pixels having at least 4 positive (resp. negative) neighbors
+            influx[influx <= 4] = 0
+            efflux[efflux <= 4] = 0
+            influx[influx > 4] = 1
+            efflux[efflux > 4] = 1
+            if np.any(influx) or np.any(efflux):
+                influx, in_stats, in_centroids = cc(influx)
+                efflux, ef_stats, ef_centroids = cc(efflux)
+                # Only keep clusters larger than 'minimal_oscillating_cluster_size' pixels (smaller are considered as noise
+                in_smalls = np.nonzero(in_stats[:, 4] < minimal_oscillating_cluster_size)[0]
+                if len(in_smalls) > 0:
+                    influx[np.isin(influx, in_smalls)] = 0
+                ef_smalls = np.nonzero(ef_stats[:, 4] < minimal_oscillating_cluster_size)[0]
+                if len(ef_smalls) > 0:
+                    efflux[np.isin(efflux, ef_smalls)] = 0
+            oscillations_image[influx > 0] = 1
+            oscillations_image[efflux > 0] = 2
+            oscillations_video[t, :, :] = oscillations_image
+        oscillations_video[:starting_time, :, :] = 0
+        if save_coord_thickening_slimming:
+            np.save(
+                f"coord_thickening{arena_label}_t{dims[0]}_y{dims[1]}_x{dims[2]}.npy",
+                smallest_memory_array(np.nonzero(oscillations_video == 1), "uint"))
+            np.save(
+                f"coord_slimming{arena_label}_t{dims[0]}_y{dims[1]}_x{dims[2]}.npy",
+                smallest_memory_array(np.nonzero(oscillations_video == 2), "uint"))
+    return oscillations_video
+
+
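The core signal in detect_oscillations_dynamics is the sign of the temporal gradient of the frame-normalized video, masked to the segmented region. That kernel in isolation (shapes and names are illustrative; the real function adds the float16 fallback and per-frame cluster filtering):

import numpy as np

def oscillation_sign(video, binary, period_frames):
    # video: float array of shape (t, y, x); binary: mask of the same shape.
    means = video.mean(axis=(1, 2))                # per-frame average intensity
    normalized = video / means[:, None, None]      # cancel global lighting drift
    grad = np.gradient(normalized, period_frames, axis=0)
    signs = np.sign(grad).astype(np.int8)          # +1 thickening, -1 slimming
    signs[binary == 0] = 0                         # keep only the segmented arena
    return signs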
cellects/image_analysis/progressively_add_distant_shapes.py
@@ -256,7 +256,8 @@ class ProgressivelyAddDistantShapes:
             order_of_shapes_to_expand = np.append(order_of_shapes_to_expand, new_connections)
         connections[dil_main_shape > 0] = 1
         connections[other_shapes > 0] = 1
-        nb, connections = cv2.connectedComponents(connections)
+        connections[connections > 0] = 1
+        nb, connections = cv2.connectedComponents(connections.astype(np.uint8))
         if len(order_of_shapes_to_expand) == 0:
            order_of_shapes_to_expand = np.unique(new_order)[2:]
         return order_of_shapes_to_expand
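The two-line fix above re-binarizes the labeled array before relabeling, since cv2.connectedComponents expects an 8-bit image whose nonzero pixels mark the foreground. A minimal illustration of the collapse-and-cast step (values are made up):

import cv2
import numpy as np

labels = np.array([[0, 2, 2], [0, 0, 3]], dtype=np.int32)  # labels left by an earlier pass
binary = (labels > 0).astype(np.uint8)                     # collapse all labels to 0/1
nb, relabeled = cv2.connectedComponents(binary)            # nb counts the background label too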
@@ -418,8 +419,6 @@
         rated_extension *= self.expanded_shape
         self.expanded_shape += rated_extension
 
-    #binary_video = self.binary[(self.step // 2):(self.t + 1), :, :]
-    #draft_seg = self.segmentation[(self.step // 2):(self.t + 1), :, :]
     def modify_past_analysis(self, binary_video: NDArray[np.uint8], draft_seg: NDArray[np.uint8]) -> NDArray[np.uint8]:
         """
         Modify past analysis based on binary video and draft segmentation.