cellects 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +154 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +30 -0
  7. cellects/core/cellects_threads.py +1464 -0
  8. cellects/core/motion_analysis.py +1931 -0
  9. cellects/core/one_image_analysis.py +1065 -0
  10. cellects/core/one_video_per_blob.py +679 -0
  11. cellects/core/program_organizer.py +1347 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +789 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +1909 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/extract_exif.py +61 -0
  29. cellects/image_analysis/fractal_analysis.py +184 -0
  30. cellects/image_analysis/fractal_functions.py +108 -0
  31. cellects/image_analysis/image_segmentation.py +272 -0
  32. cellects/image_analysis/morphological_operations.py +867 -0
  33. cellects/image_analysis/network_functions.py +1244 -0
  34. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  35. cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
  36. cellects/image_analysis/shape_descriptors.py +981 -0
  37. cellects/utils/__init__.py +0 -0
  38. cellects/utils/formulas.py +881 -0
  39. cellects/utils/load_display_save.py +1016 -0
  40. cellects/utils/utilitarian.py +516 -0
  41. cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
  42. cellects-0.1.0.dev1.dist-info/METADATA +131 -0
  43. cellects-0.1.0.dev1.dist-info/RECORD +46 -0
  44. cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
  45. cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
  46. cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,54 @@
+ #!/usr/bin/env python3
+ """Contains the function: cell_leaving_detection
+ This function considers the pixel intensity curve of each covered pixel and assesses whether a covered pixel has
+ recovered, at least partially, its initial intensity.
+ """
+ import cv2
+ import numpy as np
+ from cellects.image_analysis.morphological_operations import cross_33
+
+
+ def cell_leaving_detection(new_shape, covering_intensity, previous_binary, greyscale_image, fading_coefficient, lighter_background, several_blob_per_arena, erodila_disk, protect_from_fading=None, add_to_fading=None):
+     # To look for fading pixels, only consider the internal contour of the shape at t-1
+     fading = cv2.erode(previous_binary, erodila_disk)
+     fading = previous_binary - fading
+     # If the origin state is considered constant: origin pixels will never be fading
+     if protect_from_fading is not None:
+         fading *= (1 - protect_from_fading)
+     if add_to_fading is not None:
+         if protect_from_fading is not None:
+             add_to_fading[np.nonzero(protect_from_fading)] = 0
+         add_to_fading_coord = np.nonzero(add_to_fading)
+         fading[add_to_fading_coord] = 1
+         if lighter_background:
+             covering_intensity[add_to_fading_coord] = 1 / (1 - fading_coefficient)  # 0.9 * covering_intensity[add_to_fading_coord]
+         else:
+             covering_intensity[add_to_fading_coord] = 255  # 1.1 * covering_intensity[add_to_fading_coord]
+     # Contour pixels fade when their intensity crosses the scaled covering intensity:
+     # above it with a lighter background, below it with a darker background
+     if lighter_background:
+         fading = fading * np.greater(greyscale_image, (1 - fading_coefficient) * covering_intensity).astype(np.uint8)
+     else:
+         fading = fading * np.less(greyscale_image, (1 + fading_coefficient) * covering_intensity).astype(np.uint8)
+
+     if np.any(fading):
+         fading = cv2.morphologyEx(fading, cv2.MORPH_CLOSE, cross_33, iterations=1)
+         if not several_blob_per_arena:
+             # Check that removing the uncovered candidates does not break the shape into several components
+             uncov_nb, uncov_shapes = cv2.connectedComponents(fading, ltype=cv2.CV_16U)
+             nb, garbage_img = cv2.connectedComponents(new_shape, ltype=cv2.CV_16U)
+             i = 0
+             while i <= uncov_nb:
+                 i += 1
+                 prev_nb = nb
+                 new_shape[np.nonzero(uncov_shapes == i)] = 0
+                 nb, garbage_img = cv2.connectedComponents(new_shape, ltype=cv2.CV_16U)
+                 if nb > prev_nb:
+                     new_shape[np.nonzero(uncov_shapes == i)] = 1
+                     nb, garbage_img = cv2.connectedComponents(new_shape, ltype=cv2.CV_16U)
+             uncov_shapes = None
+         else:
+             new_shape[np.nonzero(fading)] = 0
+         new_shape = cv2.morphologyEx(new_shape, cv2.MORPH_OPEN, cross_33, iterations=0)
+         new_shape = cv2.morphologyEx(new_shape, cv2.MORPH_CLOSE, cross_33)
+
+     return new_shape, covering_intensity
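As a usage sketch for cell_leaving_detection.py above: the toy 5x5 arena, intensity values and 3x3 erosion disk below are illustrative assumptions, not values taken from the package.

    import cv2
    import numpy as np
    from cellects.image_analysis.cell_leaving_detection import cell_leaving_detection

    # Hypothetical arena: a filled square at t-1, a uniform covering intensity,
    # and a brighter current frame so contour pixels qualify as fading.
    previous_binary = np.zeros((5, 5), dtype=np.uint8)
    previous_binary[1:4, 1:4] = 1
    new_shape = previous_binary.copy()
    covering_intensity = np.full((5, 5), 100, dtype=np.float64)
    greyscale_image = np.full((5, 5), 130, dtype=np.uint8)
    erodila_disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    new_shape, covering_intensity = cell_leaving_detection(
        new_shape, covering_intensity, previous_binary, greyscale_image,
        fading_coefficient=0.2, lighter_background=True,
        several_blob_per_arena=True, erodila_disk=erodila_disk)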
@@ -0,0 +1,102 @@
+ #!/usr/bin/env python3
+ """
+ This script contains the class for studying oscillating clusters on videos in 2D
+ """
+ import cv2
+ import numpy as np
+ from numpy import (
+     append, float32, sum, mean, zeros, empty, array, nonzero, unique,
+     isin, logical_or, logical_not, greater, uint8,
+     uint32, min, any)
+ from cellects.image_analysis.morphological_operations import cross_33, get_minimal_distance_between_2_shapes
+
+
+ class ClusterFluxStudy:
+     """
+     Track oscillating clusters of pixels across the frames of a 2D video.
+     """
+     def __init__(self, dims):
+         self.dims = dims
+
+         self.pixels_data = np.empty((4, 0), dtype=np.uint32)
+         self.clusters_id = np.zeros(self.dims[1:], dtype=np.uint32)
+         # self.alive_clusters_in_flux = np.empty(0, dtype=np.uint32)  # list()
+         self.cluster_total_number = 0
+
+     def update_flux(self, t, contours, current_flux, period_tracking, clusters_final_data):
+         # Save the data of pixels that are no longer in efflux
+         lost = np.greater(self.clusters_id > 0, current_flux > 0)
+         # Some pixels of that cluster faded, save their data
+         lost_data = np.nonzero(lost)
+         lost_data = np.array((period_tracking[lost],  # lost_coord[0], lost_coord[1],
+                               self.clusters_id[lost], lost_data[0], lost_data[1]), dtype=np.uint32)
+         # Add this to the array containing the data of each cluster that is still alive
+         self.pixels_data = np.append(self.pixels_data, lost_data, axis=1)
+         # Stop considering these pixels in period_tracking because they switched
+         period_tracking[lost] = 0
+         current_period_tracking = np.zeros(self.dims[1:], dtype=bool)
+         for curr_clust_id in np.unique(current_flux)[1:]:
+             # Get all pixels that were in the same flux previously
+             curr_clust = current_flux == curr_clust_id
+             already = self.clusters_id * curr_clust
+             new = np.greater(curr_clust, self.clusters_id > 0)
+
+             if not np.any(already):
+                 # It is an entirely new cluster:
+                 cluster_pixels = new
+                 self.cluster_total_number += 1
+                 cluster_name = self.cluster_total_number
+             else:
+                 # Check whether parts of that cluster correspond to several clusters in clusters_id
+                 cluster_names = np.unique(already)[1:]
+                 # Keep only one cluster name to gather clusters that just became connected
+                 cluster_name = np.min(cluster_names)
+                 # Give the same cluster name to new pixels and to every pixel that was
+                 # part of a cluster touching the current cluster
+                 cluster_pixels = np.logical_or(np.isin(self.clusters_id, cluster_names), new)
+                 # If there is more than one,
+                 if len(cluster_names) > 1:
+                     # Update these cluster names in pixels_data
+                     self.pixels_data[1, np.isin(self.pixels_data[1, :], cluster_names)] = cluster_name
+             # Update clusters_id
+             self.clusters_id[cluster_pixels] = cluster_name
+             # Update period_tracking
+             current_period_tracking[curr_clust] = True
+
+         period_tracking[current_period_tracking] += 1
+         # Remove lost pixels from clusters_id
+         self.clusters_id[lost] = 0
+         # Find out which clusters are still alive or not
+         still_alive_clusters = np.isin(self.pixels_data[1, :], np.unique(self.clusters_id))
+         clusters_to_archive = np.unique(self.pixels_data[1, np.logical_not(still_alive_clusters)])
+         # Store their data in clusters_final_data
+         clusters_data = np.zeros((len(clusters_to_archive), 6), dtype=np.float32)
+         for clust_i, cluster in enumerate(clusters_to_archive):
+             cluster_bool = self.pixels_data[1, :] == cluster
+             cluster_size = np.sum(cluster_bool)
+             cluster_img = np.zeros(self.dims[1:], dtype=np.uint8)
+             cluster_img[self.pixels_data[2, cluster_bool], self.pixels_data[3, cluster_bool]] = 1
+             nb, im, stats, centro = cv2.connectedComponentsWithStats(cluster_img)
+             if np.any(cv2.dilate(cluster_img, kernel=cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0) * contours):
+                 minimal_distance = 1
+             else:
+                 if cluster_size > 200:
+                     eroded_cluster_img = cv2.erode(cluster_img, cross_33)
+                     cluster_img = np.nonzero(cluster_img - eroded_cluster_img)
+                     contours[cluster_img] = 2
+                 else:
+                     contours[self.pixels_data[2, cluster_bool], self.pixels_data[3, cluster_bool]] = 2
+                 # Get the minimal distance between the border of the cell(s) (noted 1 in contours)
+                 # and the border of the cluster in the cell(s) (now noted 2 in contours)
+                 minimal_distance = get_minimal_distance_between_2_shapes(contours)
+             data_to_save = np.array([[np.mean(self.pixels_data[0, cluster_bool]), t,
+                                       cluster_size, minimal_distance, centro[1, 0], centro[1, 1]]], dtype=np.float32)
+             clusters_data[clust_i, :] = data_to_save
+         # ... and remove their data from pixels_data
+         clusters_final_data = np.append(clusters_final_data, clusters_data, axis=0)
+         self.pixels_data = self.pixels_data[:, still_alive_clusters]
+
+         return period_tracking, clusters_final_data
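A rough sketch of how ClusterFluxStudy.update_flux might be driven frame by frame. The frame shape, the labelled current_flux image and the accumulator arrays below are illustrative assumptions; the method returns the updated period_tracking and clusters_final_data arrays, as in the source above.

    import numpy as np
    from cellects.image_analysis.cluster_flux_study import ClusterFluxStudy

    dims = (10, 20, 20)                  # assumed layout: (frame number, height, width)
    study = ClusterFluxStudy(dims)
    period_tracking = np.zeros(dims[1:], dtype=np.uint32)
    clusters_final_data = np.empty((0, 6), dtype=np.float32)
    contours = np.zeros(dims[1:], dtype=np.uint8)
    contours[5, 5:15] = 1                # cell border, noted 1 as the method expects

    for t in range(dims[0]):
        # current_flux: 0 = background, 1..n = labels of pixel clusters currently in flux
        current_flux = np.zeros(dims[1:], dtype=np.uint32)
        current_flux[8:12, 8:12] = 1
        period_tracking, clusters_final_data = study.update_flux(
            t, contours, current_flux, period_tracking, clusters_final_data)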
@@ -0,0 +1,61 @@
+ #!/usr/bin/env python3
+ """Extract shooting times from the EXIF metadata of a list of images."""
+
+ """
+
+ dir(my_image)
+ ['<unknown EXIF tag 59932>', '<unknown EXIF tag 59933>', '_exif_ifd_pointer', '_gps_ifd_pointer', '_segments',
+  'aperture_value', 'brightness_value', 'color_space', 'components_configuration', 'compression', 'datetime',
+  'datetime_digitized', 'datetime_original', 'exif_version', 'exposure_bias_value', 'exposure_mode',
+  'exposure_program', 'exposure_time', 'f_number', 'flash', 'flashpix_version', 'focal_length',
+  'focal_length_in_35mm_film', 'get', 'get_file', 'get_thumbnail', 'gps_altitude', 'gps_altitude_ref',
+  'gps_datestamp', 'gps_dest_bearing', 'gps_dest_bearing_ref', 'gps_horizontal_positioning_error',
+  'gps_img_direction', 'gps_img_direction_ref', 'gps_latitude', 'gps_latitude_ref', 'gps_longitude',
+  'gps_longitude_ref', 'gps_speed', 'gps_speed_ref', 'gps_timestamp', 'has_exif', 'jpeg_interchange_format',
+  'jpeg_interchange_format_length', 'lens_make', 'lens_model', 'lens_specification', 'make', 'maker_note',
+  'metering_mode', 'model', 'orientation', 'photographic_sensitivity', 'pixel_x_dimension', 'pixel_y_dimension',
+  'resolution_unit', 'scene_capture_type', 'scene_type', 'sensing_method', 'shutter_speed_value', 'software',
+  'subject_area', 'subsec_time_digitized', 'subsec_time_original', 'white_balance', 'x_resolution',
+  'y_and_c_positioning', 'y_resolution']
+
+ """
+ import numpy as np
+
+
+ def extract_time(image_list, pathway="", raw_images=False):
+     from numpy import min, max, any, zeros, arange, all, int64, repeat
+     nb = len(image_list)
+     timings = np.zeros((nb, 6), dtype=np.int64)
+     if raw_images:
+         import exifread
+         for i in np.arange(nb):
+             with open(pathway + image_list[i], 'rb') as image_file:
+                 my_image = exifread.process_file(image_file, details=False, stop_tag='DateTimeOriginal')
+                 datetime = my_image["EXIF DateTimeOriginal"]
+                 datetime = datetime.values[:10] + ':' + datetime.values[11:]
+                 timings[i, :] = datetime.split(':')
+     else:
+         from exif import Image
+         for i in np.arange(nb):
+             with open(pathway + image_list[i], 'rb') as image_file:
+                 my_image = Image(image_file)
+                 if my_image.has_exif:
+                     datetime = my_image.datetime
+                     datetime = datetime[:10] + ':' + datetime[11:]
+                     timings[i, :] = datetime.split(':')
+
+     if np.all(timings[:, 0] == timings[0, 0]):
+         if np.all(timings[:, 1] == timings[0, 1]):
+             if np.all(timings[:, 2] == timings[0, 2]):
+                 time = timings[:, 3] * 3600 + timings[:, 4] * 60 + timings[:, 5]
+             else:
+                 time = timings[:, 2] * 86400 + timings[:, 3] * 3600 + timings[:, 4] * 60 + timings[:, 5]
+         else:
+             days_per_month = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
+             for j in np.arange(nb):
+                 month_number = timings[j, 1]  # int(timings[j, 1])
+                 timings[j, 1] = days_per_month[month_number] * month_number
+             time = (timings[:, 1] + timings[:, 2]) * 86400 + timings[:, 3] * 3600 + timings[:, 4] * 60 + timings[:, 5]
+             # time = int(time)
+     else:
+         time = np.repeat(0, nb)  # arange(1, nb * 60, 60)  # "Do not run experiments on the 31st of December!!!"
+     if time.sum() == 0:
+         time = np.repeat(0, nb)  # arange(1, nb * 60, 60)
+     return time
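extract_time splits each EXIF stamp "YYYY:MM:DD HH:MM:SS" on ':' into six integers; when year, month and day are constant across the series, each image is assigned HH*3600 + MM*60 + SS seconds (e.g. "14:30:05" becomes 14*3600 + 30*60 + 5 = 52205). A hedged usage sketch, with hypothetical file names and path:

    from cellects.image_analysis.extract_exif import extract_time

    # Hypothetical JPEG files carrying an EXIF "datetime" tag such as "2023:05:02 14:30:05".
    image_list = ["arena_000.jpg", "arena_001.jpg", "arena_002.jpg"]
    seconds = extract_time(image_list, pathway="/data/experiment1/", raw_images=False)
    print(seconds)  # elapsed seconds, one value per image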
@@ -0,0 +1,184 @@
+ #!/usr/bin/env python3
+ """
+ This script contains the class for analyzing fractals out of a binary image
+ """
+
+
+ import os
+ from pickletools import uint8
+ from copy import deepcopy
+ import numpy as np
+ import cv2
+ from scipy.optimize import curve_fit
+ from scipy.stats import linregress
+ from cellects.utils.formulas import linear_model
+ from cellects.core.cellects_paths import DATA_DIR
+ from cellects.image_analysis.morphological_operations import cross_33
+
+
+ def prepare_box_counting(binary_image, side_threshold=128, zoom_step=0, contours=True):
+     side_lengths = None
+     zoomed_binary = binary_image
+     binary_idx = np.nonzero(binary_image)
+     if binary_idx[0].size:
+         min_y = np.min(binary_idx[0])
+         min_y = np.max((min_y - 1, 0))
+
+         min_x = np.min(binary_idx[1])
+         min_x = np.max((min_x - 1, 0))
+
+         max_y = np.max(binary_idx[0])
+         max_y = np.min((max_y + 1, binary_image.shape[0] - 1))
+
+         max_x = np.max(binary_idx[1])
+         max_x = np.min((max_x + 1, binary_image.shape[1] - 1))
+
+         zoomed_binary = deepcopy(binary_image[min_y:(max_y + 1), min_x:(max_x + 1)])
+         min_side = np.min(zoomed_binary.shape)
+         if min_side >= side_threshold:
+             if contours:
+                 eroded_zoomed_binary = cv2.erode(zoomed_binary, cross_33)
+                 zoomed_binary = zoomed_binary - eroded_zoomed_binary
+             if zoom_step == 0:
+                 max_power = int(np.floor(np.log2(min_side)))  # Largest integer power of 2
+                 side_lengths = 2 ** np.arange(max_power, -1, -1)
+             else:
+                 side_lengths = np.arange(1, min_side, zoom_step)
+     return zoomed_binary, side_lengths
+
+
+ def box_counting(zoomed_binary, side_lengths):
+     """
+     Let us take:
+     - s: the side lengths of many boxes
+     - N(s): the number of boxes of side length s that contain at least one pixel of the shape
+     - C: a constant
+     - D: the fractal dimension
+
+     N(s) = C(1/s)^D
+     log(N(s)) = D*log(1/s) + log(C)
+
+     box_counting_dimension = log(N(s))/log(1/s)
+     The line of y=log(N(s)) vs x=log(1/s) has a slope equal to the box-counting dimension
+     :param zoomed_binary:
+     :return:
+     """
+     box_counting_dimension: float = 0.
+     r_value: float = 0.
+     box_nb: float = 0.
+     if side_lengths is not None:
+         box_counts = np.zeros(len(side_lengths), dtype=np.uint64)
+         # Loop through side_lengths and compute block counts
+         for idx, side_length in enumerate(side_lengths):
+             S = np.add.reduceat(
+                 np.add.reduceat(zoomed_binary, np.arange(0, zoomed_binary.shape[0], side_length), axis=0),
+                 np.arange(0, zoomed_binary.shape[1], side_length),
+                 axis=1
+             )
+             box_counts[idx] = len(np.where(S > 0)[0])
+
+         valid_indices = box_counts > 0
+         if valid_indices.sum() >= 2:
+             log_box_counts = np.log(box_counts)
+             log_reciprocal_lengths = np.log(1 / side_lengths)
+             slope, intercept, r_value, p_value, stderr = linregress(log_reciprocal_lengths, log_box_counts)
+             # coefficients = np.polyfit(log_reciprocal_lengths, log_box_counts, 1)
+             box_counting_dimension = slope
+             box_nb = len(side_lengths)
+
+     return box_counting_dimension, r_value, box_nb
+
+
+ class FractalAnalysis:
+     def __init__(self, binary_image):
+         """
+         Initialize the FractalAnalysis class
+         :param binary_image: A 2D binary image whose two dims are coordinates.
+         :type binary_image: uint8
+         """
+         self.binary_image = binary_image
+         self.fractal_contours = []
+         self.fractal_box_lengths = []
+         self.fractal_box_widths = []
+         self.minkowski_dimension = None
+
+     def detect_fractal(self, threshold=100):
+         """
+         Find the contours of the fractal
+         :param threshold: minimal contour area for a contour to be kept
+         :type threshold: int
+         :return:
+         """
+         contours, _ = cv2.findContours(self.binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+         for contour in contours:
+             area = cv2.contourArea(contour)
+             if area > threshold:
+                 self.fractal_contours.append(contour)
+
+     def extract_fractal(self, image):
+         """
+         Draw the fractal mesh on a colored image
+         :param image: 3D matrix of a BGR image. The first two dims are coordinates, the last is color.
+         :type image: uint8
+         :return:
+         """
+         # Create a mask to extract the fractal
+         self.fractal_mesh = np.zeros(self.binary_image.shape, dtype=np.uint8)
+
+         for contour in self.fractal_contours:
+             # cv2.drawContours(self.fractal_mesh, [contour], 0, 1, thickness=cv2.FILLED)
+             cont = np.reshape(contour, (len(contour), 2))
+             cont = np.transpose(cont)
+             self.fractal_mesh[cont[1], cont[0]] = 1
+
+             x, y, w, h = cv2.boundingRect(contour)
+             self.fractal_box_lengths.append(np.max((w, h)))
+             self.fractal_box_widths.append(np.min((w, h)))
+
+     def get_dimension(self):
+         """
+         Compute the Minkowski dimension of the binary image
+         :return: Minkowski dimension
+         :rtype: np.float64
+         """
+         if np.any(self.fractal_mesh):
+             fractal_mask = deepcopy(self.fractal_mesh[:, :])
+             # fractal_mask = (self.fractal_mesh[:, :] > 0).astype(np.uint8)
+             size = np.min(fractal_mask.shape)
+             scales = np.arange(1, int(np.log2(size)) + 1)
+
+             Ns = []
+             for scale in scales:
+                 box_size = 2 ** scale
+                 boxes = np.add.reduceat(
+                     np.add.reduceat(fractal_mask, np.arange(0, fractal_mask.shape[0], box_size), axis=0),
+                     np.arange(0, fractal_mask.shape[1], box_size),
+                     axis=1,
+                 )
+                 Ns.append(np.sum(boxes > 0))
+
+             scales_inv = 1 / np.array(scales, dtype=np.float32)
+             coeffs, _ = curve_fit(linear_model, scales_inv, np.log(np.array(Ns, dtype=np.float32)))
+             self.minkowski_dimension = coeffs[1]
+         else:
+             self.minkowski_dimension = 0
+
+     def save_fractal_mesh(self, image_save_path):
+         """
+         Save an image representing the fractal mesh
+         :param image_save_path: path where to save the image
+         :type image_save_path: str
+         :return:
+         """
+         cv2.imwrite(image_save_path, self.fractal_mesh)
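The regression in box_counting fits log N(s) = D*log(1/s) + log C, so the slope is the dimension estimate. A quick sanity check on a synthetic shape (the 300x300 square below is an illustrative assumption, not a package test case): the contour of a filled square is a plain curve, so D should come out near 1, while rerunning with contours=False on the filled shape should approach 2.

    import numpy as np
    from cellects.image_analysis.fractal_analysis import prepare_box_counting, box_counting

    binary_image = np.zeros((300, 300), dtype=np.uint8)
    binary_image[20:276, 20:276] = 1

    zoomed_binary, side_lengths = prepare_box_counting(binary_image, side_threshold=128, contours=True)
    dimension, r_value, box_nb = box_counting(zoomed_binary, side_lengths)
    print(dimension, r_value, box_nb)   # dimension expected close to 1 for a square outline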
@@ -0,0 +1,108 @@
+
+ import os
+ from pickletools import uint8
+ from copy import deepcopy
+
+ # import matplotlib
+ # matplotlib.use('QtAgg')
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import cv2
+ from scipy.optimize import curve_fit
+ from scipy.stats import linregress
+ from cellects.utils.formulas import linear_model
+ from cellects.image_analysis.morphological_operations import cross_33
+
+
+ def display_boxes(binary_image, box_diameter):
+     plt.imshow(binary_image, cmap='gray')
+     height, width = binary_image.shape
+     for x in range(0, width + 1, box_diameter):
+         plt.axvline(x=x, color='white', linewidth=1)
+     for y in range(0, height + 1, box_diameter):
+         plt.axhline(y=y, color='white', linewidth=1)
+     plt.show()
+
+
+ def prepare_box_counting(binary_image, min_im_side=128, min_mesh_side=8, zoom_step=0, contours=True):
+     side_lengths = None
+     zoomed_binary = binary_image
+     binary_idx = np.nonzero(binary_image)
+     if binary_idx[0].size:
+         min_y = np.min(binary_idx[0])
+         min_y = np.max((min_y - 1, 0))
+
+         min_x = np.min(binary_idx[1])
+         min_x = np.max((min_x - 1, 0))
+
+         max_y = np.max(binary_idx[0])
+         max_y = np.min((max_y + 1, binary_image.shape[0] - 1))
+
+         max_x = np.max(binary_idx[1])
+         max_x = np.min((max_x + 1, binary_image.shape[1] - 1))
+
+         zoomed_binary = deepcopy(binary_image[min_y:(max_y + 1), min_x:(max_x + 1)])
+         min_side = np.min(zoomed_binary.shape)
+         if min_side >= min_im_side:
+             if contours:
+                 eroded_zoomed_binary = cv2.erode(zoomed_binary, cross_33)
+                 zoomed_binary = zoomed_binary - eroded_zoomed_binary
+             if zoom_step == 0:
+                 max_power = int(np.floor(np.log2(min_side)))  # Largest integer power of 2
+                 side_lengths = 2 ** np.arange(max_power, int(np.log2(min_mesh_side // 2)), -1)
+             else:
+                 side_lengths = np.arange(min_mesh_side, min_side, zoom_step)
+     return zoomed_binary, side_lengths
+
+
+ def box_counting(zoomed_binary, side_lengths, display=False):
+     """
+     Let us take:
+     - s: the side lengths of many boxes
+     - N(s): the number of boxes of side length s that contain at least one pixel of the shape
+     - C: a constant
+     - D: the fractal dimension
+
+     N(s) = C(1/s)^D
+     log(N(s)) = D*log(1/s) + log(C)
+
+     box_counting_dimension = log(N(s))/log(1/s)
+     The line of y=log(N(s)) vs x=log(1/s) has a slope equal to the box-counting dimension
+     :param zoomed_binary:
+     :return:
+     """
+     box_counting_dimension: float = 0.
+     r_value: float = 0.
+     box_nb: float = 0.
+     if side_lengths is not None:
+         box_counts = np.zeros(len(side_lengths), dtype=np.uint64)
+         # Loop through side_lengths and compute block counts
+         for idx, side_length in enumerate(side_lengths):
+             S = np.add.reduceat(
+                 np.add.reduceat(zoomed_binary, np.arange(0, zoomed_binary.shape[0], side_length), axis=0),
+                 np.arange(0, zoomed_binary.shape[1], side_length),
+                 axis=1
+             )
+             box_counts[idx] = len(np.where(S > 0)[0])
+
+         valid_indices = box_counts > 0
+         if valid_indices.sum() >= 2:
+             log_box_counts = np.log(box_counts)
+             log_reciprocal_lengths = np.log(1 / side_lengths)
+             slope, intercept, r_value, p_value, stderr = linregress(log_reciprocal_lengths, log_box_counts)
+             # coefficients = np.polyfit(log_reciprocal_lengths, log_box_counts, 1)
+             box_counting_dimension = slope
+             box_nb = len(side_lengths)
+             if display:
+                 plt.scatter(log_reciprocal_lengths, log_box_counts, label="Box counting")
+                 plt.plot([0, log_reciprocal_lengths.min()], [intercept, intercept + slope * log_reciprocal_lengths.min()], label="Linear regression")
+                 plt.plot([], [], ' ', label=f"D = {slope:.2f}")
+                 plt.plot([], [], ' ', label=f"R2 = {r_value ** 2:.6f}")
+                 plt.plot([], [], ' ', label=f"p-value = {p_value:.2e}")
+                 plt.legend(loc='best')
+                 plt.xlabel(f"log(1/Diameter) | Diameter ∈ [{side_lengths[0]}:{side_lengths[-1]}] (n={box_nb})")
+                 plt.ylabel(f"log(Box number) | Box number ∈ [{box_counts[0]}:{box_counts[-1]}]")
+                 plt.show()
+                 # plt.close()
+
+     return box_counting_dimension, r_value, box_nb
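fractal_functions.py repeats the box-counting routine of fractal_analysis.py, adding a lower bound on the box size (min_mesh_side) and an optional matplotlib plot of the log-log regression. A hedged sketch of the display variant, reusing the synthetic square from above (the image and parameter values are assumptions, not package defaults):

    import numpy as np
    from cellects.image_analysis.fractal_functions import prepare_box_counting, box_counting, display_boxes

    binary_image = np.zeros((300, 300), dtype=np.uint8)
    binary_image[20:276, 20:276] = 1

    zoomed_binary, side_lengths = prepare_box_counting(binary_image, min_im_side=128, min_mesh_side=8)
    display_boxes(zoomed_binary, box_diameter=int(side_lengths[0]))   # overlay the coarsest grid
    dimension, r_value, box_nb = box_counting(zoomed_binary, side_lengths, display=True)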