PyNutil 0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
PyNutil/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .main import PyNutil
@@ -0,0 +1,352 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ from .read_and_write import load_visualign_json
4
+ from .counting_and_load import flat_to_dataframe
5
+ from .visualign_deformations import triangulate, transform_vec
6
+ from glob import glob
7
+ import cv2
8
+ from skimage import measure
9
+ import threading
10
+ import re
11
+ from .reconstruct_dzi import reconstruct_dzi
12
+
13
+
14
def number_sections(filenames, legacy=False):
    """
    Return the section number encoded in each filename.

    :param filenames: list of filenames (paths allowed; only the basename is used)
    :type filenames: list[str]
    :param legacy: if True, take the three digits closest to the end of the
        name instead of requiring an ``_s###`` tag
    :type legacy: bool
    :return: list of section numbers
    :rtype: list[int]
    :raises ValueError: if a filename contains zero or multiple ``_s###`` tags,
        or if the input list is empty
    """
    # Strip Windows-style directory components; keep only the basename.
    filenames = [filename.split("\\")[-1] for filename in filenames]
    section_numbers = []
    for filename in filenames:
        if not legacy:
            match = re.findall(r"\_s\d+", filename)
            if len(match) == 0:
                # Bug fix: the original message printed a literal "(unknown)"
                # instead of the offending filename.
                raise ValueError(f"No section number found in filename: {filename}")
            if len(match) > 1:
                raise ValueError(
                    "Multiple section numbers found in filename, ensure only one instance of _s### is present, where ### is the section number"
                )
            section_numbers.append(int(match[-1][2:]))
        else:
            digits = re.sub("[^0-9]", "", filename)
            # This takes the three digits closest to the end of the name.
            # Bug fix: convert to int so both modes honour the list[int] contract
            # (callers such as flat_file_nrs already wrapped this in int()).
            section_numbers.append(int(digits[-3:]))
    if len(section_numbers) == 0:
        raise ValueError("No section numbers found in filenames")
    return section_numbers
42
+
43
+
44
+ # related to coordinate_extraction
45
def get_centroids_and_area(segmentation, pixel_cut_off=0):
    """Locate every connected object in a binary segmentation.

    Objects whose pixel area is not strictly greater than ``pixel_cut_off``
    are discarded. Returns (centroids, areas, per-object pixel coordinates).
    """
    # Label connected components and collect their measurements.
    all_regions = measure.regionprops(measure.label(segmentation))
    # Keep only objects above the size threshold.
    kept_regions = [region for region in all_regions if region.area > pixel_cut_off]
    centroid_arr = np.array([region.centroid for region in kept_regions])
    area_arr = np.array([region.area for region in kept_regions])
    # Ragged per-object coordinate lists, hence dtype=object.
    coords_arr = np.array([region.coords for region in kept_regions], dtype=object)
    return centroid_arr, area_arr, coords_arr
61
+
62
+
63
+ # related to coordinate extraction
64
def transform_to_registration(seg_height, seg_width, reg_height, reg_width):
    """Return the (y_scale, x_scale) factors that map segmentation
    coordinates into registration space."""
    return reg_height / seg_height, reg_width / seg_width
69
+
70
+ # related to coordinate extraction
71
def find_matching_pixels(segmentation, id):
    """Return (row, column) index arrays of every pixel whose colour equals ``id``."""
    # A pixel matches only when all of its channels equal the target colour.
    matches = np.all(segmentation == id, axis=2)
    rows, cols = np.nonzero(matches)
    return rows, cols
78
+
79
+
80
+ # related to coordinate extraction
81
def scale_positions(id_y, id_x, y_scale, x_scale):
    """Scale (y, x) coordinates into registration space.

    The scale factors are the output of transform_to_registration.
    """
    return id_y * y_scale, id_x * x_scale
88
+
89
+
90
+ # related to coordinate extraction
91
def transform_to_atlas_space(anchoring, y, x, reg_height, reg_width):
    """Project registration-space (y, x) coordinates into 3-D atlas space
    using the QuickNII anchoring vector (origin o, edge vectors u and v)."""
    origin = np.reshape(anchoring[0:3], (3, 1))
    u_vec = np.array(anchoring[3:6])
    v_vec = np.array(anchoring[6:9])
    # Normalise coordinates to fractions of the registration extent.
    y_frac = y / reg_height
    x_frac = x / reg_width
    # Component-wise contributions of the two in-plane edge vectors.
    contrib_v = np.array([y_frac * v_vec[0], y_frac * v_vec[1], y_frac * v_vec[2]])
    contrib_u = np.array([x_frac * u_vec[0], x_frac * u_vec[1], x_frac * u_vec[2]])
    # Shape (n_points, 3) after the transpose.
    return (origin + contrib_u + contrib_v).T
107
+
108
+
109
+ # points.append would make list of lists, keeping sections separate.
110
+
111
+
112
+ # related to coordinate extraction
113
+ # This function returns an array of points
114
def folder_to_atlas_space(
    folder,
    quint_alignment,
    atlas_labels,
    pixel_id=[0, 0, 0],
    non_linear=True,
    method="all",
    object_cutoff=0,
    atlas_volume=None,
    use_flat=False,
):
    """Apply Segmentation to atlas space to all segmentations in a folder."""
    """Return pixel_points, centroids, points_len, centroids_len, segmentation_filenames, """
    # NOTE(review): pixel_id has a mutable default ([0, 0, 0]); it is only
    # forwarded here, but confirm segmentation_to_atlas_space never mutates it.
    # This should be loaded above and passed as an argument
    slices = load_visualign_json(quint_alignment)

    # Collect every image in <folder>/segmentations with a supported extension.
    segmentation_file_types = [".png", ".tif", ".tiff", ".jpg", ".jpeg", ".dzip"]
    segmentations = [
        file
        for file in glob(folder + "/segmentations/*")
        if any([file.endswith(type) for type in segmentation_file_types])
    ]
    if len(segmentations) == 0:
        raise ValueError(
            f"No segmentations found in folder {folder}. Make sure the folder contains a segmentations folder with segmentations."
        )
    print(f"Found {len(segmentations)} segmentations in folder {folder}")
    if use_flat == True:
        # Flat/seg atlas maps live alongside the segmentations and are matched
        # to sections by the number embedded in their filenames.
        flat_files = [
            file
            for file in glob(folder + "/flat_files/*")
            if any([file.endswith(".flat"), file.endswith(".seg")])
        ]
        print(f"Found {len(flat_files)} flat files in folder {folder}")
        flat_file_nrs = [int(number_sections([ff])[0]) for ff in flat_files]

    # Order segmentations and section_numbers
    # segmentations = [x for _,x in sorted(zip(section_numbers,segmentations))]
    # section_numbers.sort()
    # Pre-sized result slots: each worker thread writes its own index, so the
    # lists are never resized concurrently.
    points_list = [np.array([])] * len(segmentations)
    centroids_list = [np.array([])] * len(segmentations)
    # NOTE(review): list-multiplication aliases one DataFrame across all slots;
    # harmless here because every processed index is overwritten below, but
    # unprocessed sections (empty anchoring) share the same placeholder object.
    region_areas_list = [
        pd.DataFrame(
            {
                "idx": [],
                "name": [],
                "r": [],
                "g": [],
                "b": [],
                "region_area": [],
                "pixel_count": [],
                "object_count": [],
                "area_fraction": [],
            }
        )
    ] * len(segmentations)
    threads = []
    for segmentation_path, index in zip(segmentations, range(len(segmentations))):
        # Match this segmentation to its registration slice by section number.
        seg_nr = int(number_sections([segmentation_path])[0])
        current_slice_index = np.where([s["nr"] == seg_nr for s in slices])
        current_slice = slices[current_slice_index[0][0]]
        # Skip sections that were never anchored in QuickNII.
        if current_slice["anchoring"] == []:
            continue
        if use_flat == True:
            current_flat_file_index = np.where([f == seg_nr for f in flat_file_nrs])
            current_flat = flat_files[current_flat_file_index[0][0]]
        else:
            current_flat = None

        # One worker thread per section; results are written in place into
        # points_list / centroids_list / region_areas_list at this index.
        x = threading.Thread(
            target=segmentation_to_atlas_space,
            args=(
                current_slice,
                segmentation_path,
                atlas_labels,
                current_flat,
                pixel_id,
                non_linear,
                points_list,
                centroids_list,
                region_areas_list,
                index,
                method,
                object_cutoff,
                atlas_volume,
                use_flat,
            ),
        )
        threads.append(x)
        ## This converts the segmentation to a point cloud
    # Start threads
    [t.start() for t in threads]
    # Wait for threads to finish
    [t.join() for t in threads]
    # Flatten points_list

    # Per-section lengths, with sections that produced no data counted as 0.
    points_len = [
        len(points) if None not in points else 0 for points in points_list
    ]
    centroids_len = [
        len(centroids) if None not in centroids else 0 for centroids in centroids_list
    ]
    # Drop empty/None entries before concatenating into flat point clouds.
    points_list = [points for points in points_list if None not in points]
    centroids_list = [centroids for centroids in centroids_list if None not in centroids]
    if len(points_list) == 0:
        points = np.array([])
    else:
        points = np.concatenate(points_list)
    if len(centroids_list) == 0:
        centroids = np.array([])
    else:
        centroids = np.concatenate(centroids_list)


    return (
        np.array(points),
        np.array(centroids),
        region_areas_list,
        points_len,
        centroids_len,
        segmentations,
    )
236
+
237
def load_segmentation(segmentation_path: str):
    """Read a segmentation image from disk.

    ``.dzip`` archives are reconstructed from their DZI tiles; every other
    supported format is read directly with OpenCV.
    """
    print(f"working on {segmentation_path}")
    if not segmentation_path.endswith(".dzip"):
        return cv2.imread(segmentation_path)
    print("Reconstructing dzi")
    return reconstruct_dzi(segmentation_path)
245
+
246
def detect_pixel_id(segmentation: np.array):
    """Return the colour of the first non-background pixel.

    Background is defined as pixels whose channels are all zero.
    """
    foreground = segmentation[~np.all(segmentation == 0, axis=2)]
    pixel_id = foreground[0]
    print("detected pixel_id: ", pixel_id)
    return pixel_id
252
+
253
def get_region_areas(use_flat, atlas_labels, flat_file_atlas, seg_width, seg_height, slice_dict, atlas_volume, triangulation):
    """Compute per-region areas for one section.

    With ``use_flat`` the areas come straight from the flat-file atlas map;
    otherwise the atlas volume is sliced via the section's anchoring vector
    (optionally warped by the triangulation).
    """
    if use_flat:
        return flat_to_dataframe(
            atlas_labels, flat_file_atlas, (seg_width, seg_height)
        )
    return flat_to_dataframe(
        atlas_labels,
        flat_file_atlas,
        (seg_width, seg_height),
        slice_dict["anchoring"],
        atlas_volume,
        triangulation,
    )
268
+
269
def get_transformed_coordinates(non_linear, slice_dict, method, scaled_x, scaled_y, centroids, scaled_centroidsX, scaled_centroidsY, triangulation):
    """Apply the VisuAlign non-linear deformation, when available, to pixel
    and centroid coordinates; otherwise pass the scaled coordinates through.

    Returns (new_x, new_y, centroids_new_x, centroids_new_y), with None for
    any coordinate set that was not requested or not available.
    """
    new_x = new_y = centroids_new_x = centroids_new_y = None
    deform = non_linear and "markers" in slice_dict
    if method in ["per_pixel", "all"]:
        if deform:
            if scaled_x is not None:
                new_x, new_y = transform_vec(triangulation, scaled_x, scaled_y)
        else:
            new_x, new_y = scaled_x, scaled_y
    if method in ["per_object", "all"]:
        if deform:
            if centroids is not None:
                centroids_new_x, centroids_new_y = transform_vec(triangulation, scaled_centroidsX, scaled_centroidsY)
        else:
            centroids_new_x, centroids_new_y = scaled_centroidsX, scaled_centroidsY
    return new_x, new_y, centroids_new_x, centroids_new_y
282
+
283
def segmentation_to_atlas_space(
    slice_dict,
    segmentation_path,
    atlas_labels,
    flat_file_atlas=None,
    pixel_id="auto",
    non_linear=True,
    points_list=None,
    centroids_list=None,
    region_areas_list=None,
    index=None,
    method="per_pixel",
    object_cutoff=0,
    atlas_volume=None,
    use_flat=False,
):
    """Map one segmentation image into atlas space and record the results.

    This is the thread target used by folder_to_atlas_space: it returns
    nothing and instead writes its outputs in place into points_list,
    centroids_list and region_areas_list at position ``index``.
    """
    segmentation = load_segmentation(segmentation_path)
    # "auto" means: use the first non-background colour found in the image.
    if pixel_id == "auto":
        pixel_id = detect_pixel_id(segmentation)
    seg_height, seg_width = segmentation.shape[:2]
    reg_height, reg_width = slice_dict["height"], slice_dict["width"]
    # The non-linear VisuAlign deformation requires markers for this section.
    if non_linear and "markers" in slice_dict:
        triangulation = triangulate(reg_width, reg_height, slice_dict["markers"])
    else:
        triangulation = None
    region_areas = get_region_areas(use_flat, atlas_labels, flat_file_atlas, seg_width, seg_height, slice_dict, atlas_volume, triangulation)
    # Scale factors from segmentation resolution to registration resolution.
    y_scale, x_scale = transform_to_registration(seg_height, seg_width, reg_height, reg_width)
    centroids, points = None, None
    scaled_centroidsX, scaled_centroidsY, scaled_x, scaled_y = None, None, None, None
    if method in ["per_object", "all"]:
        centroids, scaled_centroidsX, scaled_centroidsY = get_centroids(segmentation, pixel_id, y_scale, x_scale, object_cutoff)
    if method in ["per_pixel", "all"]:
        scaled_y, scaled_x = get_scaled_pixels(segmentation, pixel_id, y_scale, x_scale)

    # Apply the non-linear deformation (or pass-through) to both coordinate sets.
    new_x, new_y, centroids_new_x, centroids_new_y = get_transformed_coordinates(non_linear, slice_dict, method, scaled_x, scaled_y, centroids, scaled_centroidsX, scaled_centroidsY, triangulation)
    # Project the 2-D registration coordinates into 3-D atlas space.
    if method in ["per_pixel", "all"] and new_x is not None:
        points = transform_to_atlas_space(slice_dict["anchoring"], new_y, new_x, reg_height, reg_width)
    if method in ["per_object", "all"] and centroids_new_x is not None:
        centroids = transform_to_atlas_space(slice_dict["anchoring"], centroids_new_y, centroids_new_x, reg_height, reg_width)
    # Store per-section outputs for the caller (empty arrays when nothing found).
    points_list[index] = np.array(points if points is not None else [])
    centroids_list[index] = np.array(centroids if centroids is not None else [])
    region_areas_list[index] = region_areas
325
+
326
+
327
def get_centroids(segmentation, pixel_id, y_scale, x_scale, object_cutoff=0):
    """Detect objects matching ``pixel_id`` and return their centroids plus
    registration-space centroid coordinates.

    Returns (None, None, None) when no object survives the size cut-off.
    """
    # Binary mask of pixels whose colour matches exactly on every channel.
    object_mask = np.all(segmentation == pixel_id, axis=2)
    centroids, area, coords = get_centroids_and_area(
        object_mask, pixel_cut_off=object_cutoff
    )

    print(f"using pixel id {pixel_id}")
    print(f"Found {len(centroids)} objects in the segmentation")
    if len(centroids) == 0:
        return None, None, None
    # Columns are (y, x); scale them into registration space.
    scaled_centroidsY, scaled_centroidsX = scale_positions(
        centroids[:, 0], centroids[:, 1], y_scale, x_scale
    )
    return centroids, scaled_centroidsX, scaled_centroidsY
344
+
345
+
346
def get_scaled_pixels(segmentation, pixel_id, y_scale, x_scale):
    """Return registration-space (y, x) coordinates of every pixel matching
    ``pixel_id``, or (None, None) if there are no matches."""
    id_y, id_x = find_matching_pixels(segmentation, pixel_id)
    if len(id_y) == 0:
        return None, None
    # Rescale segmentation coordinates into registration space.
    return scale_positions(id_y, id_x, y_scale, x_scale)
@@ -0,0 +1,241 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ import struct
4
+ import cv2
5
+ from .generate_target_slice import generate_target_slice
6
+ from .visualign_deformations import transform_vec
7
+
8
+ # related to counting and load
9
def label_points(points, label_volume, scale_factor=1):
    """Assign each 3-D point the value of ``label_volume`` at its voxel.

    Points are interpreted as a flat sequence of (x, y, z) triples, scaled by
    ``scale_factor`` and rounded to the nearest voxel. Out-of-range points are
    redirected to voxel (0, 0, 0), so they pick up that voxel's label.
    """
    # Reshape to rows of (x, y, z), scale, and snap to integer voxel indices.
    voxels = np.round(np.reshape(points, (-1, 3)) * scale_factor).astype(int)
    x, y, z = voxels[:, 0], voxels[:, 1], voxels[:, 2]

    # Clamp negative coordinates to zero...
    x[x < 0] = 0
    y[y < 0] = 0
    z[z < 0] = 0
    # ...and send anything beyond the far edge of the volume to the origin.
    out_of_bounds = (
        (x > label_volume.shape[0] - 1)
        | (y > label_volume.shape[1] - 1)
        | (z > label_volume.shape[2] - 1)
    )
    x[out_of_bounds] = 0
    y[out_of_bounds] = 0
    z[out_of_bounds] = 0

    # Fancy-index the volume once for all points.
    return label_volume[x, y, z]
40
+
41
+
42
+ # related to counting_and_load
43
def pixel_count_per_region(
    labels_dict_points, labeled_dict_centroids, df_label_colours
):
    """Function for counting no. of pixels per region and writing to CSV based on
    a dictionary with the region as the key and the points as the value."""
    # Case 1: both per-pixel and per-object labels are available.
    if labels_dict_points is not None and labeled_dict_centroids is not None:
        counted_labels_points, label_counts_points = np.unique(
            labels_dict_points, return_counts=True
        )
        counted_labels_centroids, label_counts_centroids = np.unique(
            labeled_dict_centroids, return_counts=True
        )
        # Which regions have pixels, and how many pixels are there per region
        # NOTE(review): this zip pairs the two count arrays positionally and
        # truncates to the shorter one; if the pixel labels and centroid labels
        # do not cover exactly the same set of regions, the object counts are
        # misaligned against the wrong idx — confirm against real data.
        counts_per_label = list(
            zip(counted_labels_points, label_counts_points, label_counts_centroids)
        )
        # Create a list of unique regions and pixel counts per region
        df_counts_per_label = pd.DataFrame(
            counts_per_label, columns=["idx", "pixel_count", "object_count"]
        )
    # Case 2: only per-object (centroid) labels.
    elif labels_dict_points is None and labeled_dict_centroids is not None:
        counted_labels_centroids, label_counts_centroids = np.unique(
            labeled_dict_centroids, return_counts=True
        )
        # Which regions have pixels, and how many pixels are there per region
        counts_per_label = list(zip(counted_labels_centroids, label_counts_centroids))
        # Create a list of unique regions and pixel counts per region
        df_counts_per_label = pd.DataFrame(
            counts_per_label, columns=["idx", "object_count"]
        )
    # Case 3: only per-pixel labels.
    elif labels_dict_points is not None and labeled_dict_centroids is None:
        counted_labels_points, label_counts_points = np.unique(
            labels_dict_points, return_counts=True
        )
        # Which regions have pixels, and how many pixels are there per region
        counts_per_label = list(zip(counted_labels_points, label_counts_points))
        # Create a list of unique regions and pixel counts per region
        df_counts_per_label = pd.DataFrame(
            counts_per_label, columns=["idx", "pixel_count"]
        )
    # NOTE(review): if both inputs are None, df_counts_per_label is never bound
    # and the loop below raises NameError — confirm callers pass at least one.
    # Create a pandas df with regions and pixel counts

    # df_label_colours = pd.read_csv(label_colours, sep=",")
    # Find colours corresponding to each region ID and add to the pandas dataframe

    # Look up name, r, g, b in df_allen_colours in df_counts_per_label based on "idx"
    # Sharon, remove this here
    new_rows = []
    for index, row in df_counts_per_label.iterrows():
        # Row of the colour table matching this region id.
        mask = df_label_colours["idx"] == row["idx"]
        current_region_row = df_label_colours[mask]
        current_region_name = current_region_row["name"].values
        current_region_red = current_region_row["r"].values
        current_region_green = current_region_row["g"].values
        current_region_blue = current_region_row["b"].values

        # NOTE(review): [0] raises IndexError when a label id has no entry in
        # df_label_colours — confirm the colour table covers every atlas id.
        row["name"] = current_region_name[0]
        row["r"] = int(current_region_red[0])
        row["g"] = int(current_region_green[0])
        row["b"] = int(current_region_blue[0])

        new_rows.append(row)

    # Columns missing from a given case (e.g. object_count in case 3) come out
    # as NaN in the final frame.
    df_counts_per_label_name = pd.DataFrame(
        new_rows, columns=["idx", "name", "pixel_count", "object_count", "r", "g", "b"]
    )
    # Task for Sharon:
    # If you can get the areas per region from the flat file here
    # you can then use those areas to calculate the load per region here
    # and add to dataframe
    # see messing around pyflat.py

    return df_counts_per_label_name
116
+
117
+
118
+ """Read flat file and write into an np array"""
119
+ """Read flat file, write into an np array, assign label file values, return array"""
120
+
121
+
122
+
123
def read_flat_file(file):
    """Read a ``.flat`` atlas map into a (height, width) float array.

    File layout (big-endian): one byte giving the bytes-per-pixel (1 or 2),
    two uint32 values giving width and height, then width*height unsigned
    pixel values stored row-major.

    :param file: path to the .flat file
    :return: numpy array of shape (height, width), dtype float64
    """
    with open(file, "rb") as f:
        b, w, h = struct.unpack(">BII", f.read(9))
        # "xBH"[b] picks the struct code matching the per-pixel byte width.
        data = struct.unpack(">" + ("xBH"[b] * (w * h)), f.read(b * w * h))
    # Pixels are stored row-major (image[y, x] = data[x + y * w]), so a single
    # reshape replaces the original O(w*h) per-pixel Python copy loop; float64
    # matches the dtype of the np.zeros buffer the loop used to fill.
    return np.asarray(data, dtype=np.float64).reshape((h, w))
133
+
134
+
135
def read_seg_file(file):
    """Read a VisuAlign ``SegRLEv1`` run-length-encoded segmentation.

    :param file: path to the .seg file
    :return: numpy array of shape (h, w) containing the decoded label values
    :raises ValueError: if the file header does not match "SegRLEv1"
    """
    with open(file, "rb") as f:

        def byte():
            # One unsigned byte from the stream.
            return f.read(1)[0]

        def code():
            # Variable-length integer: 7 data bits per byte, high bit = continue.
            c = byte()
            if c < 0:
                # Bug fix: raising a plain string is a TypeError in Python 3.
                # (Unreachable for a valid stream — bytes are 0..255 — kept as a guard.)
                raise ValueError("Invalid code byte in seg file")
            return c if c < 128 else (c & 127) | (code() << 7)

        if "SegRLEv1" != f.read(8).decode():
            # Bug fix: was `raise "Header mismatch"`, a TypeError in Python 3.
            raise ValueError("Header mismatch")
        atlas = f.read(code()).decode()
        print(f"Target atlas: {atlas}")
        # Palette of label values, then image dimensions.
        codes = [code() for x in range(code())]
        w = code()
        h = code()
        data = []
        # Each run is (palette index, run length - 1) until the image is full.
        while len(data) < w * h:
            data += [codes[byte() if len(codes) <= 256 else code()]] * (code() + 1)
    image_data = np.array(data)
    image = np.reshape(image_data, (h, w))
    return image
160
+
161
+
162
def rescale_image(image, rescaleXY):
    """Resize ``image`` using nearest-neighbour interpolation, which preserves
    discrete label values.

    NOTE(review): rescaleXY is unpacked as (w, h) but cv2.resize expects its
    dsize argument as (width, height); passing (h, w) therefore swaps the two
    axes unless rescaleXY is actually stored as (height, width) — confirm with
    callers before relying on the output orientation.
    """
    w, h = rescaleXY
    return cv2.resize(image, (h, w), interpolation=cv2.INTER_NEAREST)
165
+
166
def assign_labels_to_image(image, labelfile):
    """Map each raw pixel value to the label id from ``labelfile``'s "idx" column.

    Pixel values are used as row positions into the idx column. Note that the
    result is the transpose of the input image — this reproduces the original
    meshgrid-based indexing, where element (i, j) of the output sampled
    image[j, i].
    """
    idx_lookup = labelfile["idx"].values
    # One vectorised fancy-index over the transposed image replaces the
    # meshgrid construction; the value mapping is identical.
    return idx_lookup[image.T.astype(int)]
176
+
177
+
178
def count_pixels_per_label(image, scale_factor=False):
    """Tally the pixels belonging to each unique label id.

    When ``scale_factor`` is truthy the counts are multiplied by it (used to
    express areas in segmentation-resolution pixels).
    Returns a DataFrame with columns "idx" and "region_area".
    """
    ids, pixel_counts = np.unique(image, return_counts=True)
    if scale_factor:
        pixel_counts = pixel_counts * scale_factor
    rows = list(zip(ids, pixel_counts))
    return pd.DataFrame(rows, columns=["idx", "region_area"])
185
+
186
+
187
def warp_image(image, triangulation, rescaleXY):
    """Warp ``image`` through the VisuAlign triangulation by inverse lookup.

    For every pixel of the output grid, the deformation is evaluated in the
    target resolution (rescaleXY, when provided) and the nearest source pixel
    is sampled, with indices clamped to the image bounds.
    """
    if rescaleXY is not None:
        w, h = rescaleXY
    else:
        h, w = image.shape
    reg_h, reg_w = image.shape
    # Coordinates of every pixel in the source image.
    oldX, oldY = np.meshgrid(np.arange(reg_w), np.arange(reg_h))
    oldX = oldX.flatten()
    oldY = oldY.flatten()
    # Scale into the target (segmentation) resolution before deforming...
    h_scale = h / reg_h
    w_scale = w / reg_w
    oldX = oldX * w_scale
    oldY = oldY * h_scale
    newX, newY = transform_vec(triangulation, oldX, oldY)
    # ...and back into source-image coordinates afterwards.
    newX = newX / w_scale
    newY = newY / h_scale
    newX = newX.reshape(reg_h, reg_w)
    newY = newY.reshape(reg_h, reg_w)
    newX = newX.astype(int)
    newY = newY.astype(int)
    # Clamp out-of-range lookups to the border pixels.
    newX[newX >= reg_w] = reg_w - 1
    newY[newY >= reg_h] = reg_h - 1
    newX[newX < 0] = 0
    newY[newY < 0] = 0
    # Sample the source image at the deformed positions.
    new_image = image[newY, newX]
    return new_image
213
+
214
def flat_to_dataframe(
    labelfile, file=None, rescaleXY=None, image_vector=None, volume=None, triangulation=None
):
    """Build a per-region area DataFrame for one section.

    The section image comes either from slicing ``volume`` with the anchoring
    vector ``image_vector`` (optionally warped by ``triangulation``), or from a
    .flat/.seg atlas-map ``file``. Returns the output of
    count_pixels_per_label: a DataFrame with "idx" and "region_area" columns.
    """
    # Prefer slicing the atlas volume when both the anchoring and volume exist.
    if (image_vector is not None) and (volume is not None):
        image = generate_target_slice(image_vector, volume)
        image = np.float64(image)
        if triangulation is not None:
            image = warp_image(image, triangulation, rescaleXY)
    # NOTE(review): if neither a file nor (image_vector, volume) is supplied,
    # file is None and .endswith raises AttributeError — confirm callers always
    # provide one of the two sources.
    elif file.endswith(".flat"):
        image = read_flat_file(file)
    elif file.endswith(".seg"):
        image = read_seg_file(file)
    print("datatype", image.dtype)
    print("image shape open", image.shape)

    if rescaleXY:
        # Scale counts so region areas are expressed in segmentation pixels
        # rather than atlas-map pixels.
        image_shapeY, image_shapeX = image.shape[0], image.shape[1]
        image_pixels = image_shapeY * image_shapeX
        seg_pixels = rescaleXY[0] * rescaleXY[1]
        scale_factor = seg_pixels / image_pixels
    else:
        scale_factor = False
    if (image_vector is None) or (volume is None):
        # Flat/seg files store palette indices that must be mapped to label ids.
        allen_id_image = assign_labels_to_image(image, labelfile)
    else:
        # A slice of the atlas volume already contains label ids.
        allen_id_image = image
    df_area_per_label = count_pixels_per_label(allen_id_image, scale_factor)
    return df_area_per_label
@@ -0,0 +1,35 @@
1
+ import numpy as np
2
+ import math
3
+
4
def generate_target_slice(ouv, atlas):
    """Extract a 2-D slice from a 3-D atlas volume using a QuickNII anchoring vector.

    :param ouv: nine values (ox, oy, oz, ux, uy, uz, vx, vy, vz) — the slice
        origin plus its two in-plane edge vectors, in voxel units
    :param atlas: 3-D label volume to sample
    :return: numpy array of shape (height, width), dtype uint32; positions that
        fall outside the atlas remain 0
    """
    # (Dead `width = None` / `height = None` pre-assignments removed.)
    ox, oy, oz, ux, uy, uz, vx, vy, vz = ouv
    # Output size is given by the lengths of the two edge vectors.
    width = np.floor(math.hypot(ux, uy, uz)).astype(int) + 1
    height = np.floor(math.hypot(vx, vy, vz)).astype(int) + 1
    data = np.zeros((width, height), dtype=np.uint32).flatten()
    xdim, ydim, zdim = atlas.shape
    y_values = np.arange(height)
    x_values = np.arange(width)
    # Start of each output row along v...
    hx = ox + vx * (y_values / height)
    hy = oy + vy * (y_values / height)
    hz = oz + vz * (y_values / height)
    # ...plus the per-column offset along u.
    wx = ux * (x_values / width)
    wy = uy * (x_values / width)
    wz = uz * (x_values / width)
    # Voxel coordinates for every (row, column) of the output slice.
    lx = np.floor(hx[:, None] + wx).astype(int)
    ly = np.floor(hy[:, None] + wy).astype(int)
    lz = np.floor(hz[:, None] + wz).astype(int)
    # Only sample voxels inside the atlas; everything else stays zero.
    valid_indices = (0 <= lx) & (lx < xdim) & (0 <= ly) & (ly < ydim) & (0 <= lz) & (lz < zdim)
    valid_indices = valid_indices.flatten()
    lxf = lx.flatten()
    lyf = ly.flatten()
    lzf = lz.flatten()
    valid_lx = lxf[valid_indices]
    valid_ly = lyf[valid_indices]
    valid_lz = lzf[valid_indices]
    atlas_slice = atlas[valid_lx, valid_ly, valid_lz]
    data[valid_indices] = atlas_slice
    data_im = data.reshape((height, width))
    return data_im
35
+
@@ -0,0 +1,67 @@
1
"""Create workflow for calculating load based on atlas maps and segmentations"""

# Exploratory pilot script: computes per-region areas for one section and
# joins region names/colours, writing the result to CSV. Not part of the
# package API (uses a plain `counting_and_load` import, not a relative one).
import pandas as pd
import cv2

# from read_and_write import flat_to_array, label_to_array
from counting_and_load import flat_to_dataframe

base = r"../test_data/tTA_2877_NOP_s037_atlasmap/2877_NOP_tTA_lacZ_Xgal_s037_nl.flat"
# NOTE(review): mixed / and \ separators in this path — works on Windows only.
label = r"../annotation_volumes\allen2017_colours.csv"
##optional
seg = r"../test_data/tTA_2877_NOP_s037_seg/2877_NOP_tTA_lacZ_Xgal_resize_Simple_Seg_s037.png"
segim = cv2.imread(seg)
# the indexing [:2] means the first two values and [::-1] means reverse the list
segXY = segim.shape[:2][::-1]
# image_arr = flat_to_array(base, label)

# plt.imshow(flat_to_array(base, label))

# NOTE(review): counting_and_load.flat_to_dataframe is declared as
# (labelfile, file, rescaleXY, ...); here the flat-file path is passed first
# and the colour CSV second — looks like an outdated call order. Confirm which
# signature this script was written against.
df_area_per_label = flat_to_dataframe(base, label, segXY)

"""count pixels in np array for unique idx, return pd df"""
# unique_ids, counts = np.unique(allen_id_image, return_counts=True)

# area_per_label = list(zip(unique_ids, counts))
# create a list of unique regions and pixel counts per region

# df_area_per_label = pd.DataFrame(area_per_label, columns=["idx", "area_count"])
# create a pandas df with regions and pixel counts


"""add region name and colours corresponding to each idx into dataframe.
This could be a separate function"""

df_label_colours = pd.read_csv(label, sep=",")
# find colours corresponding to each region ID and add to the pandas dataframe

# look up name, r, g, b in df_allen_colours in df_area_per_label based on "idx"
new_rows = []
for index, row in df_area_per_label.iterrows():
    # Colour-table row matching this region id.
    mask = df_label_colours["idx"] == row["idx"]
    current_region_row = df_label_colours[mask]
    current_region_name = current_region_row["name"].values
    current_region_red = current_region_row["r"].values
    current_region_green = current_region_row["g"].values
    current_region_blue = current_region_row["b"].values

    row["name"] = current_region_name[0]
    row["r"] = current_region_red[0]
    row["g"] = current_region_green[0]
    row["b"] = current_region_blue[0]

    new_rows.append(row)

df_area_per_label_name = pd.DataFrame(new_rows)

print(df_area_per_label_name)
df_area_per_label_name.to_csv(
    "../outputs/NOP_s037_regionareas.csv", sep=";", na_rep="", index=False
)


# Count area per unique label in one flat file - done.
# Scale up to size of corresponding segmentation/ or size of reference atlas if points are already scaled?
# divide "segmentation value per idx per slice" by "area per idx per slice"
# also do for whole brain - need to loop through and match up section with corresponding atlasmap
# output reports per brain and per slice