PyNutil 0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
PyNutil-0.1/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Harry Carey
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
PyNutil-0.1/PKG-INFO ADDED
@@ -0,0 +1,82 @@
1
+ Metadata-Version: 2.1
2
+ Name: PyNutil
3
+ Version: 0.1
4
+ Summary: a package to translate data between common coordinate templates
5
+ License: MIT
6
+ Description-Content-Type: text/markdown
7
+ License-File: LICENSE
8
+ Requires-Dist: numpy
9
+ Requires-Dist: brainglobe_atlasapi
10
+ Requires-Dist: pandas
11
+ Requires-Dist: requests
12
+ Requires-Dist: pynrrd
13
+ Requires-Dist: xmltodict
14
+ Requires-Dist: opencv-python
15
+ Requires-Dist: scikit-image
16
+
17
+ # PyNutil
18
+ PyNutil is currently under development.
19
+
20
+ PyNutil is a Python library for brain-wide quantification and spatial analysis of features in serial section images from mouse and rat brain. It aims to replicate the Quantifier feature of the Nutil software (RRID: SCR_017183). It builds on registration to a standardised reference atlas with the QuickNII (RRID:SCR_016854) and VisuAlign software (RRID:SCR_017978) and feature extraction by segmentation with an image analysis software such as ilastik (RRID:SCR_015246).
21
+
22
+ For more information about the QUINT workflow:
23
+ https://quint-workflow.readthedocs.io/en/latest/
24
+
25
+ # Usage
26
+ As input, PyNutil requires:
27
+ 1. An alignment JSON created with the QuickNII or VisuAlign software
28
+ 2. A segmentation file for each brain section with the features of interest displayed in a unique RGB colour (it currently accepts many image formats: png, jpg, jpeg, etc).
29
+
30
+
31
+ Note: The atlases available in PyNutil are those listed via the [brainglobe_atlasapi](https://github.com/brainglobe/brainglobe-atlasapi).
32
+
33
+ PyNutil requires Python 3.8 or above
34
+
35
+ ```python
36
+ from PyNutil import PyNutil
37
+ """
38
+ Here we define a quantifier object
39
+ The segmentations should be images which come out of ilastik, segmenting an object of interest
40
+ The alignment JSON should come from DeepSlice, QuickNII, or VisuAlign; it defines the section's position in an atlas
41
+ The colour says which colour is the object you want to quantify in your segmentation. It is defined in RGB
42
+ Finally the atlas name is the relevant atlas from brainglobe_atlasapi you wish to use in Quantification.
43
+ """
44
+ pnt = PyNutil(
45
+ segmentation_folder='../tests/test_data/big_caudoputamen_test/',
46
+ alignment_json='../tests/test_data/big_caudoputamen.json',
47
+ colour=[0, 0, 0],
48
+ atlas_name='allen_mouse_25um'
49
+ )
50
+
51
+ pnt.get_coordinates(object_cutoff=0)
52
+
53
+ pnt.quantify_coordinates()
54
+
55
+ pnt.save_analysis("PyNutil/outputs/myResults")
56
+ ```
57
+ PyNutil generates a series of reports in the folder which you specify.
58
+ # Feature Requests
59
+ We are open to feature requests 😊 Simply open an issue in the github describing the feature you would like to see.
60
+
61
+ # Acknowledgements
62
+ PyNutil is developed at the Neural Systems Laboratory at the Institute of Basic Medical Sciences, University of Oslo, Norway with support from the EBRAINS infrastructure, and funding support from the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Framework Partnership Agreement No. 650003 (HBP FPA).
63
+
64
+ # Contributors
65
+ Harry Carey, Sharon C Yates, Gergely Csucs, Ingvild Bjerke, Rembrandt Bakker, Nicolaas Groeneboom, Maja A Punchades, Jan G Bjaalie.
66
+
67
+ # Licence
68
+ MIT License
69
+
70
+ # Related articles
71
+ Yates SC, Groeneboom NE, Coello C, et al. & Bjaalie JG (2019) QUINT: Workflow for Quantification and Spatial Analysis of Features in Histological Images From Rodent Brain. Front. Neuroinform. 13:75. https://doi.org/10.3389/fninf.2019.00075
72
+
73
+ Groeneboom NE, Yates SC, Puchades MA and Bjaalie JG. Nutil: A Pre- and Post-processing Toolbox for Histological Rodent Brain Section Images. Front. Neuroinform. 2020,14:37. https://doi.org/10.3389/fninf.2020.00037
74
+
75
+ Puchades MA, Csucs G, Lederberger D, Leergaard TB and Bjaalie JG. Spatial registration of serial microscopic brain images to three-dimensional reference atlases with the QuickNII tool. PLosONE, 2019, 14(5): e0216796. https://doi.org/10.1371/journal.pone.0216796
76
+
77
+ Carey H, Pegios M, Martin L, Saleeba C, Turner A, Everett N, Puchades M, Bjaalie J, McMullan S. DeepSlice: rapid fully automatic registration of mouse brain imaging to a volumetric atlas. BioRxiv. https://doi.org/10.1101/2022.04.28.489953
78
+
79
+ Berg S., Kutra D., Kroeger T., Straehle C.N., Kausler B.X., Haubold C., et al. (2019) ilastik:interactive machine learning for (bio) image analysis. Nat Methods. 16, 1226–1232. https://doi.org/10.1038/s41592-019-0582-9
80
+
81
+ # Contact us
82
+ Report issues here on Github or email: support@ebrains.eu
@@ -0,0 +1 @@
1
+ from .main import PyNutil
@@ -0,0 +1,352 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ from .read_and_write import load_visualign_json
4
+ from .counting_and_load import flat_to_dataframe
5
+ from .visualign_deformations import triangulate, transform_vec
6
+ from glob import glob
7
+ import cv2
8
+ from skimage import measure
9
+ import threading
10
+ import re
11
+ from .reconstruct_dzi import reconstruct_dzi
12
+
13
+
14
def number_sections(filenames, legacy=False):
    """
    Return the section number encoded in each filename.

    :param filenames: list of filenames (paths are allowed; only the part
        after the last backslash is parsed)
    :type filenames: list[str]
    :param legacy: if True, ignore the ``_s###`` convention and use the three
        digits closest to the end of the filename instead
    :type legacy: bool
    :return: list of section numbers (ints in default mode; the legacy mode
        preserves the historical behaviour of returning digit strings)
    :rtype: list[int]
    :raises ValueError: if a filename contains zero or multiple ``_s###`` tags,
        or if no section numbers were found at all
    """
    # Strip Windows-style directory components so only the basename is parsed.
    filenames = [filename.split("\\")[-1] for filename in filenames]
    section_numbers = []
    for filename in filenames:
        if not legacy:
            match = re.findall(r"\_s\d+", filename)
            if len(match) == 0:
                # Include the offending filename so the user knows which file
                # to fix (the original message printed a literal placeholder).
                raise ValueError(f"No section number found in filename: {filename}")
            if len(match) > 1:
                raise ValueError(
                    "Multiple section numbers found in filename, ensure only one instance of _s### is present, where ### is the section number"
                )
            # Drop the leading "_s" and parse the digits.
            section_numbers.append(int(match[-1][2:]))
        else:
            match = re.sub("[^0-9]", "", filename)
            # Legacy behaviour: take the three digits closest to the end.
            section_numbers.append(match[-3:])
    if len(section_numbers) == 0:
        raise ValueError("No section numbers found in filenames")
    return section_numbers
42
+
43
+
44
+ # related to coordinate_extraction
45
def get_centroids_and_area(segmentation, pixel_cut_off=0):
    """Locate every connected object in the segmentation.

    Objects whose pixel area is not strictly greater than ``pixel_cut_off``
    are discarded. Returns ``(centroids, area, coords)`` — three numpy arrays
    aligned over the surviving objects.
    """
    # Label connected components, then keep only large-enough regions.
    labelled = measure.label(segmentation)
    regions = [
        region
        for region in measure.regionprops(labelled)
        if region.area > pixel_cut_off
    ]
    # Centre point of each surviving object.
    centroids = np.array([region.centroid for region in regions])
    # Pixel area of each surviving object.
    area = np.array([region.area for region in regions])
    # All pixel coordinates per object; dtype=object because counts differ.
    coords = np.array([region.coords for region in regions], dtype=object)
    return centroids, area, coords
61
+
62
+
63
+ # related to coordinate extraction
64
def transform_to_registration(seg_height, seg_width, reg_height, reg_width):
    """Return the (y_scale, x_scale) factors that map segmentation pixels
    into the registration space."""
    return reg_height / seg_height, reg_width / seg_width
69
+
70
+ # related to coordinate extraction
71
def find_matching_pixels(segmentation, id):
    """Return the Y and X coordinates of every pixel whose RGB value equals *id*."""
    # A pixel matches only when all of its channels equal the target colour.
    matches = np.all(segmentation == id, axis=2)
    id_y, id_x = np.nonzero(matches)
    return id_y, id_x
78
+
79
+
80
+ # related to coordinate extraction
81
def scale_positions(id_y, id_x, y_scale, x_scale):
    """Scale Y and X coordinates into registration space.

    (The y_scale and x_scale are the output of transform_to_registration.)
    """
    return id_y * y_scale, id_x * x_scale
88
+
89
+
90
+ # related to coordinate extraction
91
def transform_to_atlas_space(anchoring, y, x, reg_height, reg_width):
    """Transform registration-space coordinates to atlas space.

    The QuickNII anchoring vector holds nine values: the plane origin ``o``
    followed by the in-plane axis vectors ``u`` (X direction) and ``v``
    (Y direction). Each point maps to ``o + x_scale * u + y_scale * v``.

    Note: the original code "swapped" u and v via ``np.array([u[0], u[1],
    u[2]])``, which is an identity operation — those no-op lines (and their
    misleading comments) are removed here; behaviour is unchanged.

    :param anchoring: sequence of nine floats (o, u, v)
    :param y: Y coordinates in registration space (array-like)
    :param x: X coordinates in registration space (array-like)
    :param reg_height: registration image height
    :param reg_width: registration image width
    :return: (n, 3) array of atlas-space coordinates
    """
    o = np.reshape(anchoring[0:3], (3, 1))
    u = np.asarray(anchoring[3:6])
    v = np.asarray(anchoring[6:9])
    # Scale X and Y to between 0 and 1 using the registration width and height.
    y_scale = y / reg_height
    x_scale = x / reg_width
    # Per-component contribution of each axis vector, shape (3, n).
    xyz_v = np.array([y_scale * v[0], y_scale * v[1], y_scale * v[2]])
    xyz_u = np.array([x_scale * u[0], x_scale * u[1], x_scale * u[2]])
    return (o + xyz_u + xyz_v).T
107
+
108
+
109
+ # points.append would make list of lists, keeping sections separate.
110
+
111
+
112
+ # related to coordinate extraction
113
+ # This function returns an array of points
114
def folder_to_atlas_space(
    folder,
    quint_alignment,
    atlas_labels,
    pixel_id=[0, 0, 0],
    non_linear=True,
    method="all",
    object_cutoff=0,
    atlas_volume=None,
    use_flat=False,
):
    """Apply Segmentation to atlas space to all segmentations in a folder.

    Finds image files under ``<folder>/segmentations/`` (and, when
    ``use_flat`` is True, ``.flat``/``.seg`` files under
    ``<folder>/flat_files/``), matches each one to its slice in the
    QuickNII/VisuAlign alignment JSON by the section number embedded in the
    filename, and runs ``segmentation_to_atlas_space`` for each section on
    its own thread. The worker threads write results into the pre-allocated
    per-section lists at their assigned index.

    NOTE(review): ``pixel_id`` has a mutable default ([0, 0, 0]); it is only
    passed through here, but confirm no callee mutates it.

    :param folder: path containing a ``segmentations`` subfolder
    :param quint_alignment: path to the QuickNII/VisuAlign alignment JSON
    :param atlas_labels: atlas label table forwarded to the per-section worker
    :param pixel_id: RGB colour of the segmented objects
    :param non_linear: apply VisuAlign non-linear deformation when markers exist
    :param method: "per_pixel", "per_object" or "all"
    :param object_cutoff: minimum object size in pixels
    :param atlas_volume: atlas volume forwarded to the per-section worker
    :param use_flat: read region areas from flat files instead of the volume
    :return: (points, centroids, region_areas_list, points_len,
        centroids_len, segmentations)
    """
    """Return pixel_points, centroids, points_len, centroids_len, segmentation_filenames, """
    # This should be loaded above and passed as an argument
    slices = load_visualign_json(quint_alignment)

    segmentation_file_types = [".png", ".tif", ".tiff", ".jpg", ".jpeg", ".dzip"]
    segmentations = [
        file
        for file in glob(folder + "/segmentations/*")
        if any([file.endswith(type) for type in segmentation_file_types])
    ]
    if len(segmentations) == 0:
        raise ValueError(
            f"No segmentations found in folder {folder}. Make sure the folder contains a segmentations folder with segmentations."
        )
    print(f"Found {len(segmentations)} segmentations in folder {folder}")
    if use_flat == True:
        flat_files = [
            file
            for file in glob(folder + "/flat_files/*")
            if any([file.endswith(".flat"), file.endswith(".seg")])
        ]
        print(f"Found {len(flat_files)} flat files in folder {folder}")
        # Section number for each flat file, used to pair flats with segmentations.
        flat_file_nrs = [int(number_sections([ff])[0]) for ff in flat_files]

    # Order segmentations and section_numbers
    # segmentations = [x for _,x in sorted(zip(section_numbers,segmentations))]
    # section_numbers.sort()
    # Pre-allocate one slot per segmentation; each worker thread fills its own
    # index, so no locking is needed.
    points_list = [np.array([])] * len(segmentations)
    centroids_list = [np.array([])] * len(segmentations)
    # Empty per-section report frames (replaced in-place by the workers).
    region_areas_list = [
        pd.DataFrame(
            {
                "idx": [],
                "name": [],
                "r": [],
                "g": [],
                "b": [],
                "region_area": [],
                "pixel_count": [],
                "object_count": [],
                "area_fraction": [],
            }
        )
    ] * len(segmentations)
    threads = []
    for segmentation_path, index in zip(segmentations, range(len(segmentations))):
        # Pair this segmentation with its alignment slice via the section number.
        seg_nr = int(number_sections([segmentation_path])[0])
        current_slice_index = np.where([s["nr"] == seg_nr for s in slices])
        current_slice = slices[current_slice_index[0][0]]
        # Skip sections that were never anchored in QuickNII/VisuAlign.
        if current_slice["anchoring"] == []:
            continue
        if use_flat == True:
            current_flat_file_index = np.where([f == seg_nr for f in flat_file_nrs])
            current_flat = flat_files[current_flat_file_index[0][0]]
        else:
            current_flat = None

        x = threading.Thread(
            target=segmentation_to_atlas_space,
            args=(
                current_slice,
                segmentation_path,
                atlas_labels,
                current_flat,
                pixel_id,
                non_linear,
                points_list,
                centroids_list,
                region_areas_list,
                index,
                method,
                object_cutoff,
                atlas_volume,
                use_flat,
            ),
        )
        threads.append(x)
        ## This converts the segmentation to a point cloud
    # Start threads
    [t.start() for t in threads]
    # Wait for threads to finish
    [t.join() for t in threads]
    # Flatten points_list

    # Per-section lengths, treating sections whose worker produced None as empty.
    points_len = [
        len(points) if None not in points else 0 for points in points_list
    ]
    centroids_len = [
        len(centroids) if None not in centroids else 0 for centroids in centroids_list
    ]
    points_list = [points for points in points_list if None not in points]
    centroids_list = [centroids for centroids in centroids_list if None not in centroids]
    if len(points_list) == 0:
        points = np.array([])
    else:
        points = np.concatenate(points_list)
    if len(centroids_list) == 0:
        centroids = np.array([])
    else:
        centroids = np.concatenate(centroids_list)


    return (
        np.array(points),
        np.array(centroids),
        region_areas_list,
        points_len,
        centroids_len,
        segmentations,
    )
236
+
237
def load_segmentation(segmentation_path: str):
    """Read a segmentation image from disk; .dzip archives are reconstructed first."""
    print(f"working on {segmentation_path}")
    if not segmentation_path.endswith(".dzip"):
        return cv2.imread(segmentation_path)
    print("Reconstructing dzi")
    return reconstruct_dzi(segmentation_path)
245
+
246
def detect_pixel_id(segmentation: np.array):
    """Infer the object colour: the value of the first pixel that is not
    all-zero (background)."""
    foreground = segmentation[~np.all(segmentation == 0, axis=2)]
    pixel_id = foreground[0]
    print("detected pixel_id: ", pixel_id)
    return pixel_id
252
+
253
def get_region_areas(use_flat, atlas_labels, flat_file_atlas, seg_width, seg_height, slice_dict, atlas_volume, triangulation):
    """Build the per-region area table for one section.

    With ``use_flat`` the table comes from the flat-file atlas alone;
    otherwise the slice anchoring, atlas volume and triangulation are also
    passed so the section plane can be cut from the volume.
    """
    if use_flat:
        return flat_to_dataframe(
            atlas_labels, flat_file_atlas, (seg_width, seg_height)
        )
    return flat_to_dataframe(
        atlas_labels,
        flat_file_atlas,
        (seg_width, seg_height),
        slice_dict["anchoring"],
        atlas_volume,
        triangulation,
    )
268
+
269
def get_transformed_coordinates(non_linear, slice_dict, method, scaled_x, scaled_y, centroids, scaled_centroidsX, scaled_centroidsY, triangulation):
    """Apply the VisuAlign non-linear deformation (when markers exist) to the
    scaled pixel and centroid coordinates; otherwise pass them through.

    Returns (new_x, new_y, centroids_new_x, centroids_new_y), with None for
    any coordinate set that was not requested or not available.
    """
    new_x = new_y = centroids_new_x = centroids_new_y = None
    wants_pixels = method in ["per_pixel", "all"]
    wants_objects = method in ["per_object", "all"]
    if non_linear and "markers" in slice_dict:
        # Deform through the triangulated marker mesh.
        if wants_pixels and scaled_x is not None:
            new_x, new_y = transform_vec(triangulation, scaled_x, scaled_y)
        if wants_objects and centroids is not None:
            centroids_new_x, centroids_new_y = transform_vec(triangulation, scaled_centroidsX, scaled_centroidsY)
    else:
        # No deformation available: coordinates pass through unchanged.
        if wants_pixels:
            new_x, new_y = scaled_x, scaled_y
        if wants_objects:
            centroids_new_x, centroids_new_y = scaled_centroidsX, scaled_centroidsY
    return new_x, new_y, centroids_new_x, centroids_new_y
282
+
283
def segmentation_to_atlas_space(
    slice_dict,
    segmentation_path,
    atlas_labels,
    flat_file_atlas=None,
    pixel_id="auto",
    non_linear=True,
    points_list=None,
    centroids_list=None,
    region_areas_list=None,
    index=None,
    method="per_pixel",
    object_cutoff=0,
    atlas_volume=None,
    use_flat=False,
):
    """Transform a single segmentation image into atlas-space coordinates.

    Designed as a thread target for ``folder_to_atlas_space``: nothing is
    returned — results are written into ``points_list``, ``centroids_list``
    and ``region_areas_list`` at position ``index``.

    :param slice_dict: one slice entry from the alignment JSON (must contain
        "height", "width" and "anchoring"; "markers" enables deformation)
    :param segmentation_path: path to the segmentation image (or .dzip)
    :param atlas_labels: atlas label table for the region-area report
    :param flat_file_atlas: matching flat file when ``use_flat`` is True
    :param pixel_id: RGB colour of the objects, or "auto" to detect it from
        the first non-background pixel
    :param non_linear: apply VisuAlign deformation when markers are present
    :param points_list: shared output list for per-pixel atlas points
    :param centroids_list: shared output list for per-object centroids
    :param region_areas_list: shared output list for region-area DataFrames
    :param index: slot in the shared output lists owned by this call
    :param method: "per_pixel", "per_object" or "all"
    :param object_cutoff: minimum object size in pixels
    :param atlas_volume: atlas volume used when not reading from flat files
    :param use_flat: read region areas from the flat file instead
    """
    segmentation = load_segmentation(segmentation_path)
    if pixel_id == "auto":
        # Detect the object colour from the first non-background pixel.
        pixel_id = detect_pixel_id(segmentation)
    seg_height, seg_width = segmentation.shape[:2]
    reg_height, reg_width = slice_dict["height"], slice_dict["width"]
    # Build the non-linear deformation mesh only when markers are available.
    if non_linear and "markers" in slice_dict:
        triangulation = triangulate(reg_width, reg_height, slice_dict["markers"])
    else:
        triangulation = None
    region_areas = get_region_areas(use_flat, atlas_labels, flat_file_atlas, seg_width, seg_height, slice_dict, atlas_volume, triangulation)
    # Scale factors from segmentation resolution to registration resolution.
    y_scale, x_scale = transform_to_registration(seg_height, seg_width, reg_height, reg_width)
    centroids, points = None, None
    scaled_centroidsX, scaled_centroidsY, scaled_x, scaled_y = None, None, None, None
    if method in ["per_object", "all"]:
        centroids, scaled_centroidsX, scaled_centroidsY = get_centroids(segmentation, pixel_id, y_scale, x_scale, object_cutoff)
    if method in ["per_pixel", "all"]:
        scaled_y, scaled_x = get_scaled_pixels(segmentation, pixel_id, y_scale, x_scale)

    # Apply the deformation (or pass-through) to whatever coordinates exist.
    new_x, new_y, centroids_new_x, centroids_new_y = get_transformed_coordinates(non_linear, slice_dict, method, scaled_x, scaled_y, centroids, scaled_centroidsX, scaled_centroidsY, triangulation)
    # Project registration-space coordinates into the atlas via the anchoring vector.
    if method in ["per_pixel", "all"] and new_x is not None:
        points = transform_to_atlas_space(slice_dict["anchoring"], new_y, new_x, reg_height, reg_width)
    if method in ["per_object", "all"] and centroids_new_x is not None:
        centroids = transform_to_atlas_space(slice_dict["anchoring"], centroids_new_y, centroids_new_x, reg_height, reg_width)
    # Publish results into the caller-owned slots (empty arrays when nothing matched).
    points_list[index] = np.array(points if points is not None else [])
    centroids_list[index] = np.array(centroids if centroids is not None else [])
    region_areas_list[index] = region_areas
325
+
326
+
327
def get_centroids(segmentation, pixel_id, y_scale, x_scale, object_cutoff=0):
    """Find objects of colour *pixel_id* and return their centroids scaled to
    registration space.

    Returns (centroids, scaled_centroidsX, scaled_centroidsY), or
    (None, None, None) when no objects survive the cutoff.
    """
    # Boolean mask of pixels matching the object colour across all channels.
    object_mask = np.all(segmentation == pixel_id, axis=2)
    centroids, area, coords = get_centroids_and_area(
        object_mask, pixel_cut_off=object_cutoff
    )

    print(f"using pixel id {pixel_id}")
    print(f"Found {len(centroids)} objects in the segmentation")
    if len(centroids) == 0:
        return None, None, None
    # Column 0 is Y, column 1 is X (row, col ordering from regionprops).
    scaled_centroidsY, scaled_centroidsX = scale_positions(
        centroids[:, 0], centroids[:, 1], y_scale, x_scale
    )
    return centroids, scaled_centroidsX, scaled_centroidsY
344
+
345
+
346
def get_scaled_pixels(segmentation, pixel_id, y_scale, x_scale):
    """Return registration-space (Y, X) coordinates of every pixel matching
    *pixel_id*, or (None, None) when there are no matches."""
    id_y, id_x = find_matching_pixels(segmentation, pixel_id)
    if len(id_y) == 0:
        return None, None
    # Scale the seg coordinates to reg/seg
    return scale_positions(id_y, id_x, y_scale, x_scale)