celldetective-1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. celldetective/__init__.py +2 -0
  2. celldetective/__main__.py +432 -0
  3. celldetective/datasets/segmentation_annotations/blank +0 -0
  4. celldetective/datasets/signal_annotations/blank +0 -0
  5. celldetective/events.py +149 -0
  6. celldetective/extra_properties.py +100 -0
  7. celldetective/filters.py +89 -0
  8. celldetective/gui/__init__.py +20 -0
  9. celldetective/gui/about.py +44 -0
  10. celldetective/gui/analyze_block.py +563 -0
  11. celldetective/gui/btrack_options.py +898 -0
  12. celldetective/gui/classifier_widget.py +386 -0
  13. celldetective/gui/configure_new_exp.py +532 -0
  14. celldetective/gui/control_panel.py +438 -0
  15. celldetective/gui/gui_utils.py +495 -0
  16. celldetective/gui/json_readers.py +113 -0
  17. celldetective/gui/measurement_options.py +1425 -0
  18. celldetective/gui/neighborhood_options.py +452 -0
  19. celldetective/gui/plot_signals_ui.py +1042 -0
  20. celldetective/gui/process_block.py +1055 -0
  21. celldetective/gui/retrain_segmentation_model_options.py +706 -0
  22. celldetective/gui/retrain_signal_model_options.py +643 -0
  23. celldetective/gui/seg_model_loader.py +460 -0
  24. celldetective/gui/signal_annotator.py +2388 -0
  25. celldetective/gui/signal_annotator_options.py +340 -0
  26. celldetective/gui/styles.py +217 -0
  27. celldetective/gui/survival_ui.py +903 -0
  28. celldetective/gui/tableUI.py +608 -0
  29. celldetective/gui/thresholds_gui.py +1300 -0
  30. celldetective/icons/logo-large.png +0 -0
  31. celldetective/icons/logo.png +0 -0
  32. celldetective/icons/signals_icon.png +0 -0
  33. celldetective/icons/splash-test.png +0 -0
  34. celldetective/icons/splash.png +0 -0
  35. celldetective/icons/splash0.png +0 -0
  36. celldetective/icons/survival2.png +0 -0
  37. celldetective/icons/vignette_signals2.png +0 -0
  38. celldetective/icons/vignette_signals2.svg +114 -0
  39. celldetective/io.py +2050 -0
  40. celldetective/links/zenodo.json +561 -0
  41. celldetective/measure.py +1258 -0
  42. celldetective/models/segmentation_effectors/blank +0 -0
  43. celldetective/models/segmentation_generic/blank +0 -0
  44. celldetective/models/segmentation_targets/blank +0 -0
  45. celldetective/models/signal_detection/blank +0 -0
  46. celldetective/models/tracking_configs/mcf7.json +68 -0
  47. celldetective/models/tracking_configs/ricm.json +203 -0
  48. celldetective/models/tracking_configs/ricm2.json +203 -0
  49. celldetective/neighborhood.py +717 -0
  50. celldetective/scripts/analyze_signals.py +51 -0
  51. celldetective/scripts/measure_cells.py +275 -0
  52. celldetective/scripts/segment_cells.py +212 -0
  53. celldetective/scripts/segment_cells_thresholds.py +140 -0
  54. celldetective/scripts/track_cells.py +206 -0
  55. celldetective/scripts/train_segmentation_model.py +246 -0
  56. celldetective/scripts/train_signal_model.py +49 -0
  57. celldetective/segmentation.py +712 -0
  58. celldetective/signals.py +2826 -0
  59. celldetective/tracking.py +974 -0
  60. celldetective/utils.py +1681 -0
  61. celldetective-1.0.2.dist-info/LICENSE +674 -0
  62. celldetective-1.0.2.dist-info/METADATA +192 -0
  63. celldetective-1.0.2.dist-info/RECORD +66 -0
  64. celldetective-1.0.2.dist-info/WHEEL +5 -0
  65. celldetective-1.0.2.dist-info/entry_points.txt +2 -0
  66. celldetective-1.0.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1258 @@
1
+ import math
2
+ import sys
3
+ from collections import defaultdict
4
+
5
+ import numpy as np
6
+ import pandas as pd
7
+ from lmfit import Parameters, Model
8
+ import skimage.exposure, skimage.feature  # skimage.feature provides blob_dog, used in blob_detection
9
+ import tifffile
10
+ from lmfit import Parameters, Model, models
11
+ # import lmfit
12
+ from scipy import ndimage
13
+ from stardist import fill_label_holes
14
+ from tqdm import tqdm
15
+ from skimage.measure import regionprops_table
16
+ from scipy.ndimage import distance_transform_edt
17
+ from functools import reduce
18
+ from mahotas.features import haralick
19
+ from scipy.ndimage import zoom, binary_fill_holes
20
+ import os
21
+ import subprocess
22
+
23
+ from celldetective.filters import std_filter, gauss_filter
24
+ import datetime
25
+ from skimage.draw import disk as dsk
26
+
27
+ from celldetective.filters import std_filter, gauss_filter
28
+ from celldetective.utils import rename_intensity_column, create_patch_mask, remove_redundant_features, \
29
+ remove_trajectory_measurements
30
+ import celldetective.extra_properties as extra_properties
31
+ from celldetective.extra_properties import *
32
+ import cv2
33
+ from inspect import getmembers, isfunction
34
+ from skimage.morphology import disk
35
+
36
+ abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'celldetective'])
37
+
38
+ def measure(stack=None, labels=None, trajectories=None, channel_names=None,
39
+ features=None, intensity_measurement_radii=None, isotropic_operations=['mean'], border_distances=None,
40
+ haralick_options=None, column_labels={'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}, clear_previous=False):
41
+
42
+ """
43
+
44
+ Perform measurements on a stack of images or labels.
45
+
46
+ Parameters
47
+ ----------
48
+ stack : numpy array, optional
49
+ Stack of images with shape (T, Y, X, C), where T is the number of frames, Y and X are the spatial dimensions,
50
+ and C is the number of channels. Default is None.
51
+ labels : numpy array, optional
52
+ Label stack with shape (T, Y, X) representing cell segmentations. Default is None.
53
+ trajectories : pandas DataFrame, optional
54
+ DataFrame of cell trajectories with columns specified in `column_labels`. Default is None.
55
+ channel_names : list, optional
56
+ List of channel names corresponding to the image stack. Default is None.
57
+ features : list, optional
58
+ List of features to measure using the `measure_features` function. Default is None.
59
+ intensity_measurement_radii : int, float, or list, optional
60
+ Radius or list of radii specifying the size of the isotropic measurement area for intensity measurements.
61
+ If a single value is provided, a circular measurement area is used. If a list of values is provided, multiple
62
+ measurements are performed using ring-shaped measurement areas. Default is None.
63
+ isotropic_operations : list, optional
64
+ List of operations to perform on the isotropic intensity values. Default is ['mean'].
65
+ border_distances : int, float, or list, optional
66
+ Distance or list of distances specifying the size of the border region for intensity measurements.
67
+ If a single value is provided, measurements are performed at a fixed distance from the cell borders.
68
+ If a list of values is provided, measurements are performed at multiple border distances. Default is None.
69
+ haralick_options : dict, optional
70
+ Dictionary of options for Haralick feature measurements. Default is None.
71
+ column_labels : dict, optional
72
+ Dictionary containing the column labels for the DataFrame. Default is {'track': "TRACK_ID",
73
+ 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}.
74
+
75
+ Returns
76
+ -------
77
+ pandas DataFrame
78
+ DataFrame containing the measured features and intensities.
79
+
80
+ Notes
81
+ -----
82
+ This function performs measurements on a stack of images or labels. If both `stack` and `labels` are provided,
83
+ measurements are performed on each frame of the stack. The measurements include isotropic intensity values, computed
84
+ using the `measure_isotropic_intensity` function, and additional features, computed using the `measure_features` function.
85
+ The intensity measurements are performed at the positions specified in the `trajectories` DataFrame, using the
86
+ specified `intensity_measurement_radii` and `border_distances`. The resulting measurements are combined into a single
87
+ DataFrame and returned.
88
+
89
+ Examples
90
+ --------
91
+ >>> stack = np.random.rand(10, 100, 100, 3)
92
+ >>> labels = np.random.randint(0, 2, (10, 100, 100))
93
+ >>> trajectories = pd.DataFrame({'TRACK_ID': [1, 2, 3], 'FRAME': [1, 1, 1],
94
+ ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
95
+ >>> channel_names = ['channel1', 'channel2', 'channel3']
96
+ >>> features = ['area', 'intensity_mean']
97
+ >>> intensity_measurement_radii = [5, 10]
98
+ >>> border_distances = 2
99
+ >>> measurements = measure(stack=stack, labels=labels, trajectories=trajectories, channel_names=channel_names,
100
+ ... features=features, intensity_measurement_radii=intensity_measurement_radii,
101
+ ... border_distances=border_distances)
102
+ # Perform measurements on the stack, labels, and trajectories, computing isotropic intensities and additional features.
103
+
104
+ """
105
+
106
+
107
+ do_iso_intensities = True
108
+ do_features = True
109
+
110
+
111
+ # Check that conditions are satisfied to perform measurements
112
+ assert (labels is not None) or (stack is not None),'Please pass a stack and/or labels... Abort.'
113
+ if (labels is not None)*(stack is not None):
114
+ assert labels.shape==stack.shape[:-1],f"Shape mismatch between the stack of shape {stack.shape} and the segmentation {labels.shape}..."
115
+
116
+ # Condition to compute features
117
+ if labels is None:
118
+ do_features = False
119
+ nbr_frames = len(stack)
120
+ print('No labels were provided... Features will not be computed...')
121
+ else:
122
+ nbr_frames = len(labels)
123
+
124
+ # Condition to compute isotropic intensities
125
+ if (stack is None) or (trajectories is None) or (intensity_measurement_radii is None):
126
+ do_iso_intensities = False
127
+ print('Either no image, no positions or no radii were provided... Isotropic intensities will not be computed...')
128
+
129
+ # Compensate for non provided channel names
130
+ if (stack is not None)*(channel_names is None):
131
+ nbr_channels = stack.shape[-1]
132
+ channel_names = [f'intensity-{k}' for k in range(nbr_channels)]
133
+
134
+ if isinstance(intensity_measurement_radii, int) or isinstance(intensity_measurement_radii, float):
135
+ intensity_measurement_radii = [intensity_measurement_radii]
136
+
137
+ if isinstance(border_distances, int) or isinstance(border_distances, float):
138
+ border_distances = [border_distances]
139
+
140
+ if features is not None:
141
+ features = remove_redundant_features(features, trajectories.columns,
142
+ channel_names=channel_names)
143
+
144
+ if features is None:
145
+ features = []
146
+
147
+ # Prep for the case where no trajectory is provided but still want to measure isotropic intensities...
148
+ if (trajectories is None):
149
+ do_features = True
150
+ features += ['centroid']
151
+ else:
152
+ if clear_previous:
153
+ trajectories = remove_trajectory_measurements(trajectories, column_labels)
154
+
155
+ timestep_dataframes = []
156
+
157
+ for t in tqdm(range(nbr_frames),desc='frame'):
158
+
159
+ if stack is not None:
160
+ img = stack[t]
161
+ else:
162
+ img = None
163
+ if labels is not None:
164
+ lbl = labels[t]
165
+ else:
166
+ lbl = None
167
+
168
+ if trajectories is not None:
169
+ positions_at_t = trajectories.loc[trajectories[column_labels['time']]==t].copy()
170
+
171
+ if do_features:
172
+ feature_table = measure_features(img, lbl, features = features, border_dist=border_distances,
173
+ channels=channel_names, haralick_options=haralick_options, verbose=False)
174
+ if trajectories is None:
175
+ # Use the centroids as estimate for the location of the cells, to be passed to the measure_isotropic_intensity function.
176
+ positions_at_t = feature_table[['centroid-1', 'centroid-0','class_id']].copy()
177
+ positions_at_t['ID'] = np.arange(len(positions_at_t)) # temporary ID for the cells, that will be reset at the end since they are not tracked
178
+ positions_at_t.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y'},inplace=True)
179
+ positions_at_t['FRAME'] = int(t)
180
+ column_labels = {'track': "ID", 'time': column_labels['time'], 'x': column_labels['x'], 'y': column_labels['y']}
181
+
182
+ # Isotropic measurements (circle, ring)
183
+ if do_iso_intensities:
184
+ iso_table = measure_isotropic_intensity(positions_at_t, img, channels=channel_names, intensity_measurement_radii=intensity_measurement_radii,
185
+ column_labels=column_labels, operations=isotropic_operations, verbose=False)
186
+
187
+ if do_iso_intensities*do_features:
188
+ measurements_at_t = iso_table.merge(feature_table, how='outer', on='class_id')
189
+ elif do_iso_intensities*(not do_features):
190
+ measurements_at_t = iso_table
191
+ elif do_features*(trajectories is not None):
192
+ measurements_at_t = positions_at_t.merge(feature_table, how='outer', on='class_id')
193
+ elif do_features*(trajectories is None):
194
+ measurements_at_t = positions_at_t
195
+
196
+ timestep_dataframes.append(measurements_at_t)
197
+
198
+ measurements = pd.concat(timestep_dataframes)
199
+ if trajectories is not None:
200
+ measurements = measurements.sort_values(by=[column_labels['track'],column_labels['time']])
201
+ measurements = measurements.dropna(subset=[column_labels['track']])
202
+ else:
203
+ measurements['ID'] = np.arange(len(measurements))
204
+
205
+ measurements = measurements.reset_index(drop=True)
206
+
207
+ return measurements
208
+
209
+ def write_first_detection_class(tab, column_labels={'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}):
210
+
211
+ tab = tab.sort_values(by=[column_labels['track'],column_labels['time']])
212
+ if 'area' in tab.columns:
213
+ for tid,track_group in tab.groupby(column_labels['track']):
214
+ indices = track_group.index
215
+ area = track_group['area'].values
216
+ timeline = track_group[column_labels['time']].values
217
+ if np.any(area==area):  # area==area is False for NaN, i.e. the cell was detected in at least one frame
218
+ t_first = timeline[area==area][0]
219
+ cclass = 1
220
+ if t_first==0:
221
+ t_first = 0
222
+ cclass = 2
223
+ else:
224
+ t_first = -1
225
+ cclass = 2
226
+
227
+ tab.loc[indices, 'class_firstdetection'] = cclass
228
+ tab.loc[indices, 't_firstdetection'] = t_first
229
+ return tab
230
+
231
+
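# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# write_first_detection_class annotates each track with the frame at which a valid
# (non-NaN) 'area' first appears. Column names below are the module defaults; the
# toy values are illustrative only.
import numpy as np
import pandas as pd
from celldetective.measure import write_first_detection_class

tracks = pd.DataFrame({
    'TRACK_ID': [1, 1, 1, 2, 2, 2],
    'FRAME':    [0, 1, 2, 0, 1, 2],
    'area':     [np.nan, 120.0, 125.0, 90.0, 95.0, 98.0],  # track 2 already segmented at frame 0
})
annotated = write_first_detection_class(tracks)
# Adds 'class_firstdetection' and 't_firstdetection' per track
# (track 1: first detection at frame 1; track 2: detected from the first frame onwards).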
232
+ def contour_of_instance_segmentation(label, distance):
233
+
234
+ """
235
+
236
+ Generate an instance mask containing the contour of the segmented objects.
237
+
238
+ Parameters
239
+ ----------
240
+ label : ndarray
241
+ The instance segmentation labels.
242
+ distance : int, float, list, or tuple
243
+ The distance or range of distances from the edge of each instance to include in the contour.
244
+ If a single value is provided, it represents the maximum distance. If a tuple or list is provided,
245
+ it represents the minimum and maximum distances.
246
+
247
+ Returns
248
+ -------
249
+ border_label : ndarray
250
+ An instance mask containing the contour of the segmented objects.
251
+
252
+ Notes
253
+ -----
254
+ This function generates an instance mask representing the contour of the segmented instances in the label image.
255
+ It uses the distance_transform_edt function from scipy.ndimage to compute the Euclidean distance transform.
256
+ The contour is defined based on the specified distance(s) from the edge of each instance.
257
+ The resulting mask, `border_label`, contains the contour regions, while the interior regions are set to zero.
258
+
259
+ Examples
260
+ --------
261
+ >>> border_label = contour_of_instance_segmentation(label, distance=3)
262
+ # Generate a binary mask containing the contour of the segmented instances with a maximum distance of 3 pixels.
263
+
264
+ """
265
+ if isinstance(distance,(list,tuple)) or distance >= 0 :
266
+
267
+ edt = distance_transform_edt(label)
268
+
269
+ if isinstance(distance, list) or isinstance(distance, tuple):
270
+ min_distance = distance[0]; max_distance = distance[1]
271
+
272
+ elif isinstance(distance, (int, float)):
273
+ min_distance = 0
274
+ max_distance = distance
275
+
276
+ thresholded = (edt <= max_distance) * (edt > min_distance)
277
+ border_label = np.copy(label)
278
+ border_label[np.where(thresholded == 0)] = 0
279
+
280
+ else:
281
+ size = (2*abs(int(distance))+1, 2*abs(int(distance))+1)
282
+ dilated_image = ndimage.grey_dilation(label, footprint=disk(int(abs(distance)))) #size=size,
283
+ border_label=np.copy(dilated_image)
284
+ matching_cells = np.logical_and(dilated_image != 0, label == dilated_image)
285
+ border_label[np.where(matching_cells == True)] = 0
286
+ border_label[label!=0] = 0.
287
+
288
+ return border_label
289
+
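# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# A positive distance keeps an inner rim of each object, a (min, max) tuple keeps a
# ring at intermediate depth, and a negative distance (the else branch above) returns
# a band dilated outwards from the object.
import numpy as np
from skimage.draw import disk as draw_disk
from celldetective.measure import contour_of_instance_segmentation

label = np.zeros((64, 64), dtype=int)
rr, cc = draw_disk((32, 32), 15)
label[rr, cc] = 1

inner_rim  = contour_of_instance_segmentation(label, distance=3)       # 3 px deep inner rim
ring       = contour_of_instance_segmentation(label, distance=(2, 5))  # between 2 and 5 px deep
outer_band = contour_of_instance_segmentation(label, distance=-4)      # ~4 px wide band outside the object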
290
+ def drop_tonal_features(features):
291
+
292
+ """
293
+ Removes features related to intensity from a list of feature names.
294
+
295
+ This function iterates over a list of feature names and removes any feature that includes the term 'intensity' in its name.
296
+ The operation is performed in-place, meaning the original list of features is modified directly.
297
+
298
+ Parameters
299
+ ----------
300
+ features : list of str
301
+ A list of feature names from which intensity-related features are to be removed.
302
+
303
+ Returns
304
+ -------
305
+ list of str
306
+ The modified list of feature names with intensity-related features removed. Note that this operation modifies the
307
+ input list in-place, so the return value is the same list object with some elements removed.
308
+
309
+ """
310
+
311
+ for f in list(features):  # iterate over a copy so in-place removal does not skip items
312
+ if 'intensity' in f:
313
+ features.remove(f)
314
+ return features
315
+
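# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# Intensity-related feature names are stripped in place, e.g. before measuring
# label-only properties when no image is available.
from celldetective.measure import drop_tonal_features

feats = ['area', 'intensity_mean', 'eccentricity', 'intensity_max']
print(drop_tonal_features(feats))  # ['area', 'eccentricity']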
316
+ def measure_features(img, label, features=['area', 'intensity_mean'], channels=None,
317
+ border_dist=None, haralick_options=None, verbose=True, normalisation_list=None,
318
+ radial_intensity=None,
319
+ radial_channel=None, spot_detection=None):
320
+ """
321
+
322
+ Measure features within segmented regions of an image.
323
+
324
+ Parameters
325
+ ----------
326
+ img : ndarray
327
+ The input image as a NumPy array.
328
+ label : ndarray
329
+ The segmentation labels corresponding to the image regions.
330
+ features : list, optional
331
+ The list of features to measure within the segmented regions. The default is ['area', 'intensity_mean'].
332
+ channels : list, optional
333
+ The list of channel names in the image. If None, generic names ('intensity-0', 'intensity-1', ...) are generated. The default is None.
334
+ border_dist : int, float, or list, optional
335
+ The distance(s) in pixels from the edge of each segmented region to measure features. The default is None.
336
+ haralick_options : dict, optional
337
+ The options for computing Haralick features. The default is None.
338
+
339
+ Returns
340
+ -------
341
+ df_props : DataFrame
342
+ A pandas DataFrame containing the measured features for each segmented region.
343
+
344
+ Notes
345
+ -----
346
+ This function measures features within segmented regions of an image.
347
+ It utilizes the regionprops_table function from the skimage.measure module for feature extraction.
348
+ The features to measure can be specified using the 'features' parameter.
349
+ Optional parameters such as 'channels' and 'border_dist' allow for additional measurements.
350
+ If provided, Haralick features can be computed using the 'haralick_options' parameter.
351
+ The results are returned as a pandas DataFrame.
352
+
353
+ Examples
354
+ --------
355
+ >>> df_props = measure_features(img, label, features=['area', 'intensity_mean'], channels=["brightfield_channel", "dead_nuclei_channel", "live_nuclei_channel"])
356
+ # Measure area and mean intensity within segmented regions of the image.
357
+
358
+ """
359
+
360
+ if features is None:
361
+ features = []
362
+
363
+ # Add label to have identity of mask
364
+ if 'label' not in features:
365
+ features.append('label')
366
+
367
+ if img is None:
368
+ if verbose:
369
+ print('No image was provided... Skip intensity measurements.')
370
+ border_dist = None;
371
+ haralick_options = None;
372
+ features = drop_tonal_features(features)
373
+ if img is not None:
374
+ if img.ndim == 2:
375
+ img = img[:, :, np.newaxis]
376
+ if channels is None:
377
+ channels = [f'intensity-{k}' for k in range(img.shape[-1])]
378
+ if (channels is not None) * (img.ndim == 3):
379
+ assert len(channels) == img.shape[
380
+ -1], "Mismatch between the provided channel names and the shape of the image"
381
+
382
+ if spot_detection is not None:
383
+ for index, channel in enumerate(channels):
384
+ if channel == spot_detection['channel']:
385
+ ind = index
386
+ blobs = blob_detection(img[:, :, ind], label, diameter=spot_detection['diameter'],
387
+ threshold=spot_detection['threshold'])
388
+ df_spots = pd.DataFrame.from_dict(blobs, orient='index',
389
+ columns=['count', 'spot_mean_intensity']).reset_index()
390
+ # Rename columns
391
+ df_spots.columns = ['label', 'spot_count', 'spot_mean_intensity']
392
+ if normalisation_list:
393
+ for norm in normalisation_list:
394
+ for index, channel in enumerate(channels):
395
+ if channel == norm['target channel']:
396
+ ind = index
397
+ if norm['mode'] == 'local':
398
+ normalised_image = normalise_by_cell(img[:, :, ind].copy(), label,
399
+ distance=int(norm['distance']), mode=norm['type'],
400
+ operation=norm['operation'])
401
+ img[:, :, ind] = normalised_image
402
+ else:
403
+ if norm['operation'] == 'Divide':
404
+ normalised_image, bg = field_normalisation(img[:, :, ind].copy(), threshold=norm['threshold'],
405
+ normalisation_operation=norm['operation'],
406
+ clip=False, mode=norm['type'])
407
+ else:
408
+ normalised_image, bg = field_normalisation(img[:, :, ind].copy(), threshold=norm['threshold'],
409
+ normalisation_operation=norm['operation'],
410
+ clip=norm['clip'], mode=norm['type'])
411
+ img[:, :, ind] = normalised_image
412
+
413
+ extra_props = getmembers(extra_properties, isfunction)
414
+ extra_props = [extra_props[i][0] for i in range(len(extra_props))]
415
+
416
+ extra_props_list = []
417
+ feats = features.copy()
418
+ for f in features:
419
+ if f in extra_props:
420
+ feats.remove(f)
421
+ extra_props_list.append(getattr(extra_properties, f))
422
+ if len(extra_props_list) == 0:
423
+ extra_props_list = None
424
+ else:
425
+ extra_props_list = tuple(extra_props_list)
426
+ props = regionprops_table(label, intensity_image=img, properties=feats, extra_properties=extra_props_list)
427
+ df_props = pd.DataFrame(props)
428
+ if spot_detection is not None:
429
+ df_props = df_props.merge(df_spots, how='outer', on='label')
430
+ df_props['spot_count'] = df_props['spot_count'].replace(np.nan, 0)
431
+ df_props['spot_mean_intensity'] = df_props['spot_mean_intensity'].replace(np.nan, 0)
432
+
433
+
434
+
435
+ # if spot_detection is not None:
436
+ # for index, channel in enumerate(channels):
437
+ # if channel == spot_detection['channel']:
438
+ # ind = index
439
+ # blobs = blob_detection(img[:, :, ind], label, diameter=spot_detection['diameter'],
440
+ # threshold=spot_detection['threshold'])
441
+ # df_spots = pd.DataFrame.from_dict(blobs, orient='index', columns=['count', 'spot_mean_intensity']).reset_index()
442
+ # # Rename columns
443
+ # df_spots.columns = ['label', 'spot_count', 'spot_mean_intensity']
444
+ # df_props = df_props.merge(df_spots, how='outer', on='label')
445
+ # df_props['spot_count'] = df_props['spot_count'].replace(np.nan, 0)
446
+ # df_props['spot_mean_intensity'] = df_props['spot_mean_intensity'].replace(np.nan, 0)
447
+
448
+
449
+ if border_dist is not None:
450
+ # automatically drop all non intensity features
451
+ intensity_features_test = [('intensity' in s and 'centroid' not in s and 'peripheral' not in s) for s in
452
+ features]
453
+ intensity_features = list(np.array(features)[np.array(intensity_features_test)])
454
+ # intensity_extra = [(s in extra_props_list)for s in intensity_features]
455
+ # print(intensity_extra)
456
+ intensity_extra = []
457
+ for s in intensity_features:
458
+ if s in extra_props:
459
+ intensity_extra.append(getattr(extra_properties, s))
460
+ intensity_features.remove(s)
461
+ # print(intensity_features)
462
+ # If no intensity feature was passed still measure mean intensity
463
+ if len(intensity_features) == 0:
464
+ if verbose:
465
+ print('No intensity feature was passed... Adding mean intensity for edge measurement...')
466
+ intensity_features = np.append(intensity_features, 'intensity_mean')
467
+ intensity_features = list(np.append(intensity_features, 'label'))
468
+
469
+ # Remove extra intensity properties from border measurements
470
+ new_intensity_features = intensity_features.copy()
471
+ for int_feat in intensity_features:
472
+ if int_feat in extra_props:
473
+ new_intensity_features.remove(int_feat)
474
+ intensity_features = new_intensity_features
475
+
476
+ if (isinstance(border_dist, int) or isinstance(border_dist, float)):
477
+ border_label = contour_of_instance_segmentation(label, border_dist)
478
+ props_border = regionprops_table(border_label, intensity_image=img, properties=intensity_features)
479
+ df_props_border = pd.DataFrame(props_border)
480
+ for c in df_props_border.columns:
481
+ if 'intensity' in c:
482
+ df_props_border = df_props_border.rename({c: c+f'_edge_{border_dist}px'},axis=1)
483
+
484
+ if isinstance(border_dist, list):
485
+ df_props_border_list = []
486
+ for d in border_dist:
487
+ border_label = contour_of_instance_segmentation(label, d)
488
+ props_border = regionprops_table(border_label, intensity_image=img, properties=intensity_features)
489
+ df_props_border_d = pd.DataFrame(props_border)
490
+ for c in df_props_border_d.columns:
491
+ if 'intensity' in c:
492
+ if '-' in str(d):
493
+ df_props_border_d = df_props_border_d.rename({c: c + f'_outer_edge_{d}px'}, axis=1)
494
+ else:
495
+ df_props_border_d = df_props_border_d.rename({c: c + f'_edge_{d}px'}, axis=1)
496
+ df_props_border_list.append(df_props_border_d)
497
+
498
+ df_props_border = reduce(lambda left,right: pd.merge(left,right,on=['label'],
499
+ how='outer'), df_props_border_list)
500
+
501
+ df_props = df_props.merge(df_props_border, how='outer', on='label')
502
+
503
+ if haralick_options is not None:
504
+ try:
505
+ df_haralick = compute_haralick_features(img, label, channels=channels, **haralick_options)
506
+ df_props = df_props.merge(df_haralick, left_on='label',right_on='cell_id')
507
+ #df_props = df_props.drop(columns=['cell_label'])
508
+ except Exception as e:
509
+ print(e)
510
+ pass
511
+
512
+ if channels is not None:
513
+ df_props = rename_intensity_column(df_props, channels)
514
+ df_props.rename(columns={"label": "class_id"},inplace=True)
515
+ df_props['class_id'] = df_props['class_id'].astype(float)
516
+
517
+ return df_props
518
+
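# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# Synthetic two-channel frame with two labelled cells. border_dist adds edge-restricted
# intensity columns and haralick_options is forwarded to compute_haralick_features
# (the keys below mirror that function's keyword arguments).
import numpy as np
from celldetective.measure import measure_features

img = np.random.rand(128, 128, 2)
label = np.zeros((128, 128), dtype=int)
label[20:50, 20:50] = 1
label[70:110, 60:100] = 2

df_props = measure_features(
    img, label,
    features=['area', 'intensity_mean', 'eccentricity'],
    channels=['brightfield_channel', 'live_nuclei_channel'],
    border_dist=[3, 6],  # one set of edge intensities per distance
    haralick_options={'target_channel': 0, 'n_intensity_bins': 64, 'distance': 1},
)
print(df_props.columns.tolist())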
519
+ def compute_haralick_features(img, labels, channels=None, target_channel=0, scale_factor=1, percentiles=(0.01,99.99), clip_values=None,
520
+ n_intensity_bins=256, ignore_zero=True, return_mean=True, return_mean_ptp=False, distance=1, disable_progress_bar=False, return_norm_image_only=False, return_digit_image_only=False):
521
+
522
+ """
523
+
524
+ Compute Haralick texture features on each segmented region of an image.
525
+
526
+ Parameters
527
+ ----------
528
+ img : ndarray
529
+ The input image as a NumPy array.
530
+ labels : ndarray
531
+ The segmentation labels corresponding to the image regions.
532
+ target_channel : int, optional
533
+ The target channel index of the image. The default is 0.
534
+ channels : list or str, optional
535
+ The channel names of the image; the name at `target_channel` is used to label the Haralick features. The default is None.
536
+ scale_factor : float, optional
537
+ The scale factor for resampling the image and labels. The default is 1.
538
+ percentiles : tuple of float, optional
539
+ The percentiles to use for image normalization. The default is (0.01, 99.99).
540
+ clip_values : tuple of float, optional
541
+ The minimum and maximum values to clip the image. If None, percentiles are used. The default is None.
542
+ n_intensity_bins : int, optional
543
+ The number of intensity bins for image normalization. The default is 256.
544
+ ignore_zero : bool, optional
545
+ Flag indicating whether to ignore zero values during feature computation. The default is True.
546
+ return_mean : bool, optional
547
+ Flag indicating whether to return the mean value of each Haralick feature. The default is True.
548
+ return_mean_ptp : bool, optional
549
+ Flag indicating whether to return the mean and peak-to-peak values of each Haralick feature. The default is False.
550
+ distance : int, optional
551
+ The distance parameter for Haralick feature computation. The default is 1.
552
+
553
+ Returns
554
+ -------
555
+ features : DataFrame
556
+ A pandas DataFrame containing the computed Haralick features for each segmented region.
557
+
558
+ Notes
559
+ -----
560
+ This function computes Haralick features on an image within segmented regions.
561
+ It uses the mahotas library for feature extraction and pandas DataFrame for storage.
562
+ The image is rescaled, normalized and digitized based on the specified parameters.
563
+ Haralick features are computed for each segmented region, and the results are returned as a DataFrame.
564
+
565
+ Examples
566
+ --------
567
+ >>> features = compute_haralick_features(img, labels, channels=["brightfield_channel"], target_channel=0)
568
+ # Compute Haralick features on the image within segmented regions.
569
+
570
+ """
571
+
572
+ assert ((img.ndim==2)|(img.ndim==3)),f'Invalid image shape to compute the Haralick features. Expected YXC, got {img.shape}...'
573
+ assert img.shape[:2]==labels.shape,f'Mismatch between image shape {img.shape} and labels shape {labels.shape}'
574
+
575
+ if img.ndim==2:
576
+ img = img[:,:,np.newaxis]
577
+ target_channel = 0
578
+ if isinstance(channels, list):
579
+ modality = channels[0]
580
+ elif isinstance(channels, str):
581
+ modality = channels
582
+ else:
583
+ print('Channel name unrecognized...')
584
+ modality=''
585
+ elif img.ndim==3:
586
+ assert target_channel is not None,"The image is multichannel. Please provide a target channel to compute the Haralick features. Abort."
587
+ modality = channels[target_channel]
588
+
589
+ haralick_labels = ["angular_second_moment",
590
+ "contrast",
591
+ "correlation",
592
+ "sum_of_square_variance",
593
+ "inverse_difference_moment",
594
+ "sum_average",
595
+ "sum_variance",
596
+ "sum_entropy",
597
+ "entropy",
598
+ "difference_variance",
599
+ "difference_entropy",
600
+ "information_measure_of_correlation_1",
601
+ "information_measure_of_correlation_2",
602
+ "maximal_correlation_coefficient"]
603
+
604
+ haralick_labels = ['haralick_'+h+"_"+modality for h in haralick_labels]
605
+ if len(img.shape)==3:
606
+ img = img[:,:,target_channel]
607
+
608
+ # Rescale image and mask
609
+ img = zoom(img,[scale_factor,scale_factor],order=3).astype(float)
610
+ labels = zoom(labels, [scale_factor,scale_factor],order=0)
611
+
612
+ # Normalize image
613
+ if clip_values is None:
614
+ min_value = np.nanpercentile(img[img!=0.].flatten(), percentiles[0])
615
+ max_value = np.nanpercentile(img[img!=0.].flatten(), percentiles[1])
616
+ else:
617
+ min_value = clip_values[0]; max_value = clip_values[1]
618
+
619
+ img -= min_value
620
+ img /= (max_value-min_value) / n_intensity_bins
621
+ img[img<=0.] = 0.
622
+ img[img>=n_intensity_bins] = n_intensity_bins
623
+
624
+ if return_norm_image_only:
625
+ return img
626
+
627
+ hist,bins = np.histogram(img.flatten(),bins=n_intensity_bins)
628
+ centered_bins = [bins[0]] + [bins[i] + (bins[i+1] - bins[i])/2. for i in range(len(bins)-1)]
629
+
630
+ digitized = np.digitize(img, bins)
631
+ img_binned = np.zeros_like(img)
632
+ for i in range(img.shape[0]):
633
+ for j in range(img.shape[1]):
634
+ img_binned[i,j] = centered_bins[digitized[i,j] - 1]
635
+
636
+ img = img_binned.astype(int)
637
+ if return_digit_image_only:
638
+ return img
639
+
640
+ haralick_properties = []
641
+
642
+ for cell in tqdm(np.unique(labels)[1:],disable=disable_progress_bar):
643
+
644
+ mask = labels==cell
645
+ f = img*mask
646
+ features = haralick(f, ignore_zeros=ignore_zero,return_mean=return_mean,distance=distance)
647
+
648
+ dictionary = {'cell_id': cell}
649
+ for k in range(len(features)):
650
+ dictionary.update({haralick_labels[k]: features[k]})
651
+ haralick_properties.append(dictionary)
652
+
653
+ assert len(haralick_properties)==(len(np.unique(labels))-1),'Some cells have not been measured...'
654
+
655
+ return pd.DataFrame(haralick_properties)
656
+
657
+
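# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# Haralick textures are computed per labelled cell after binning the selected channel;
# the channel name is only used to label the output columns (hypothetical name below).
import numpy as np
from celldetective.measure import compute_haralick_features

frame = np.random.rand(96, 96, 1)
labels = np.zeros((96, 96), dtype=int)
labels[10:40, 10:40] = 1
labels[50:90, 50:90] = 2

df_texture = compute_haralick_features(frame, labels, channels=['adhesion_channel'],
                                        target_channel=0, n_intensity_bins=32, distance=1)
print(df_texture[['cell_id', 'haralick_contrast_adhesion_channel']])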
658
+ def measure_isotropic_intensity(positions, # Dataframe of cell positions @ t
659
+ img, # multichannel frame (YXC) @ t
660
+ channels=None, #channels, need labels to name measurements
661
+ intensity_measurement_radii=None, #list of radii, single value is circle, tuple is ring?
662
+ operations = ['mean'],
663
+ measurement_kernel = None,
664
+ pbar=None,
665
+ column_labels={'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'},
666
+ verbose=True,
667
+ ):
668
+
669
+ """
670
+
671
+ Measure isotropic intensity values around cell positions in an image.
672
+
673
+ Parameters
674
+ ----------
675
+ positions : pandas DataFrame
676
+ DataFrame of cell positions at time 't' containing columns specified in `column_labels`.
677
+ img : numpy array
678
+ Multichannel frame (YXC) at time 't' used for intensity measurement.
679
+ channels : list or str, optional
680
+ List of channel names corresponding to the image channels. Default is None.
681
+ intensity_measurement_radii : int, list, or tuple
682
+ Radius or list of radii specifying the size of the isotropic measurement area.
683
+ If a single value is provided, a circular measurement area is used. If a list or tuple of two values
684
+ is provided, a ring-shaped measurement area is used. Default is None.
685
+ operations : list, optional
686
+ List of operations to perform on the intensity values. Default is ['mean'].
687
+ measurement_kernel : numpy array, optional
688
+ Kernel used for intensity measurement. If None, a circular or ring-shaped kernel is generated
689
+ based on the provided `intensity_measurement_radii`. Default is None.
690
+ pbar : tqdm progress bar, optional
691
+ Progress bar for tracking the measurement process. Default is None.
692
+ column_labels : dict, optional
693
+ Dictionary containing the column labels for the DataFrame. Default is {'track': "TRACK_ID",
694
+ 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}.
695
+ verbose : bool, optional
696
+ If True, enables verbose output. Default is True.
697
+
698
+ Returns
699
+ -------
700
+ pandas DataFrame
701
+ The updated DataFrame `positions` with additional columns representing the measured intensity values.
702
+
703
+ Notes
704
+ -----
705
+ This function measures the isotropic intensity values around the cell positions specified in the `positions`
706
+ DataFrame using the provided image `img`. The intensity measurements are performed using circular or ring-shaped
707
+ measurement areas defined by the `intensity_measurement_radii`. The measurements are calculated for each channel
708
+ specified in the `channels` list. The resulting intensity values are stored in additional columns of the `positions`
709
+ DataFrame. The `operations` parameter allows specifying different operations to be performed on the intensity
710
+ values, such as 'mean', 'median', etc. The measurement kernel can be customized by providing the `measurement_kernel`
711
+ parameter. If not provided, the measurement kernel is automatically generated based on the `intensity_measurement_radii`.
712
+ The progress bar `pbar` can be used to track the measurement process. The `column_labels` dictionary is used to
713
+ specify the column labels for the DataFrame.
714
+
715
+ Examples
716
+ --------
717
+ >>> positions = pd.DataFrame({'TRACK_ID': [1, 2, 3], 'FRAME': [1, 1, 1],
718
+ ... 'POSITION_X': [10, 20, 30], 'POSITION_Y': [15, 25, 35]})
719
+ >>> img = np.random.rand(100, 100, 3)
720
+ >>> channels = ['channel1', 'channel2', 'channel3']
721
+ >>> intensity_measurement_radii = 5
722
+ >>> positions = measure_isotropic_intensity(positions, img, channels=channels,
723
+ ... intensity_measurement_radii=intensity_measurement_radii)
724
+ # Measure isotropic intensity values around cell positions in the image.
725
+
726
+ """
727
+
728
+
729
+ assert ((img.ndim==2)|(img.ndim==3)),f'Invalid image shape for the isotropic intensity measurement. Expected YXC, got {img.shape}...'
730
+
731
+ if img.ndim==2:
732
+ img = img[:,:,np.newaxis]
733
+ if isinstance(channels, str):
734
+ channels = [channels]
735
+ else:
736
+ if verbose:
737
+ print('Channel name unrecognized...')
738
+ channels=['intensity']
739
+ elif img.ndim==3:
740
+ assert channels is not None,"The image is multichannel. Please provide the list of channel names. Abort."
741
+
742
+ if isinstance(intensity_measurement_radii, int) or isinstance(intensity_measurement_radii, float):
743
+ intensity_measurement_radii = [intensity_measurement_radii]
744
+
745
+ if (measurement_kernel is None)*(intensity_measurement_radii is not None):
746
+
747
+ for r in intensity_measurement_radii:
748
+
749
+ if isinstance(r,list):
750
+ mask = create_patch_mask(2*max(r)+1,2*max(r)+1,((2*max(r))//2,(2*max(r))//2),radius=r)
751
+ else:
752
+ mask = create_patch_mask(2*r+1,2*r+1,((2*r)//2,(2*r)//2),r)
753
+
754
+ pad_value_x = mask.shape[0]//2 + 1
755
+ pad_value_y = mask.shape[1]//2 + 1
756
+ frame_padded = np.pad(img, [(pad_value_x,pad_value_x),(pad_value_y,pad_value_y),(0,0)])
757
+
758
+ # Find a way to measure intensity in mask
759
+ for tid,group in positions.groupby(column_labels['track']):
760
+
761
+ x = group[column_labels['x']].to_numpy()[0]
762
+ y = group[column_labels['y']].to_numpy()[0]
763
+
764
+ xmin = int(x)
765
+ xmax = int(x) + 2*pad_value_y - 1
766
+ ymin = int(y)
767
+ ymax = int(y) + 2*pad_value_x - 1
768
+
769
+ assert frame_padded[ymin:ymax,xmin:xmax,0].shape == mask.shape,"Shape mismatch between the measurement kernel and the image..."
770
+
771
+ expanded_mask = np.expand_dims(mask, axis=-1) # shape: (X, Y, 1)
772
+ crop = frame_padded[ymin:ymax,xmin:xmax]
773
+ projection = np.multiply(crop, expanded_mask)
774
+
775
+ for op in operations:
776
+ func = getattr(np, op)  # e.g. np.mean
777
+ intensity_values = func(projection, axis=(0,1), where=projection!=0.)
778
+ for k in range(crop.shape[-1]):
779
+ if isinstance(r,list):
780
+ positions.loc[group.index, f'{channels[k]}_ring_{min(r)}_{max(r)}_{op}'] = intensity_values[k]
781
+ else:
782
+ positions.loc[group.index, f'{channels[k]}_circle_{r}_{op}'] = intensity_values[k]
783
+
784
+ elif (measurement_kernel is not None):
785
+ # do something like this
786
+ mask = measurement_kernel
787
+ pad_value_x = mask.shape[0]//2 + 1
788
+ pad_value_y = mask.shape[1]//2 + 1
789
+ frame_padded = np.pad(img, [(pad_value_x,pad_value_x),(pad_value_y,pad_value_y),(0,0)])
790
+
791
+ for tid,group in positions.groupby(column_labels['track']):
792
+
793
+ x = group[column_labels['x']].to_numpy()[0]
794
+ y = group[column_labels['y']].to_numpy()[0]
795
+
796
+ xmin = int(x)
797
+ xmax = int(x) + 2*pad_value_y - 1
798
+ ymin = int(y)
799
+ ymax = int(y) + 2*pad_value_x - 1
800
+
801
+ assert frame_padded[ymin:ymax,xmin:xmax,0].shape == mask.shape,"Shape mismatch between the measurement kernel and the image..."
802
+
803
+ expanded_mask = np.expand_dims(mask, axis=-1) # shape: (X, Y, 1)
804
+ crop = frame_padded[ymin:ymax,xmin:xmax]
805
+ projection = np.multiply(crop, expanded_mask)
806
+
807
+ for op in operations:
808
+ func = getattr(np, op)  # e.g. np.mean
809
+ intensity_values = func(projection, axis=(0,1), where=projection!=0.)
810
+ for k in range(crop.shape[-1]):
811
+ positions.loc[group.index, f'{channels[k]}_custom_kernel_{op}'] = intensity_values[k]
812
+
813
+ if pbar is not None:
814
+ pbar.update(1)
815
+ positions['class_id'] = positions['class_id'].astype(float)
816
+ return positions
817
+
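# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# A plain radius measures inside a filled circle; an element written as [r_in, r_out]
# measures inside a ring. The positions table must also carry a 'class_id' column,
# since it is cast to float at the end of the function.
import numpy as np
import pandas as pd
from celldetective.measure import measure_isotropic_intensity

img = np.random.rand(128, 128, 2)
positions = pd.DataFrame({
    'TRACK_ID': [1, 2], 'FRAME': [0, 0],
    'POSITION_X': [40.0, 80.0], 'POSITION_Y': [50.0, 90.0],
    'class_id': [1.0, 2.0],
})
positions = measure_isotropic_intensity(
    positions, img,
    channels=['channel1', 'channel2'],
    intensity_measurement_radii=[6, [8, 14]],  # a 6 px circle and an 8-14 px ring
    operations=['mean'],
)
print([c for c in positions.columns if 'circle' in c or 'ring' in c])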
818
+ def measure_at_position(pos, mode, return_measurements=False, threads=1):
819
+
820
+ """
821
+ Executes a measurement script at a specified position directory, optionally returning the measured data.
822
+
823
+ This function calls an external Python script to perform measurements on data
824
+ located in a specified position directory. The measurement mode determines the type of analysis performed by the script.
825
+ The function can either return the path to the resulting measurements table or load and return the measurements as a
826
+ pandas DataFrame.
827
+
828
+ Parameters
829
+ ----------
830
+ pos : str
831
+ The path to the position directory where the measurements should be performed. The path should be a valid directory.
832
+ mode : str
833
+ The measurement mode to be used by the script. This determines the type of analysis performed (e.g., 'tracking',
834
+ 'feature_extraction').
835
+ return_measurements : bool, optional
836
+ If True, the function loads the resulting measurements from a CSV file into a pandas DataFrame and returns it. If
837
+ False, the function returns None (default is False).
838
+
839
+ Returns
840
+ -------
841
+ pandas.DataFrame or None
842
+ If `return_measurements` is True, returns a pandas DataFrame containing the measurements. Otherwise, returns None.
843
+
844
+ """
845
+
846
+ pos = pos.replace('\\','/')
847
+ pos = rf"{pos}"
848
+ assert os.path.exists(pos),f'Position {pos} is not a valid path.'
849
+ if not pos.endswith('/'):
850
+ pos += '/'
851
+ script_path = os.sep.join([abs_path, 'scripts', 'measure_cells.py'])
852
+ cmd = f'python "{script_path}" --pos "{pos}" --mode "{mode}" --threads "{threads}"'
853
+ subprocess.call(cmd, shell=True)
854
+
855
+ table = pos + os.sep.join(["output","tables",f"trajectories_{mode}.csv"])
856
+ if return_measurements:
857
+ df = pd.read_csv(table)
858
+ return df
859
+ else:
860
+ return None
861
+
862
+
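# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# measure_at_position shells out to celldetective/scripts/measure_cells.py for one
# position folder. The path and the population name below are placeholders.
from celldetective.measure import measure_at_position

pos_folder = "/path/to/experiment/W1/100"  # hypothetical position directory
df = measure_at_position(pos_folder, "targets", return_measurements=True)
# The script writes output/tables/trajectories_targets.csv inside the position folder,
# which is what is read back when return_measurements=True.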
863
+ def local_normalisation(image, labels, background_intensity, mode, operation):
864
+ """
865
+ Perform local normalization on an image based on labels.
866
+
867
+ Parameters:
868
+ - image (numpy.ndarray): The input image.
869
+ - labels (numpy.ndarray): An array specifying the labels for different regions in the image.
870
+ - background_intensity (pandas.DataFrame): A DataFrame containing background intensity values
871
+ corresponding to each label.
872
+ - mode (str): The normalization mode ('Mean' or 'Median').
873
+ - operation (str): The operation to perform ('Subtract' or 'Divide').
874
+
875
+ Returns:
876
+ - numpy.ndarray: The normalized image.
877
+
878
+ This function performs local normalization on an image based on the provided labels. It iterates over
879
+ each unique label, excluding the background label (0), and performs the specified operation with the
880
+ background intensity values corresponding to that label. The background intensity values are obtained
881
+ from the provided background_intensity DataFrame based on the normalization mode.
882
+
883
+ If the operation is 'Subtract', the background intensity is subtracted from the image pixel values.
884
+ If the operation is 'Divide', the image pixel values are divided by the background intensity.
885
+
886
+ Example:
887
+ >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
888
+ >>> labels = np.array([[0, 1, 1], [2, 2, 3], [3, 3, 0]])
889
+ >>> background_intensity = pd.DataFrame({'intensity_mean': [10, 20, 30]})
890
+ >>> mode = 'Mean'
891
+ >>> operation = 'Subtract'
892
+ >>> result = local_normalisation(image, labels, background_intensity, mode, operation)
893
+ >>> print(result)
894
+ [[ 1. -8. -7.]
895
+ [-16. -15. -24.]
896
+ [-23. -22. 9.]]
897
+
898
+ Note:
899
+ - The background intensity DataFrame should have columns named 'intensity_mean' or 'intensity_median'
900
+ based on the mode specified.
901
+ - The background intensity values should be provided in the same order as the labels.
902
+ """
903
+ for index, cell in enumerate(np.unique(labels)):
904
+ if cell == 0:
905
+ continue
906
+ if operation == 'Subtract':
907
+ image[np.where(labels == cell)] = image[np.where(labels == cell)].astype(float) - \
908
+ background_intensity[f'intensity_{mode.lower()}'][index-1].astype(float)
909
+ elif operation == 'Divide':
910
+ image[np.where(labels == cell)] = image[np.where(labels == cell)].astype(float) / \
911
+ background_intensity[f'intensity_{mode.lower()}'][index-1].astype(float)
912
+ return image.astype(float)
913
+
914
+
915
+ def normalise_by_cell(image, labels, distance, mode, operation):
916
+ """
917
+ Normalize an image based on cell regions.
918
+
919
+ Parameters:
920
+ - image (numpy.ndarray): The input image.
921
+ - labels (numpy.ndarray): An array specifying the labels for different regions in the image.
922
+ - distance (float): The distance parameter for finding the contour of cell regions.
923
+ - mode (str): The normalization mode ('Mean' or 'Median').
924
+ - operation (str): The operation to perform ('Subtract' or 'Divide').
925
+
926
+ Returns:
927
+ - numpy.ndarray: The normalized image.
928
+
929
+ This function normalizes an image based on cell regions defined by the provided labels. It calculates
930
+ the border of cell regions using the contour_of_instance_segmentation function with the specified
931
+ distance parameter. Then, it computes the background intensity of each cell region based on the mode
932
+ ('Mean' or 'Median'). Finally, it performs local normalization using the local_normalisation function
933
+ and returns the normalized image.
934
+
935
+ Example:
936
+ >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
937
+ >>> labels = np.array([[0, 1, 1], [2, 2, 3], [3, 3, 0]])
938
+ >>> distance = 2.0
939
+ >>> mode = 'Mean'
940
+ >>> operation = 'Subtract'
941
+ >>> result = normalise_by_cell(image, labels, distance, mode, operation)
942
+ >>> print(result)
943
+ [[-9. -8. -7.]
944
+ [14. 15. 6.]
945
+ [27. 28. 9.]]
946
+
947
+ Note:
948
+ - The contour of cell regions is calculated using the contour_of_instance_segmentation function.
949
+ - The background intensity is computed based on the specified mode ('Mean' or 'Median').
950
+ - The operation determines whether to subtract or divide the background intensity from the image.
951
+ """
952
+ border = contour_of_instance_segmentation(label=labels, distance=distance * (-1))
953
+ if mode == 'Mean':
954
+ background_intensity = regionprops_table(intensity_image=image, label_image=border,
955
+ properties=['intensity_mean'])
956
+ elif mode == 'Median':
957
+ median = []
958
+ median.append(getattr(extra_properties, 'intensity_median'))
959
+ background_intensity = regionprops_table(intensity_image=image, label_image=border,
960
+ extra_properties=median)
961
+ normalised_frame = local_normalisation(image=image.astype(float).copy(),
962
+ labels=labels, background_intensity=background_intensity, mode=mode,
963
+ operation=operation)
964
+
965
+ return normalised_frame
966
+
967
+
968
+ def paraboloid(x, y, a, b, c, d, e, g):
969
+ return a * x ** 2 + b * y ** 2 + c * x * y + d * x + e * y + g
970
+
971
+
972
+ def plane(x, y, a, b, c):
973
+ return a * x + b * y + c
974
+
975
+
976
+ def fit_plane(image, cell_masks=None):
977
+ """
978
+ Fit a plane to the given image data.
979
+
980
+ Parameters:
981
+ - image (numpy.ndarray): The input image data.
982
+ - cell_masks (numpy.ndarray, optional): An array specifying cell masks. If provided, areas covered by
983
+ cell masks will be excluded from the fitting process.
984
+
985
+ Returns:
986
+ - numpy.ndarray: The fitted plane.
987
+
988
+ This function fits a plane to the given image data using least squares regression. It constructs a mesh
989
+ grid based on the dimensions of the image and fits a plane model to the data points. If cell masks are
990
+ provided, areas covered by cell masks will be excluded from the fitting process.
991
+
992
+ Example:
993
+ >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
994
+ >>> result = fit_plane(image)
995
+ >>> print(result)
996
+ [[1. 2. 3.]
997
+ [4. 5. 6.]
998
+ [7. 8. 9.]]
999
+
1000
+ Note:
1001
+ - The 'cell_masks' parameter allows excluding areas covered by cell masks from the fitting process.
1002
+ """
1003
+ data = np.empty(image.shape)
1004
+ x = np.arange(0, image.shape[1])
1005
+ y = np.arange(0, image.shape[0])
1006
+ xx, yy = np.meshgrid(x, y)
1007
+ params = Parameters()
1008
+ params.add('a', value=1)
1009
+ params.add('b', value=1)
1010
+ params.add('c', value=1)
1011
+ model = Model(plane, independent_vars=['x', 'y'])
1012
+ weights = np.ones_like(xx, dtype=float)
1013
+ if cell_masks is not None: weights[np.where(cell_masks > 0)] = 0.  # masks are optional
1014
+ result = model.fit(image,
1015
+ x=xx,
1016
+ y=yy,
1017
+ weights=weights,
1018
+ params=params, max_nfev=100)
1019
+ a = result.params['a'].value
1020
+ b = result.params['b'].value
1021
+ c = result.params['c'].value
1022
+ fitted_plane = plane(xx, yy, a, b, c)
1023
+ return fitted_plane
1024
+
1025
+
1026
+ def fit_paraboloid(image, cell_masks=None):
1027
+ """
1028
+ Fit a paraboloid to the given image data.
1029
+
1030
+ Parameters:
1031
+ - image (numpy.ndarray): The input image data.
1032
+ - cell_masks (numpy.ndarray, optional): An array specifying cell masks. If provided, areas covered by
1033
+ cell masks will be excluded from the fitting process.
1034
+
1035
+ Returns:
1036
+ - numpy.ndarray: The fitted paraboloid.
1037
+
1038
+ This function fits a paraboloid to the given image data using least squares regression. It constructs
1039
+ a mesh grid based on the dimensions of the image and fits a paraboloid model to the data points. If cell
1040
+ masks are provided, areas covered by cell masks will be excluded from the fitting process.
1041
+
1042
+ Example:
1043
+ >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1044
+ >>> result = fit_paraboloid(image)
1045
+ >>> print(result)
1046
+ [[1. 2. 3.]
1047
+ [4. 5. 6.]
1048
+ [7. 8. 9.]]
1049
+
1050
+ Note:
1051
+ - The 'cell_masks' parameter allows excluding areas covered by cell masks from the fitting process.
1052
+ """
1053
+ data = np.empty(image.shape)
1054
+ x = np.arange(0, image.shape[1])
1055
+ y = np.arange(0, image.shape[0])
1056
+ xx, yy = np.meshgrid(x, y)
1057
+ params = Parameters()
1058
+ params.add('a', value=1)
1059
+ params.add('b', value=1)
1060
+ params.add('c', value=1)
1061
+ params.add('d', value=1)
1062
+ params.add('e', value=1)
1063
+ params.add('g', value=1)
1064
+ model = Model(paraboloid, independent_vars=['x', 'y'])
1065
+ weights = np.ones_like(xx, dtype=float)
1066
+ if cell_masks is not None: weights[np.where(cell_masks > 0)] = 0.  # masks are optional
1067
+
1068
+ result = model.fit(image,
1069
+ x=xx,
1070
+ y=yy,
1071
+ weights=weights,
1072
+ params=params, max_nfev=100)
1073
+ a = result.params['a'].value
1074
+ b = result.params['b'].value
1075
+ c = result.params['c'].value
1076
+ d = result.params['d'].value
1077
+ e = result.params['e'].value
1078
+ g = result.params['g'].value
1079
+ fitted_paraboloid = paraboloid(xx, yy, a, b, c, d, e, g)
1080
+ return fitted_paraboloid
1081
+
1082
+
1083
+ def correct_image(img, cell_masks=None, normalisation_operation=None, clip=False, mode=None):
1084
+ """
1085
+ Correct an image based on fitted models.
1086
+
1087
+ Parameters:
1088
+ - img (numpy.ndarray): The input image data.
1089
+ - cell_masks (numpy.ndarray, optional): An array specifying cell masks. If provided, areas covered by
1090
+ cell masks will be considered during correction.
1091
+ - normalisation_operation (str, optional): The normalisation operation ('Subtract' or 'Divide') to apply
1092
+ during correction.
1093
+ - clip (bool, optional): Whether to clip corrected values below zero to a minimum value of 0.00001.
1094
+ - mode (str, optional): The mode of correction ('Paraboloid' or 'Plane').
1095
+
1096
+ Returns:
1097
+ - tuple: A tuple containing the corrected image and the fitted model parameters.
1098
+
1099
+ This function corrects an image based on fitted models such as paraboloid or plane. It first fits a model
1100
+ to the image data based on the specified mode. Then, it performs correction by subtracting or dividing the
1101
+ image by the fitted model. Optionally, it clips corrected values below zero to a minimum value of 0.00001.
1102
+
1103
+ Example:
1104
+ >>> img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1105
+ >>> correction, para = correct_image(img, mode='Paraboloid', normalisation_operation='Subtract', clip=True)
1106
+ >>> print(correction)
1107
+ [[0. 0. 0.]
1108
+ [0. 0. 0.]
1109
+ [0. 0. 0.]]
1110
+ >>> print(para)
1111
+ [[1. 2. 3.]
1112
+ [4. 5. 6.]
1113
+ [7. 8. 9.]]
1114
+
1115
+ Note:
1116
+ - The 'cell_masks' parameter allows considering areas covered by cell masks during correction.
1117
+ - The 'normalisation_operation' parameter specifies whether to subtract or divide the fitted model from
1118
+ the image.
1119
+ - If 'clip' is set to True, corrected values below zero will be clipped to a minimum value of 0.00001.
1120
+ """
1121
+ if mode == "Paraboloid":
1122
+ para = fit_paraboloid(img.astype(float), cell_masks=cell_masks).astype(float)
1123
+ elif mode == "Plane":
1124
+ para = fit_plane(img.astype(float), cell_masks=cell_masks).astype(float)
1125
+
1126
+ if para is not None:
1127
+ para = np.array(para)
1128
+ if normalisation_operation == 'Subtract':
1129
+ correction = img.astype(float) - para.astype(float) # + 1000.
1130
+ else:
1131
+ correction = img.astype(float) / para.astype(float) # + 1000.
1132
+ correction = np.array(correction, dtype=float)
1133
+ if clip:
1134
+ correction[correction <= 0] = 0.00001
1135
+
1136
+ else:
1137
+ correction = None
1138
+
1139
+ return correction, para
1140
+
1141
+
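# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# Fit a plane to a tilted background while masking out a bright object, then divide
# the frame by the fitted background to flatten the illumination.
import numpy as np
from celldetective.measure import correct_image

yy, xx = np.mgrid[0:128, 0:128]
frame = 100.0 + 0.2 * xx + 0.1 * yy       # tilted background
frame[40:60, 40:60] += 500.0              # bright object, excluded from the fit
cell_mask = np.zeros_like(frame)
cell_mask[40:60, 40:60] = 1

flat, background_fit = correct_image(frame, cell_masks=cell_mask,
                                     normalisation_operation='Divide',
                                     clip=False, mode='Plane')
print(flat[0, 0], flat[-1, -1])           # both close to 1 after division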
1142
+ def field_normalisation(img, threshold, normalisation_operation, clip, mode):
1143
+ """
1144
+ Perform field normalization on an image.
1145
+
1146
+ Parameters:
1147
+ - img (numpy.ndarray): The input image data.
1148
+ - threshold (float): The threshold value for determining regions of interest.
1149
+ - normalisation_operation (str): The normalization operation ('Subtract' or 'Divide') to apply during correction.
1150
+ - clip (bool): Whether to clip corrected values below zero to a minimum value of 0.00001.
1151
+ - mode (str): The mode of correction ('Paraboloid' or 'Plane').
1152
+
1153
+ Returns:
1154
+ - tuple: A tuple containing the normalized image and the fitted background model.
1155
+
1156
+ This function performs field normalization on an image based on regions of interest determined by the
1157
+ specified threshold. It identifies regions with standard deviation above the threshold and considers them
1158
+ as areas of interest. Then, it corrects the image using the 'correct_image' function based on the specified
1159
+ mode and normalization operation.
1160
+
1161
+ Example:
1162
+ >>> img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1163
+ >>> fluo_max, bg_fit = field_normalisation(img, threshold=0.5, normalisation_operation='Subtract', clip=True, mode='Paraboloid')
1164
+ >>> print(fluo_max)
1165
+ [[0. 0. 0.]
1166
+ [0. 0. 0.]
1167
+ [0. 0. 0.]]
1168
+ >>> print(bg_fit)
1169
+ [[1. 2. 3.]
1170
+ [4. 5. 6.]
1171
+ [7. 8. 9.]]
1172
+
1173
+ Note:
1174
+ - The 'threshold' parameter determines regions of interest based on standard deviation.
1175
+ - The 'normalisation_operation' parameter specifies whether to subtract or divide the fitted model from
1176
+ the image during correction.
1177
+ - If 'clip' is set to True, corrected values below zero will be clipped to a minimum value of 0.00001.
1178
+ """
1179
+ std_img = std_filter(gauss_filter(img, 2), 4)
1180
+ mask = np.zeros_like(img)
1181
+ if threshold=='':
1182
+ pass
1183
+ mask[np.where(std_img > float(threshold))] = 1.0
1184
+ mask_int = mask.astype(int)
1185
+ mask_int = binary_fill_holes(mask_int).astype(float)
1186
+ # invert_mask = np.zeros_like(mask_int)
1187
+ # invert_mask[mask_int == 0] = 1
1188
+ # if isinstance(normalisation_operation,bool) and normalisation_operation:
1189
+ # normalisation_operation = 'Subtract'
1190
+ # else:
1191
+ # normalisation_operation = 'Divide'
1192
+ fluo_max, bg_fit = correct_image(img.astype(float), cell_masks=mask_int,
1193
+ normalisation_operation=normalisation_operation, clip=clip, mode=mode)
1194
+ return fluo_max, bg_fit
1195
+
1196
+
1197
+ def blob_detection(image, label, threshold, diameter):
1198
+ """
1199
+ Perform blob detection on an image based on labeled regions.
1200
+
1201
+ Parameters:
1202
+ - image (numpy.ndarray): The input image data.
1203
+ - label (numpy.ndarray): An array specifying labeled regions in the image.
1204
+ - threshold (float): The threshold value for blob detection.
1205
+ - diameter (float): The expected diameter of blobs.
1206
+
1207
+ Returns:
1208
+ - dict: A dictionary containing information about detected blobs.
1209
+
1210
+ This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
1211
+ and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
1212
+ based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
1213
+ detected blobs and their mean intensity for each labeled region.
1214
+
1215
+ Example:
1216
+ >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1217
+ >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
1218
+ >>> threshold = 0.1
1219
+ >>> diameter = 5.0
1220
+ >>> result = blob_detection(image, label, threshold, diameter)
1221
+ >>> print(result)
1222
+ {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
1223
+
1224
+ Note:
1225
+ - Blobs are detected using the Difference of Gaussians (DoG) method.
1226
+ - Detected blobs are filtered based on the specified threshold and expected diameter.
1227
+ - The returned dictionary contains information about the number of detected blobs and their mean intensity
1228
+ for each labeled region in which at least one blob was detected; regions without blobs are omitted.
1229
+ """
1230
+ blob_labels = {}
1231
+ dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
1232
+ for mask_index in np.unique(label):
1233
+ if mask_index == 0:
1234
+ continue
1235
+ removed_background = image.copy()
1236
+ one_mask = label.copy()
1237
+ one_mask[np.where(label != mask_index)] = 0
1238
+ dilated_copy = dilated_image.copy()
1239
+ dilated_copy[np.where(dilated_image != mask_index)] = 0
1240
+ removed_background[np.where(dilated_copy == 0)] = 0
1241
+ min_sigma = (1 / (1 + math.sqrt(2))) * diameter
1242
+ max_sigma = math.sqrt(2) * min_sigma
1243
+ blobs = skimage.feature.blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
1244
+ max_sigma=max_sigma)
1245
+
1246
+ mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
1247
+ if not np.any(mask):
1248
+ continue
1249
+ blobs_filtered = blobs[mask]
1250
+ binary_blobs = np.zeros_like(label)
1251
+ for blob in blobs_filtered:
1252
+ y, x, r = blob
1253
+ rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
1254
+ binary_blobs[rr, cc] = 1
1255
+ spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
1256
+ blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
1257
+ return blob_labels
1258
+
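# --- Hedged usage sketch (editorial, not part of the packaged file) ---
# Count bright spots inside each labelled cell with the Difference-of-Gaussians
# detector. The exact counts depend on the threshold and diameter chosen.
import numpy as np
from skimage.draw import disk as draw_disk
from celldetective.measure import blob_detection

image = np.zeros((128, 128), dtype=float)
label = np.zeros((128, 128), dtype=int)
rr, cc = draw_disk((40, 40), 25); label[rr, cc] = 1
rr, cc = draw_disk((90, 90), 25); label[rr, cc] = 2
for centre in [(35, 35), (48, 45), (90, 92)]:   # two spots in cell 1, one in cell 2
    rr, cc = draw_disk(centre, 3); image[rr, cc] = 1.0

spots = blob_detection(image, label, threshold=0.05, diameter=6)
print(spots)  # e.g. {1: [2, <mean intensity>], 2: [1, <mean intensity>]}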