celldetective-1.3.9.post5-py3-none-any.whl → celldetective-1.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. celldetective/__init__.py +0 -3
  2. celldetective/_version.py +1 -1
  3. celldetective/events.py +2 -4
  4. celldetective/extra_properties.py +132 -0
  5. celldetective/gui/InitWindow.py +33 -45
  6. celldetective/gui/__init__.py +1 -0
  7. celldetective/gui/about.py +19 -15
  8. celldetective/gui/analyze_block.py +34 -19
  9. celldetective/gui/base_components.py +23 -0
  10. celldetective/gui/btrack_options.py +26 -34
  11. celldetective/gui/classifier_widget.py +68 -81
  12. celldetective/gui/configure_new_exp.py +113 -17
  13. celldetective/gui/control_panel.py +68 -141
  14. celldetective/gui/generic_signal_plot.py +9 -12
  15. celldetective/gui/gui_utils.py +49 -21
  16. celldetective/gui/json_readers.py +5 -4
  17. celldetective/gui/layouts.py +246 -22
  18. celldetective/gui/measurement_options.py +32 -17
  19. celldetective/gui/neighborhood_options.py +10 -13
  20. celldetective/gui/plot_measurements.py +21 -17
  21. celldetective/gui/plot_signals_ui.py +125 -72
  22. celldetective/gui/process_block.py +180 -123
  23. celldetective/gui/processes/compute_neighborhood.py +594 -0
  24. celldetective/gui/processes/measure_cells.py +5 -0
  25. celldetective/gui/processes/segment_cells.py +27 -6
  26. celldetective/gui/processes/track_cells.py +6 -0
  27. celldetective/gui/retrain_segmentation_model_options.py +12 -20
  28. celldetective/gui/retrain_signal_model_options.py +57 -56
  29. celldetective/gui/seg_model_loader.py +21 -62
  30. celldetective/gui/signal_annotator.py +129 -70
  31. celldetective/gui/signal_annotator2.py +431 -635
  32. celldetective/gui/signal_annotator_options.py +8 -11
  33. celldetective/gui/survival_ui.py +49 -95
  34. celldetective/gui/tableUI.py +28 -25
  35. celldetective/gui/thresholds_gui.py +617 -1221
  36. celldetective/gui/viewers.py +106 -39
  37. celldetective/gui/workers.py +9 -3
  38. celldetective/io.py +57 -20
  39. celldetective/measure.py +63 -27
  40. celldetective/neighborhood.py +342 -268
  41. celldetective/preprocessing.py +25 -17
  42. celldetective/relative_measurements.py +50 -29
  43. celldetective/scripts/analyze_signals.py +4 -1
  44. celldetective/scripts/measure_relative.py +4 -1
  45. celldetective/scripts/segment_cells.py +0 -6
  46. celldetective/scripts/track_cells.py +3 -1
  47. celldetective/scripts/train_segmentation_model.py +7 -4
  48. celldetective/signals.py +29 -14
  49. celldetective/tracking.py +7 -2
  50. celldetective/utils.py +36 -8
  51. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/METADATA +24 -16
  52. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/RECORD +57 -55
  53. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/WHEEL +1 -1
  54. tests/test_qt.py +21 -21
  55. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/entry_points.txt +0 -0
  56. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info/licenses}/LICENSE +0 -0
  57. {celldetective-1.3.9.post5.dist-info → celldetective-1.4.0.dist-info}/top_level.txt +0 -0
celldetective/gui/viewers.py CHANGED
@@ -3,7 +3,7 @@ from celldetective.io import auto_load_number_of_frames, load_frames
  from celldetective.filters import *
  from celldetective.segmentation import filter_image, threshold_image
  from celldetective.measure import contour_of_instance_segmentation, extract_blobs_in_image
- from celldetective.utils import _get_img_num_per_channel, estimate_unreliable_edge
+ from celldetective.utils import _get_img_num_per_channel, estimate_unreliable_edge, is_integer_array
  from tifffile import imread
  import matplotlib.pyplot as plt
  from pathlib import Path
@@ -11,11 +11,11 @@ from natsort import natsorted
  from glob import glob
  import os
 
- from PyQt5.QtWidgets import QWidget, QHBoxLayout, QPushButton, QLabel, QComboBox, QLineEdit, QListWidget, QShortcut
+ from PyQt5.QtWidgets import QHBoxLayout, QPushButton, QLabel, QComboBox, QLineEdit, QListWidget, QShortcut
  from PyQt5.QtCore import Qt, QSize
  from PyQt5.QtGui import QKeySequence, QDoubleValidator
  from celldetective.gui.gui_utils import FigureCanvas, center_window, QuickSliderLayout, QHSeperationLine, ThresholdLineEdit, PreprocessingLayout2
- from celldetective.gui import Styles
+ from celldetective.gui import Styles, CelldetectiveWidget
  from superqt import QLabeledDoubleSlider, QLabeledSlider, QLabeledDoubleRangeSlider
  from superqt.fonticon import icon
  from fonticon_mdi6 import MDI6
@@ -24,7 +24,7 @@ import gc
  from celldetective.utils import mask_edges
  from scipy.ndimage import shift
 
- class StackVisualizer(QWidget, Styles):
+ class StackVisualizer(CelldetectiveWidget):
 
  """
  A widget for visualizing image stacks with interactive sliders and channel selection.
@@ -92,8 +92,7 @@ class StackVisualizer(QWidget, Styles):
  self.generate_frame_slider()
 
  self.canvas.layout.setContentsMargins(15,15,15,30)
- self.setAttribute(Qt.WA_DeleteOnClose)
- center_window(self)
+ #center_window(self)
 
  def show(self):
  # Display the widget
@@ -136,6 +135,7 @@ class StackVisualizer(QWidget, Styles):
  self.stack_path,
  normalize_input=False).astype(float)[:,:,0]
 
+
  def generate_figure_canvas(self):
  # Generate the figure canvas for displaying images
 
@@ -187,6 +187,16 @@ class StackVisualizer(QWidget, Styles):
  channel_layout.addWidget(self.channels_cb, 75)
  self.canvas.layout.addLayout(channel_layout)
 
+ def set_contrast_decimals(self):
+ if is_integer_array(self.init_frame):
+ self.contrast_slider.setDecimals(0)
+ self.contrast_slider.setSingleStep(1.0)
+ self.contrast_slider.setTickInterval(1.0)
+ else:
+ self.contrast_slider.setDecimals(3)
+ self.contrast_slider.setSingleStep(1.0E-03)
+ self.contrast_slider.setTickInterval(1.0E-03)
+
  def generate_contrast_slider(self):
  # Generate the contrast slider if enabled
 
@@ -197,15 +207,15 @@ class StackVisualizer(QWidget, Styles):
  slider_initial_value=[np.nanpercentile(self.init_frame, 0.1),np.nanpercentile(self.init_frame, 99.99)],
  slider_range=(np.nanmin(self.init_frame),np.nanmax(self.init_frame)),
  decimal_option=True,
- precision=1.0E-05,
+ precision=2,
  )
+ self.set_contrast_decimals()
+
  contrast_layout.setContentsMargins(15,0,15,0)
  self.im.set_clim(vmin=np.nanpercentile(self.init_frame, 0.1),vmax=np.nanpercentile(self.init_frame, 99.99))
  self.contrast_slider.valueChanged.connect(self.change_contrast)
  self.canvas.layout.addLayout(contrast_layout)
 
-
-
  def generate_frame_slider(self):
  # Generate the frame slider if enabled
 
@@ -250,6 +260,8 @@ class StackVisualizer(QWidget, Styles):
  self.channel_trigger = False
  self.init_contrast = False
 
+ self.set_contrast_decimals()
+
  def change_frame_from_channel_switch(self, value):
 
  self.channel_trigger = True
@@ -275,15 +287,17 @@ class StackVisualizer(QWidget, Styles):
  self.im.set_data(self.init_frame)
 
  if self.init_contrast:
- self.im.autoscale()
- I_min, I_max = self.im.get_clim()
- self.contrast_slider.setRange(np.nanmin([self.init_frame,self.last_frame]),np.nanmax([self.init_frame,self.last_frame]))
- self.contrast_slider.setValue((I_min,I_max))
+ imgs = np.array([self.init_frame,self.last_frame])
+ vmin = np.nanpercentile(imgs.flatten(), 1.0)
+ vmax = np.nanpercentile(imgs.flatten(), 99.99)
+ self.contrast_slider.setRange(np.nanmin(imgs),np.nanmax(imgs))
+ self.contrast_slider.setValue((vmin,vmax))
+ self.im.set_clim(vmin,vmax)
 
  if self.create_contrast_slider:
  self.change_contrast(self.contrast_slider.value())
 
-
+
  def closeEvent(self, event):
  # Event handler for closing the widget
  self.canvas.close()
@@ -318,16 +332,27 @@ class ThresholdedStackVisualizer(StackVisualizer):
  with interactive sliders for threshold and mask opacity adjustment.
  """
 
- def __init__(self, preprocessing=None, parent_le=None, initial_threshold=5, initial_mask_alpha=0.5, *args, **kwargs):
+ def __init__(self, preprocessing=None, parent_le=None, initial_threshold=5, initial_mask_alpha=0.5, show_opacity_slider=True, show_threshold_slider=True, *args, **kwargs):
  # Initialize the widget and its attributes
  super().__init__(*args, **kwargs)
  self.preprocessing = preprocessing
  self.thresh = initial_threshold
  self.mask_alpha = initial_mask_alpha
  self.parent_le = parent_le
- self.compute_mask(self.thresh)
- self.generate_mask_imshow()
+ self.show_opacity_slider = show_opacity_slider
+ self.show_threshold_slider = show_threshold_slider
+ self.thresholded = False
+ self.mask = np.zeros_like(self.init_frame)
+ self.thresh_min = 0.0
+ self.thresh_max = 30.0
+
  self.generate_threshold_slider()
+
+ if self.thresh is not None:
+ self.compute_mask(self.thresh)
+
+ self.generate_mask_imshow()
+ self.generate_scatter()
  self.generate_opacity_slider()
  if isinstance(self.parent_le, QLineEdit):
  self.generate_apply_btn()
@@ -349,23 +374,32 @@ class ThresholdedStackVisualizer(StackVisualizer):
  self.close()
 
  def generate_mask_imshow(self):
- # Generate the mask imshow
+ # Generate the mask imshow
+
  self.im_mask = self.ax.imshow(np.ma.masked_where(self.mask==0, self.mask), alpha=self.mask_alpha, interpolation='none')
  self.canvas.canvas.draw()
 
+ def generate_scatter(self):
+ self.scat_markers = self.ax.scatter([], [], color="tab:red")
+
  def generate_threshold_slider(self):
  # Generate the threshold slider
  self.threshold_slider = QLabeledDoubleSlider()
+ if self.thresh is None:
+ init_value = 1.0E5
+ else:
+ init_value = self.thresh
  thresh_layout = QuickSliderLayout(label='Threshold: ',
  slider=self.threshold_slider,
- slider_initial_value=self.thresh,
- slider_range=(0,30),
+ slider_initial_value=init_value,
+ slider_range=(self.thresh_min,np.amax([self.thresh_max, init_value])),
  decimal_option=True,
- precision=1.0E-05,
+ precision=4,
  )
  thresh_layout.setContentsMargins(15,0,15,0)
  self.threshold_slider.valueChanged.connect(self.change_threshold)
- self.canvas.layout.addLayout(thresh_layout)
+ if self.show_threshold_slider:
+ self.canvas.layout.addLayout(thresh_layout)
 
  def generate_opacity_slider(self):
  # Generate the opacity slider for the mask
@@ -375,11 +409,12 @@ class ThresholdedStackVisualizer(StackVisualizer):
  slider_initial_value=0.5,
  slider_range=(0,1),
  decimal_option=True,
- precision=1.0E-03
+ precision=3,
  )
  opacity_layout.setContentsMargins(15,0,15,0)
  self.opacity_slider.valueChanged.connect(self.change_mask_opacity)
- self.canvas.layout.addLayout(opacity_layout)
+ if self.show_opacity_slider:
+ self.canvas.layout.addLayout(opacity_layout)
 
  def change_mask_opacity(self, value):
  # Change the opacity of the mask
@@ -390,28 +425,61 @@ class ThresholdedStackVisualizer(StackVisualizer):
  def change_threshold(self, value):
  # Change the threshold value
  self.thresh = value
- self.compute_mask(self.thresh)
- mask = np.ma.masked_where(self.mask == 0, self.mask)
- self.im_mask.set_data(mask)
- self.canvas.canvas.draw_idle()
+ if self.thresh is not None:
+ self.compute_mask(self.thresh)
+ mask = np.ma.masked_where(self.mask == 0, self.mask)
+ self.im_mask.set_data(mask)
+ self.canvas.canvas.draw_idle()
 
  def change_frame(self, value):
- # Change the displayed frame and update the threshold
+ # Change the displayed frame and update the threshold
+ if self.thresholded:
+ self.init_contrast = True
  super().change_frame(value)
  self.change_threshold(self.threshold_slider.value())
+ if self.thresholded:
+ self.thresholded = False
+ self.init_contrast = False
 
  def compute_mask(self, threshold_value):
  # Compute the mask based on the threshold value
  self.preprocess_image()
  edge = estimate_unreliable_edge(self.preprocessing)
- self.mask = threshold_image(self.processed_image, threshold_value, np.inf, foreground_value=1, edge_exclusion=edge).astype(int)
+ if isinstance(threshold_value, (list,np.ndarray,tuple)):
+ self.mask = threshold_image(self.processed_image, threshold_value[0], threshold_value[1], foreground_value=1, fill_holes=True, edge_exclusion=edge).astype(int)
+ else:
+ self.mask = threshold_image(self.processed_image, threshold_value, np.inf, foreground_value=1, fill_holes=True, edge_exclusion=edge).astype(int)
 
  def preprocess_image(self):
  # Preprocess the image before thresholding
  if self.preprocessing is not None:
 
  assert isinstance(self.preprocessing, list)
- self.processed_image = filter_image(self.init_frame.copy(),filters=self.preprocessing)
+ self.processed_image = filter_image(self.init_frame.copy().astype(float),filters=self.preprocessing)
+ min_ = np.amin(self.processed_image)
+ max_ = np.amax(self.processed_image)
+
+ if min_ < self.thresh_min:
+ self.thresh_min = min_
+ if max_ > self.thresh_max:
+ self.thresh_max = max_
+
+ self.threshold_slider.setRange(self.thresh_min, self.thresh_max)
+
+ def set_preprocessing(self, activation_protocol):
+
+ self.preprocessing = activation_protocol
+ self.preprocess_image()
+
+ self.im.set_data(self.processed_image)
+ vmin = np.nanpercentile(self.processed_image, 1.0)
+ vmax = np.nanpercentile(self.processed_image, 99.99)
+ self.contrast_slider.setRange(np.nanmin(self.processed_image),
+ np.nanmax(self.processed_image))
+ self.contrast_slider.setValue((vmin, vmax))
+ self.im.set_clim(vmin,vmax)
+ self.canvas.canvas.draw_idle()
+ self.thresholded = True
 
 
 class CellEdgeVisualizer(StackVisualizer):
@@ -578,7 +646,7 @@ class CellEdgeVisualizer(StackVisualizer):
  slider_initial_value=0.5,
  slider_range=(0,1),
  decimal_option=True,
- precision=1.0E-03
+ precision=3,
  )
  opacity_layout.setContentsMargins(15,0,15,0)
  self.opacity_slider.valueChanged.connect(self.change_mask_opacity)
@@ -952,7 +1020,7 @@ class CellSizeViewer(StackVisualizer):
  def generate_circle(self):
  # Generate the circle for visualization
 
- self.circ = plt.Circle((self.init_frame.shape[0]//2,self.init_frame.shape[1]//2), self.diameter//2, ec="tab:red",fill=False)
+ self.circ = plt.Circle((self.init_frame.shape[0]//2,self.init_frame.shape[1]//2), self.diameter//2 / self.PxToUm, ec="tab:red",fill=False)
  self.ax.add_patch(self.circ)
 
  self.ax.callbacks.connect('xlim_changed',self.on_xlims_or_ylims_change)
@@ -978,7 +1046,7 @@ class CellSizeViewer(StackVisualizer):
  if self.set_radius_in_list:
  val = int(self.diameter_slider.value()//2)
  else:
- val = int(self.diameter_slider.value())
+ val = int(self.diameter_slider.value())
 
  self.parent_list_widget.addItems([str(val)])
  self.close()
@@ -1017,7 +1085,7 @@ class CellSizeViewer(StackVisualizer):
  slider_initial_value=self.diameter,
  slider_range=self.diameter_slider_range,
  decimal_option=True,
- precision=1.0E-05,
+ precision=5,
  )
  diameter_layout.setContentsMargins(15,0,15,0)
  self.diameter_slider.valueChanged.connect(self.change_diameter)
@@ -1025,9 +1093,8 @@ class CellSizeViewer(StackVisualizer):
 
  def change_diameter(self, value):
  # Change the diameter of the circle
-
  self.diameter = value
- self.circ.set_radius(self.diameter//2)
+ self.circ.set_radius(self.diameter//2 / self.PxToUm)
  self.canvas.canvas.draw_idle()
 
 
@@ -1080,7 +1147,7 @@ class ChannelOffsetViewer(StackVisualizer):
  slider_initial_value=0.5,
  slider_range=(0,1.0),
  decimal_option=True,
- precision=1.0E-05,
+ precision=5,
  )
  alpha_layout.setContentsMargins(15,0,15,0)
  self.overlay_alpha_slider.valueChanged.connect(self.change_alpha_overlay)
@@ -1097,7 +1164,7 @@ class ChannelOffsetViewer(StackVisualizer):
  slider_initial_value=[np.nanpercentile(self.overlay_init_frame, 0.1),np.nanpercentile(self.overlay_init_frame, 99.99)],
  slider_range=(np.nanmin(self.overlay_init_frame),np.nanmax(self.overlay_init_frame)),
  decimal_option=True,
- precision=1.0E-05,
+ precision=5,
  )
  contrast_layout.setContentsMargins(15,0,15,0)
  self.im_overlay.set_clim(vmin=np.nanpercentile(self.overlay_init_frame, 0.1),vmax=np.nanpercentile(self.overlay_init_frame, 99.99))
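
The reworked ThresholdedStackVisualizer.compute_mask now distinguishes a scalar threshold from a (min, max) pair. Below is a minimal sketch of the two calling patterns of celldetective.segmentation.threshold_image, assuming only the signature visible in the hunk above (lower bound, upper bound, foreground_value, fill_holes); the synthetic image is a placeholder, not part of the diff.

import numpy as np
from celldetective.segmentation import threshold_image

frame = np.random.normal(0.0, 1.0, (256, 256))  # synthetic stand-in for a stack frame

# Scalar threshold: foreground is everything above the value (upper bound set to +inf),
# matching the else branch of the new compute_mask
mask_above = threshold_image(frame, 2.0, np.inf, foreground_value=1, fill_holes=True).astype(int)

# (min, max) pair: band threshold between the two bounds,
# matching the isinstance((list, np.ndarray, tuple)) branch
mask_band = threshold_image(frame, 1.0, 4.0, foreground_value=1, fill_holes=True).astype(int)
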
celldetective/gui/workers.py CHANGED
@@ -1,18 +1,24 @@
  from multiprocessing import Queue
- from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QHBoxLayout, QWidget, QLabel, QProgressBar
+ from PyQt5.QtWidgets import QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QProgressBar
  from PyQt5.QtCore import QRunnable, QObject, pyqtSignal, QThreadPool, QSize, Qt
+
+ from celldetective.gui.base_components import CelldetectiveDialog
  from celldetective.gui.gui_utils import center_window
+ from celldetective.gui import Styles
  import time
  import math
 
- class ProgressWindow(QDialog):
+ class ProgressWindow(CelldetectiveDialog):
 
  def __init__(self, process=None, parent_window=None, title="", position_info=True, process_args=None):
- QDialog.__init__(self)
+
+ super().__init__()
+ #QDialog.__init__(self)
 
  self.setWindowTitle(f'{title} Progress')
  self.__process = process
  self.parent_window = parent_window
+
  self.position_info = position_info
  if self.position_info:
  self.pos_name = self.parent_window.pos_name
celldetective/io.py CHANGED
@@ -29,7 +29,7 @@ from celldetective.utils import interpolate_nan_multichannel, _estimate_scale_fa
 
  from stardist import fill_label_holes
  from skimage.transform import resize
-
+ import re
 
  def extract_experiment_from_well(well_path):
 
@@ -596,6 +596,17 @@ def get_experiment_pharmaceutical_agents(experiment, dtype=str):
  return np.array([dtype(c) for c in pharmaceutical_agents])
 
 
+ def get_experiment_populations(experiment, dtype=str):
+
+ config = get_config(experiment)
+ populations_str = ConfigSectionMap(config, "Populations")
+ if populations_str is not None:
+ populations = populations_str['populations'].split(',')
+ else:
+ populations = ['effectors','targets']
+ return list([dtype(c) for c in populations])
+
+
  def interpret_wells_and_positions(experiment, well_option, position_option):
  """
  Interpret well and position options for a given experiment.
@@ -1165,6 +1176,9 @@ def locate_labels(position, population='target', frames=None):
  label_path = natsorted(glob(position + os.sep.join(["labels_targets", "*.tif"])))
  elif population.lower() == "effector" or population.lower() == "effectors":
  label_path = natsorted(glob(position + os.sep.join(["labels_effectors", "*.tif"])))
+ else:
+ label_path = natsorted(glob(position + os.sep.join([f"labels_{population}", "*.tif"])))
+
 
  label_names = [os.path.split(lbl)[-1] for lbl in label_path]
 
@@ -1242,6 +1256,9 @@ def fix_missing_labels(position, population='target', prefix='Aligned'):
  elif population.lower() == "effector" or population.lower() == "effectors":
  label_path = natsorted(glob(position + os.sep.join(["labels_effectors", "*.tif"])))
  path = position + os.sep + "labels_effectors"
+ else:
+ label_path = natsorted(glob(position + os.sep.join([f"labels_{population}", "*.tif"])))
+ path = position + os.sep + f"labels_{population}"
 
  if label_path!=[]:
  #path = os.path.split(label_path[0])[0]
@@ -1348,6 +1365,9 @@ def load_tracking_data(position, prefix="Aligned", population="target"):
  trajectories = pd.read_csv(position + os.sep.join(['output', 'tables', 'trajectories_targets.csv']))
  elif population.lower() == "effector" or population.lower() == "effectors":
  trajectories = pd.read_csv(position + os.sep.join(['output', 'tables', 'trajectories_effectors.csv']))
+ else:
+ trajectories = pd.read_csv(position + os.sep.join(['output', 'tables', f'trajectories_{population}.csv']))
+
 
  stack, labels = locate_stack_and_labels(position, prefix=prefix, population=population)
 
@@ -2354,6 +2374,11 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
  napari_data = np.load(position+os.sep.join(['output', 'tables', 'napari_effector_trajectories.npy']), allow_pickle=True)
  else:
  napari_data = None
+ else:
+ if os.path.exists(position+os.sep.join(['output', 'tables', f'napari_{population}_trajectories.npy'])):
+ napari_data = np.load(position+os.sep.join(['output', 'tables', f'napari_{population}_trajectories.npy']), allow_pickle=True)
+ else:
+ napari_data = None
 
  if napari_data is not None:
  data = napari_data.item()['data']
@@ -2493,6 +2518,9 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
 
  def export_labels():
  labels_layer = viewer.layers['segmentation'].data
+ if not os.path.exists(output_folder):
+ os.mkdir(output_folder)
+
  for t, im in enumerate(tqdm(labels_layer)):
 
  try:
@@ -2639,12 +2667,9 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
  return export_annotation()
 
  stack, labels = locate_stack_and_labels(position, prefix=prefix, population=population)
-
- if not population.endswith('s'):
- population += 's'
  output_folder = position + f'labels_{population}{os.sep}'
+ print(f"Shape of the loaded image stack: {stack.shape}...")
 
- print(f"{stack.shape}")
  viewer = napari.Viewer()
  viewer.add_image(stack, channel_axis=-1, colormap=["gray"] * stack.shape[-1])
  viewer.add_labels(labels.astype(int), name='segmentation', opacity=0.4)
@@ -2678,6 +2703,8 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
  del labels
  gc.collect()
 
+ print("napari viewer was successfully closed...")
+
  def correct_annotation(filename):
 
  """
@@ -2827,21 +2854,31 @@ def control_tracking_table(position, calibration=1, prefix="Aligned", population
 
 
  def get_segmentation_models_list(mode='targets', return_path=False):
- if mode == 'targets':
- modelpath = os.sep.join(
- [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
- "segmentation_targets", os.sep])
- repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_targets"]))
- elif mode == 'effectors':
- modelpath = os.sep.join(
- [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
- "segmentation_effectors", os.sep])
- repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_effectors"]))
- elif mode == 'generic':
- modelpath = os.sep.join(
+
+ modelpath = os.sep.join(
  [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
- "segmentation_generic", os.sep])
- repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_generic"]))
+ f"segmentation_{mode}", os.sep])
+ if not os.path.exists(modelpath):
+ os.mkdir(modelpath)
+ repository_models = []
+ else:
+ repository_models = get_zenodo_files(cat=os.sep.join(["models", f"segmentation_{mode}"]))
+
+ # if mode == 'targets':
+ # modelpath = os.sep.join(
+ # [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
+ # "segmentation_targets", os.sep])
+ # repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_targets"]))
+ # elif mode == 'effectors':
+ # modelpath = os.sep.join(
+ # [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
+ # "segmentation_effectors", os.sep])
+ # repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_effectors"]))
+ # elif mode == 'generic':
+ # modelpath = os.sep.join(
+ # [os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective", "models",
+ # "segmentation_generic", os.sep])
+ # repository_models = get_zenodo_files(cat=os.sep.join(["models", "segmentation_generic"]))
 
  available_models = natsorted(glob(modelpath + '*/'))
  available_models = [m.replace('\\', '/').split('/')[-2] for m in available_models]
@@ -3275,7 +3312,7 @@ def normalize_multichannel(multichannel_frame, percentiles=None,
 
  return np.moveaxis(mf_new,0,-1)
 
- def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=float, normalize_kwargs={"percentiles": (0.,99.99)}):
+ def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=np.float64, normalize_kwargs={"percentiles": (0.,99.99)}):
 
  """
  Loads and optionally normalizes and rescales specified frames from a stack located at a given path.
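
io.py now resolves outputs for arbitrary populations. Below is a short usage sketch of the new get_experiment_populations helper together with the naming convention used by the generic branches of locate_labels and load_tracking_data; the experiment path is a placeholder, not taken from the diff.

from celldetective.io import get_experiment_populations

experiment = "/path/to/experiment"  # placeholder experiment folder

# Falls back to ['effectors', 'targets'] when the config has no "Populations" section
for population in get_experiment_populations(experiment):
    # The generic branches look up labels_<population>/ folders and
    # trajectories_<population>.csv tables by name
    print(population, f"labels_{population}", f"trajectories_{population}.csv")
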
celldetective/measure.py CHANGED
@@ -20,13 +20,21 @@ from skimage.feature import blob_dog, blob_log
  from celldetective.utils import rename_intensity_column, create_patch_mask, remove_redundant_features, \
  remove_trajectory_measurements, contour_of_instance_segmentation, extract_cols_from_query, step_function, interpolate_nan, _remove_invalid_cols
  from celldetective.preprocessing import field_correction
- from celldetective.extra_properties import *
+
+ # try:
+ # from celldetective.extra_properties import *
+ # extra_props = True
+ # except Exception as e:
+ # print(f"The module extra_properties seems corrupted: {e}... Skip...")
+ # extra_props = False
+
  from inspect import getmembers, isfunction
  from skimage.morphology import disk
  from scipy.signal import find_peaks, peak_widths
 
  from celldetective.segmentation import filter_image
  from celldetective.regionprops import regionprops_table
+ from celldetective.utils import pretty_table
 
  abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'celldetective'])
 
@@ -211,7 +219,7 @@ def measure(stack=None, labels=None, trajectories=None, channel_names=None,
  measurements = measurements.sort_values(by=[column_labels['track'],column_labels['time']])
  measurements = measurements.dropna(subset=[column_labels['track']])
  else:
- measurements['ID'] = np.arange(len(df))
+ measurements['ID'] = np.arange(len(measurements))
 
  measurements = measurements.reset_index(drop=True)
  measurements = _remove_invalid_cols(measurements)
@@ -365,26 +373,35 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
  corrected_image = field_correction(img[:,:,ind].copy(), threshold_on_std=norm['threshold_on_std'], operation=norm['operation'], model=norm['model'], clip=norm['clip'])
  img[:, :, ind] = corrected_image
 
- import celldetective.extra_properties as extra_props
-
- extra = getmembers(extra_props, isfunction)
- extra = [extra[i][0] for i in range(len(extra))]
-
- extra_props_list = []
- feats = features.copy()
- for f in features:
- if f in extra:
- feats.remove(f)
- extra_props_list.append(getattr(extra_props, f))
-
- # Add intensity nan mean if need to measure mean intensities
- if measure_mean_intensities:
- extra_props_list.append(getattr(extra_props, 'intensity_nanmean'))
-
- if len(extra_props_list) == 0:
- extra_props_list = None
+ try:
+ import celldetective.extra_properties as extra_props
+ extraprops = True
+ except Exception as e:
+ print(f"The module extra_properties seems corrupted: {e}... Skip...")
+ extraprops = False
+
+ if extraprops:
+ extra = getmembers(extra_props, isfunction)
+ extra = [extra[i][0] for i in range(len(extra))]
+
+ extra_props_list = []
+ feats = features.copy()
+ for f in features:
+ if f in extra:
+ feats.remove(f)
+ extra_props_list.append(getattr(extra_props, f))
+
+ # Add intensity nan mean if need to measure mean intensities
+ if measure_mean_intensities:
+ extra_props_list.append(getattr(extra_props, 'intensity_nanmean'))
+
+ if len(extra_props_list) == 0:
+ extra_props_list = None
+ else:
+ extra_props_list = tuple(extra_props_list)
  else:
- extra_props_list = tuple(extra_props_list)
+ extra_props_list = []
+ feats = features.copy()
 
  props = regionprops_table(label, intensity_image=img, properties=feats, extra_properties=extra_props_list, channel_names=channels)
  df_props = pd.DataFrame(props)
@@ -837,17 +854,32 @@ def local_normalisation(image, labels, background_intensity, measurement='intens
 
  def normalise_by_cell(image, labels, distance=5, model='median', operation='subtract', clip=False):
 
- import celldetective.extra_properties as extra_props
+ try:
+ import celldetective.extra_properties as extra_props
+ extraprops = True
+ except Exception as e:
+ print(f"The module extra_properties seems corrupted: {e}... Skip...")
+ extraprops = False
 
  border = contour_of_instance_segmentation(label=labels, distance=distance * (-1))
  if model == 'mean':
+
  measurement = 'intensity_nanmean'
- extra_props = [getattr(extra_props, measurement)]
+ if extraprops:
+ extra_props = [getattr(extra_props, measurement)]
+ else:
+ extra_props = []
+
  background_intensity = regionprops_table(intensity_image=image, label_image=border,
  extra_properties=extra_props)
  elif model == 'median':
+
  measurement = 'intensity_median'
- extra_props = [getattr(extra_props, measurement)]
+ if extraprops:
+ extra_props = [getattr(extra_props, measurement)]
+ else:
+ extra_props = []
+
  background_intensity = regionprops_table(intensity_image=image, label_image=border,
  extra_properties=extra_props)
 
@@ -1250,16 +1282,20 @@ def classify_irreversible_events(data, class_attr, r2_threshold=0.5, percentile_
  # ambiguity, possible transition, use `unique_state` technique after
  df.loc[indices, class_attr] = 2
 
- print("Classes after initial pass: ",df.loc[df['FRAME']==0,class_attr].value_counts())
+ print("Number of cells per class after the initial pass: ")
+ pretty_table(df.loc[df['FRAME']==0,class_attr].value_counts().to_dict())
 
  df.loc[df[class_attr]!=2, class_attr.replace('class', 't')] = -1
  # Try to fit time on class 2 cells (ambiguous)
  df = estimate_time(df, class_attr, model='step_function', class_of_interest=[2], r2_threshold=r2_threshold)
- print("Classes after fit: ", df.loc[df['FRAME']==0,class_attr].value_counts())
+
+ print("Number of cells per class after conditional signal fit: ")
+ pretty_table(df.loc[df['FRAME']==0,class_attr].value_counts().to_dict())
 
  # Revisit class 2 cells to classify as neg/pos with percentile tolerance
  df.loc[df[class_attr]==2,:] = classify_unique_states(df.loc[df[class_attr]==2,:].copy(), class_attr, percentile_recovery)
- print("Classes after unique state recovery: ",df.loc[df['FRAME']==0,class_attr].value_counts())
+ print("Number of cells per class after recovery pass (median state): ")
+ pretty_table(df.loc[df['FRAME']==0,class_attr].value_counts().to_dict())
 
  return df
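
measure.py now treats celldetective.extra_properties as an optional import. Below is a minimal standalone sketch of the guarded-import pattern used above, so a broken or missing module degrades to the built-in regionprops features instead of aborting the measurement step; the feature name is only an example.

try:
    import celldetective.extra_properties as extra_props
    extraprops = True
except Exception as e:
    print(f"The module extra_properties seems corrupted: {e}... Skip...")
    extraprops = False

extra_props_list = []
if extraprops:
    # Resolve optional measurement functions by name, e.g. intensity_nanmean
    extra_props_list.append(getattr(extra_props, 'intensity_nanmean'))
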