celldetective 1.3.4.post1__py3-none-any.whl → 1.3.6.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. celldetective/_version.py +1 -1
  2. celldetective/events.py +10 -5
  3. celldetective/filters.py +11 -0
  4. celldetective/gui/btrack_options.py +151 -1
  5. celldetective/gui/classifier_widget.py +44 -15
  6. celldetective/gui/configure_new_exp.py +13 -0
  7. celldetective/gui/control_panel.py +4 -2
  8. celldetective/gui/generic_signal_plot.py +2 -6
  9. celldetective/gui/gui_utils.py +170 -12
  10. celldetective/gui/measurement_options.py +85 -54
  11. celldetective/gui/neighborhood_options.py +1 -1
  12. celldetective/gui/plot_signals_ui.py +3 -4
  13. celldetective/gui/process_block.py +8 -6
  14. celldetective/gui/signal_annotator.py +10 -3
  15. celldetective/gui/signal_annotator2.py +146 -193
  16. celldetective/gui/survival_ui.py +121 -34
  17. celldetective/gui/tableUI.py +26 -12
  18. celldetective/gui/thresholds_gui.py +9 -52
  19. celldetective/gui/viewers.py +58 -21
  20. celldetective/io.py +1087 -161
  21. celldetective/measure.py +175 -102
  22. celldetective/preprocessing.py +2 -2
  23. celldetective/relative_measurements.py +6 -9
  24. celldetective/scripts/measure_cells.py +13 -3
  25. celldetective/scripts/segment_cells.py +0 -1
  26. celldetective/scripts/track_cells.py +25 -1
  27. celldetective/signals.py +9 -7
  28. celldetective/tracking.py +130 -81
  29. celldetective/utils.py +28 -7
  30. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/METADATA +3 -2
  31. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/RECORD +35 -35
  32. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/LICENSE +0 -0
  33. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/WHEEL +0 -0
  34. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/entry_points.txt +0 -0
  35. {celldetective-1.3.4.post1.dist-info → celldetective-1.3.6.post1.dist-info}/top_level.txt +0 -0
celldetective/io.py CHANGED
@@ -1,30 +1,65 @@
1
1
  from natsort import natsorted
2
+ from PyQt5.QtWidgets import QMessageBox
2
3
  from glob import glob
3
4
  from tifffile import imread, TiffFile
4
5
  import numpy as np
5
6
  import os
6
7
  import pandas as pd
7
8
  import napari
9
+ import json
10
+
8
11
  import gc
9
12
  from tqdm import tqdm
13
+ import threading
14
+ import concurrent.futures
15
+
10
16
  from csbdeep.utils import normalize_mi_ma
17
+ from csbdeep.io import save_tiff_imagej_compatible
18
+
11
19
  import skimage.io as skio
20
+ from skimage.measure import regionprops_table, label
21
+
12
22
  from scipy.ndimage import zoom
13
23
  from btrack.datasets import cell_config
14
24
  from magicgui import magicgui
15
- from csbdeep.io import save_tiff_imagej_compatible
16
25
  from pathlib import Path, PurePath
17
26
  from shutil import copyfile, rmtree
27
+
18
28
  from celldetective.utils import ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file
19
- import json
20
- from skimage.measure import regionprops_table
21
- from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
22
- from celldetective.utils import interpolate_nan
23
- import concurrent.futures
24
- from tifffile import imwrite
29
+ from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
30
+
25
31
  from stardist import fill_label_holes
26
32
 
33
+
27
34
  def extract_experiment_from_well(well_path):
35
+
36
+ """
37
+ Extracts the experiment directory path from a given well directory path.
38
+
39
+ Parameters
40
+ ----------
41
+ well_path : str
42
+ The file system path to a well directory. The path should end with the well folder,
43
+ but it does not need to include a trailing separator.
44
+
45
+ Returns
46
+ -------
47
+ str
48
+ The path to the experiment directory, which is assumed to be two levels above the well directory.
49
+
50
+ Notes
51
+ -----
52
+ - This function expects the well directory to be organized such that the experiment directory is
53
+ two levels above it in the file system hierarchy.
54
+ - If the input path does not end with a file separator (`os.sep`), one is appended before processing.
55
+
56
+ Example
57
+ -------
58
+ >>> well_path = "/path/to/experiment/plate/well"
59
+ >>> extract_experiment_from_well(well_path)
60
+ '/path/to/experiment'
61
+ """
62
+
28
63
  if not well_path.endswith(os.sep):
29
64
  well_path += os.sep
30
65
  exp_path_blocks = well_path.split(os.sep)[:-2]
@@ -32,6 +67,35 @@ def extract_experiment_from_well(well_path):
32
67
  return experiment
33
68
 
34
69
  def extract_well_from_position(pos_path):
70
+
71
+ """
72
+ Extracts the well directory path from a given position directory path.
73
+
74
+ Parameters
75
+ ----------
76
+ pos_path : str
77
+ The file system path to a position directory. The path should end with the position folder,
78
+ but it does not need to include a trailing separator.
79
+
80
+ Returns
81
+ -------
82
+ str
83
+ The path to the well directory, which is assumed to be two levels above the position directory,
84
+ with a trailing separator appended.
85
+
86
+ Notes
87
+ -----
88
+ - This function expects the position directory to be organized such that the well directory is
89
+ two levels above it in the file system hierarchy.
90
+ - If the input path does not end with a file separator (`os.sep`), one is appended before processing.
91
+
92
+ Example
93
+ -------
94
+ >>> pos_path = "/path/to/experiment/plate/well/position"
95
+ >>> extract_well_from_position(pos_path)
96
+ '/path/to/experiment/plate/well/'
97
+ """
98
+
35
99
  if not pos_path.endswith(os.sep):
36
100
  pos_path += os.sep
37
101
  well_path_blocks = pos_path.split(os.sep)[:-2]
@@ -39,14 +103,92 @@ def extract_well_from_position(pos_path):
39
103
  return well_path
40
104
 
41
105
  def extract_experiment_from_position(pos_path):
42
- if not pos_path.endswith(os.sep):
43
- pos_path += os.sep
44
- exp_path_blocks = pos_path.split(os.sep)[:-3]
106
+
107
+ """
108
+ Extracts the experiment directory path from a given position directory path.
109
+
110
+ Parameters
111
+ ----------
112
+ pos_path : str
113
+ The file system path to a position directory. The path should end with the position folder,
114
+ but it does not need to include a trailing separator.
115
+
116
+ Returns
117
+ -------
118
+ str
119
+ The path to the experiment directory, which is assumed to be three levels above the position directory.
120
+
121
+ Notes
122
+ -----
123
+ - This function expects the position directory to be organized hierarchically such that the experiment directory
124
+ is three levels above it in the file system hierarchy.
125
+ - If the input path does not end with a file separator (`os.sep`), one is appended before processing.
126
+
127
+ Example
128
+ -------
129
+ >>> pos_path = "/path/to/experiment/plate/well/position"
130
+ >>> extract_experiment_from_position(pos_path)
131
+ '/path/to/experiment'
132
+ """
133
+
134
+ pos_path = pos_path.replace(os.sep, '/')
135
+ if not pos_path.endswith('/'):
136
+ pos_path += '/'
137
+ exp_path_blocks = pos_path.split('/')[:-3]
45
138
  experiment = os.sep.join(exp_path_blocks)
139
+
46
140
  return experiment
47
141
 
48
142
  def collect_experiment_metadata(pos_path=None, well_path=None):
49
143
 
144
+ """
145
+ Collects and organizes metadata for an experiment based on a given position or well directory path.
146
+
147
+ Parameters
148
+ ----------
149
+ pos_path : str, optional
150
+ The file system path to a position directory. If provided, it will be used to extract metadata.
151
+ This parameter takes precedence over `well_path`.
152
+ well_path : str, optional
153
+ The file system path to a well directory. If `pos_path` is not provided, this path will be used to extract metadata.
154
+
155
+ Returns
156
+ -------
157
+ dict
158
+ A dictionary containing the following metadata:
159
+ - `"pos_path"`: The path to the position directory (or `None` if not provided).
160
+ - `"position"`: The same as `pos_path`.
161
+ - `"pos_name"`: The name of the position (or `0` if `pos_path` is not provided).
162
+ - `"well_path"`: The path to the well directory.
163
+ - `"well_name"`: The name of the well.
164
+ - `"well_nbr"`: The numerical identifier of the well.
165
+ - `"experiment"`: The path to the experiment directory.
166
+ - `"antibody"`: The antibody associated with the well.
167
+ - `"concentration"`: The concentration associated with the well.
168
+ - `"cell_type"`: The cell type associated with the well.
169
+ - `"pharmaceutical_agent"`: The pharmaceutical agent associated with the well.
170
+
171
+ Notes
172
+ -----
173
+ - At least one of `pos_path` or `well_path` must be provided.
174
+ - The function determines the experiment path by navigating the directory structure and extracts metadata for the
175
+ corresponding well and position.
176
+ - The metadata is derived using helper functions like `extract_experiment_from_position`, `extract_well_from_position`,
177
+ and `get_experiment_*` family of functions.
178
+
179
+ Example
180
+ -------
181
+ >>> pos_path = "/path/to/experiment/plate/well/position"
182
+ >>> metadata = collect_experiment_metadata(pos_path=pos_path)
183
+ >>> metadata["well_name"]
184
+ 'W1'
185
+
186
+ >>> well_path = "/path/to/experiment/plate/well"
187
+ >>> metadata = collect_experiment_metadata(well_path=well_path)
188
+ >>> metadata["concentration"]
189
+ 10.0
190
+ """
191
+
50
192
  if pos_path is not None:
51
193
  if not pos_path.endswith(os.sep):
52
194
  pos_path += os.sep
@@ -69,7 +211,15 @@ def collect_experiment_metadata(pos_path=None, well_path=None):
69
211
  antibodies = get_experiment_antibodies(experiment)
70
212
  pharmaceutical_agents = get_experiment_pharmaceutical_agents(experiment)
71
213
 
72
- return {"pos_path": pos_path, "position": pos_path, "pos_name": pos_name, "well_path": well_path, "well_name": well_name, "well_nbr": well_nbr, "experiment": experiment, "antibody": antibodies[idx], "concentration": concentrations[idx], "cell_type": cell_types[idx], "pharmaceutical_agent": pharmaceutical_agents[idx]}
214
+ dico = {"pos_path": pos_path, "position": pos_path, "pos_name": pos_name, "well_path": well_path, "well_name": well_name, "well_nbr": well_nbr, "experiment": experiment, "antibody": antibodies[idx], "concentration": concentrations[idx], "cell_type": cell_types[idx], "pharmaceutical_agent": pharmaceutical_agents[idx]}
215
+
216
+ meta = get_experiment_metadata(experiment) # None or dict of metadata
217
+ if meta is not None:
218
+ keys = list(meta.keys())
219
+ for k in keys:
220
+ dico.update({k: meta[k]})
221
+
222
+ return dico
73
223
 
74
224
 
75
225
  def get_experiment_wells(experiment):
@@ -109,6 +259,38 @@ def get_experiment_wells(experiment):
109
259
 
110
260
  def get_config(experiment):
111
261
 
262
+ """
263
+ Retrieves the path to the configuration file for a given experiment.
264
+
265
+ Parameters
266
+ ----------
267
+ experiment : str
268
+ The file system path to the experiment directory.
269
+
270
+ Returns
271
+ -------
272
+ str
273
+ The full path to the configuration file (`config.ini`) within the experiment directory.
274
+
275
+ Raises
276
+ ------
277
+ AssertionError
278
+ If the `config.ini` file does not exist in the specified experiment directory.
279
+
280
+ Notes
281
+ -----
282
+ - The function ensures that the provided experiment path ends with the appropriate file separator (`os.sep`)
283
+ before appending `config.ini` to locate the configuration file.
284
+ - The configuration file is expected to be named `config.ini` and located at the root of the experiment directory.
285
+
286
+ Example
287
+ -------
288
+ >>> experiment = "/path/to/experiment"
289
+ >>> config_path = get_config(experiment)
290
+ >>> print(config_path)
291
+ '/path/to/experiment/config.ini'
292
+ """
293
+
112
294
  if not experiment.endswith(os.sep):
113
295
  experiment += os.sep
114
296
 
@@ -120,7 +302,42 @@ def get_config(experiment):
120
302
 
121
303
  def get_spatial_calibration(experiment):
122
304
 
123
-
305
+ """
306
+ Retrieves the spatial calibration factor for an experiment.
307
+
308
+ Parameters
309
+ ----------
310
+ experiment : str
311
+ The file system path to the experiment directory.
312
+
313
+ Returns
314
+ -------
315
+ float
316
+ The spatial calibration factor (pixels to micrometers conversion), extracted from the experiment's configuration file.
317
+
318
+ Raises
319
+ ------
320
+ AssertionError
321
+ If the configuration file (`config.ini`) does not exist in the specified experiment directory.
322
+ KeyError
323
+ If the "pxtoum" key is not found under the "MovieSettings" section in the configuration file.
324
+ ValueError
325
+ If the retrieved "pxtoum" value cannot be converted to a float.
326
+
327
+ Notes
328
+ -----
329
+ - The function retrieves the calibration factor by first locating the configuration file for the experiment using `get_config()`.
330
+ - It expects the configuration file to have a section named `MovieSettings` containing the key `pxtoum`.
331
+ - This factor defines the conversion from pixels to micrometers for spatial measurements.
332
+
333
+ Example
334
+ -------
335
+ >>> experiment = "/path/to/experiment"
336
+ >>> calibration = get_spatial_calibration(experiment)
337
+ >>> print(calibration)
338
+ 0.325 # pixels-to-micrometers conversion factor
339
+ """
340
+
124
341
  config = get_config(experiment)
125
342
  PxToUm = float(ConfigSectionMap(config, "MovieSettings")["pxtoum"])
126
343
 
@@ -129,14 +346,96 @@ def get_spatial_calibration(experiment):
129
346
 
130
347
  def get_temporal_calibration(experiment):
131
348
 
349
+ """
350
+ Retrieves the temporal calibration factor for an experiment.
351
+
352
+ Parameters
353
+ ----------
354
+ experiment : str
355
+ The file system path to the experiment directory.
356
+
357
+ Returns
358
+ -------
359
+ float
360
+ The temporal calibration factor (frames to minutes conversion), extracted from the experiment's configuration file.
361
+
362
+ Raises
363
+ ------
364
+ AssertionError
365
+ If the configuration file (`config.ini`) does not exist in the specified experiment directory.
366
+ KeyError
367
+ If the "frametomin" key is not found under the "MovieSettings" section in the configuration file.
368
+ ValueError
369
+ If the retrieved "frametomin" value cannot be converted to a float.
370
+
371
+ Notes
372
+ -----
373
+ - The function retrieves the calibration factor by locating the configuration file for the experiment using `get_config()`.
374
+ - It expects the configuration file to have a section named `MovieSettings` containing the key `frametomin`.
375
+ - This factor defines the conversion from frames to minutes for temporal measurements.
376
+
377
+ Example
378
+ -------
379
+ >>> experiment = "/path/to/experiment"
380
+ >>> calibration = get_temporal_calibration(experiment)
381
+ >>> print(calibration)
382
+ 0.5 # frames-to-minutes conversion factor
383
+ """
384
+
132
385
  config = get_config(experiment)
133
386
  FrameToMin = float(ConfigSectionMap(config, "MovieSettings")["frametomin"])
134
387
 
135
388
  return FrameToMin
136
389
 
390
+ def get_experiment_metadata(experiment):
391
+
392
+ config = get_config(experiment)
393
+ metadata = ConfigSectionMap(config, "Metadata")
394
+ return metadata
395
+
137
396
 
138
397
  def get_experiment_concentrations(experiment, dtype=str):
139
398
 
399
+ """
400
+ Retrieves the concentrations associated with each well in an experiment.
401
+
402
+ Parameters
403
+ ----------
404
+ experiment : str
405
+ The file system path to the experiment directory.
406
+ dtype : type, optional
407
+ The data type to which the concentrations should be converted (default is `str`).
408
+
409
+ Returns
410
+ -------
411
+ numpy.ndarray
412
+ An array of concentrations for each well, converted to the specified data type.
413
+
414
+ Raises
415
+ ------
416
+ AssertionError
417
+ If the configuration file (`config.ini`) does not exist in the specified experiment directory.
418
+ KeyError
419
+ If the "concentrations" key is not found under the "Labels" section in the configuration file.
420
+ ValueError
421
+ If the retrieved concentrations cannot be converted to the specified data type.
422
+
423
+ Notes
424
+ -----
425
+ - The function retrieves the configuration file using `get_config()` and expects a section `Labels` containing
426
+ a key `concentrations`.
427
+ - The concentrations are assumed to be comma-separated values.
428
+ - If the number of wells does not match the number of concentrations, the function generates a default set
429
+ of values ranging from 0 to the number of wells minus 1.
430
+ - The resulting concentrations are converted to the specified `dtype` before being returned.
431
+
432
+ Example
433
+ -------
434
+ >>> experiment = "/path/to/experiment"
435
+ >>> concentrations = get_experiment_concentrations(experiment, dtype=float)
436
+ >>> print(concentrations)
437
+ [0.1, 0.2, 0.5, 1.0]
438
+ """
140
439
 
141
440
  config = get_config(experiment)
142
441
  wells = get_experiment_wells(experiment)
@@ -150,6 +449,48 @@ def get_experiment_concentrations(experiment, dtype=str):
150
449
 
151
450
 
152
451
  def get_experiment_cell_types(experiment, dtype=str):
452
+
453
+ """
454
+ Retrieves the cell types associated with each well in an experiment.
455
+
456
+ Parameters
457
+ ----------
458
+ experiment : str
459
+ The file system path to the experiment directory.
460
+ dtype : type, optional
461
+ The data type to which the cell types should be converted (default is `str`).
462
+
463
+ Returns
464
+ -------
465
+ numpy.ndarray
466
+ An array of cell types for each well, converted to the specified data type.
467
+
468
+ Raises
469
+ ------
470
+ AssertionError
471
+ If the configuration file (`config.ini`) does not exist in the specified experiment directory.
472
+ KeyError
473
+ If the "cell_types" key is not found under the "Labels" section in the configuration file.
474
+ ValueError
475
+ If the retrieved cell types cannot be converted to the specified data type.
476
+
477
+ Notes
478
+ -----
479
+ - The function retrieves the configuration file using `get_config()` and expects a section `Labels` containing
480
+ a key `cell_types`.
481
+ - The cell types are assumed to be comma-separated values.
482
+ - If the number of wells does not match the number of cell types, the function generates a default set
483
+ of values ranging from 0 to the number of wells minus 1.
484
+ - The resulting cell types are converted to the specified `dtype` before being returned.
485
+
486
+ Example
487
+ -------
488
+ >>> experiment = "/path/to/experiment"
489
+ >>> cell_types = get_experiment_cell_types(experiment, dtype=str)
490
+ >>> print(cell_types)
491
+ ['TypeA', 'TypeB', 'TypeC', 'TypeD']
492
+ """
493
+
153
494
  config = get_config(experiment)
154
495
  wells = get_experiment_wells(experiment)
155
496
  nbr_of_wells = len(wells)
@@ -163,6 +504,44 @@ def get_experiment_cell_types(experiment, dtype=str):
163
504
 
164
505
  def get_experiment_antibodies(experiment, dtype=str):
165
506
 
507
+ """
508
+ Retrieve the list of antibodies used in an experiment.
509
+
510
+ This function extracts antibody labels for the wells in the given experiment
511
+ based on the configuration file. If the number of wells does not match the
512
+ number of antibody labels provided in the configuration, it generates a
513
+ sequence of default numeric labels.
514
+
515
+ Parameters
516
+ ----------
517
+ experiment : str
518
+ The identifier or name of the experiment to retrieve antibodies for.
519
+ dtype : type, optional
520
+ The data type to which the antibody labels should be cast. Default is `str`.
521
+
522
+ Returns
523
+ -------
524
+ numpy.ndarray
525
+ An array of antibody labels with the specified data type. If no antibodies
526
+ are specified or there is a mismatch, numeric labels are generated instead.
527
+
528
+ Notes
529
+ -----
530
+ - The function assumes the experiment's configuration can be loaded using
531
+ `get_config` and that the antibodies are listed under the "Labels" section
532
+ with the key `"antibodies"`.
533
+ - A mismatch between the number of wells and antibody labels will result in
534
+ numeric labels generated using `numpy.linspace`.
535
+
536
+ Examples
537
+ --------
538
+ >>> get_experiment_antibodies("path/to/experiment1")
539
+ array(['A1', 'A2', 'A3'], dtype='<U2')
540
+
541
+ >>> get_experiment_antibodies("path/to/experiment2", dtype=int)
542
+ array([0, 1, 2])
543
+ """
544
+
166
545
  config = get_config(experiment)
167
546
  wells = get_experiment_wells(experiment)
168
547
  nbr_of_wells = len(wells)
@@ -175,6 +554,48 @@ def get_experiment_antibodies(experiment, dtype=str):
175
554
 
176
555
 
177
556
  def get_experiment_pharmaceutical_agents(experiment, dtype=str):
557
+
558
+ """
559
+ Retrieves the pharmaceutical agents associated with each well in an experiment.
560
+
561
+ Parameters
562
+ ----------
563
+ experiment : str
564
+ The file system path to the experiment directory.
565
+ dtype : type, optional
566
+ The data type to which the pharmaceutical agents should be converted (default is `str`).
567
+
568
+ Returns
569
+ -------
570
+ numpy.ndarray
571
+ An array of pharmaceutical agents for each well, converted to the specified data type.
572
+
573
+ Raises
574
+ ------
575
+ AssertionError
576
+ If the configuration file (`config.ini`) does not exist in the specified experiment directory.
577
+ KeyError
578
+ If the "pharmaceutical_agents" key is not found under the "Labels" section in the configuration file.
579
+ ValueError
580
+ If the retrieved pharmaceutical agent values cannot be converted to the specified data type.
581
+
582
+ Notes
583
+ -----
584
+ - The function retrieves the configuration file using `get_config()` and expects a section `Labels` containing
585
+ a key `pharmaceutical_agents`.
586
+ - The pharmaceutical agent names are assumed to be comma-separated values.
587
+ - If the number of wells does not match the number of pharmaceutical agents, the function generates a default set
588
+ of values ranging from 0 to the number of wells minus 1.
589
+ - The resulting pharmaceutical agent names are converted to the specified `dtype` before being returned.
590
+
591
+ Example
592
+ -------
593
+ >>> experiment = "/path/to/experiment"
594
+ >>> agents = get_experiment_pharmaceutical_agents(experiment, dtype=str)
595
+ >>> print(agents)
596
+ ['AgentA', 'AgentB', 'AgentC', 'AgentD']
597
+ """
598
+
178
599
  config = get_config(experiment)
179
600
  wells = get_experiment_wells(experiment)
180
601
  nbr_of_wells = len(wells)
@@ -485,59 +906,61 @@ def get_position_movie_path(pos, prefix=''):
485
906
 
486
907
  def load_experiment_tables(experiment, population='targets', well_option='*', position_option='*',
487
908
  return_pos_info=False, load_pickle=False):
909
+
488
910
  """
489
- Loads and aggregates data tables for specified wells and positions within an experiment,
490
- optionally returning position information alongside the aggregated data table.
911
+ Load tabular data for an experiment, optionally including position-level information.
491
912
 
492
- This function collects data from tables associated with specific population types across
493
- various wells and positions within an experiment. It uses the experiment's configuration
494
- to gather metadata such as movie prefix, concentrations, cell types, antibodies, and
495
- pharmaceutical agents. Users can specify which wells and positions to include in the
496
- aggregation through pattern matching, and whether to include detailed position information
497
- in the output.
913
+ This function retrieves and processes tables associated with positions in an experiment.
914
+ It supports filtering by wells and positions, and can load either CSV data or pickle files.
498
915
 
499
916
  Parameters
500
917
  ----------
501
918
  experiment : str
502
- The path to the experiment directory.
919
+ Path to the experiment folder to load data for.
503
920
  population : str, optional
504
- The population type to filter the tables by (default is 'targets' among 'targets and "effectors').
505
- well_option : str, optional
506
- A pattern to specify which wells to include (default is '*', which includes all wells).
507
- position_option : str, optional
508
- A pattern to specify which positions to include (default is '*', which includes all positions).
921
+ The population to extract from the position tables (`'targets'` or `'effectors'`). Default is `'targets'`.
922
+ well_option : str or list, optional
923
+ Specifies which wells to include. Default is `'*'`, meaning all wells.
924
+ position_option : str or list, optional
925
+ Specifies which positions to include within selected wells. Default is `'*'`, meaning all positions.
509
926
  return_pos_info : bool, optional
510
- If True, returns a tuple where the first element is the aggregated data table and the
511
- second element is detailed position information (default is False).
927
+ If `True`, also returns a DataFrame containing position-level metadata. Default is `False`.
928
+ load_pickle : bool, optional
929
+ If `True`, loads pre-processed pickle files for the positions instead of raw data. Default is `False`.
512
930
 
513
931
  Returns
514
932
  -------
515
- pandas.DataFrame or (pandas.DataFrame, pandas.DataFrame)
516
- If return_pos_info is False, returns a pandas DataFrame aggregating the data from the
517
- specified tables. If return_pos_info is True, returns a tuple where the first element
518
- is the aggregated data table and the second element is a DataFrame with detailed position
519
- information.
520
-
521
- Raises
522
- ------
523
- FileNotFoundError
524
- If the experiment directory does not exist or specified files within the directory cannot be found.
525
- ValueError
526
- If the specified well or position patterns do not match any directories.
933
+ df : pandas.DataFrame or None
934
+ A DataFrame containing aggregated data for the specified wells and positions, or `None` if no data is found.
935
+ The DataFrame includes metadata such as well and position identifiers, concentrations, antibodies, and other
936
+ experimental parameters.
937
+ df_pos_info : pandas.DataFrame, optional
938
+ A DataFrame with metadata for each position, including file paths and experimental details. Returned only
939
+ if `return_pos_info=True`.
527
940
 
528
941
  Notes
529
942
  -----
530
- - This function assumes that the naming conventions and directory structure of the experiment
531
- follow a specific format, as outlined in the experiment's configuration file.
532
- - The function utilizes several helper functions to extract metadata, interpret well and
533
- position patterns, and load individual position tables. Errors in these helper functions
534
- may propagate up and affect the behavior of this function.
943
+ - The function assumes the experiment's configuration includes details about movie prefixes, concentrations,
944
+ cell types, antibodies, and pharmaceutical agents.
945
+ - Wells and positions can be filtered using `well_option` and `position_option`, respectively. If filtering
946
+ fails or is invalid, those specific wells/positions are skipped.
947
+ - Position-level metadata is assembled into `df_pos_info` and includes paths to data and movies.
535
948
 
536
949
  Examples
537
950
  --------
538
- >>> load_experiment_tables('/path/to/experiment', population='targets', well_option='W1', position_option='1-*')
539
- # This will load and aggregate tables for the 'targets' population within well 'W1' and positions matching '1-*'.
951
+ Load all data for an experiment:
952
+
953
+ >>> df = load_experiment_tables("path/to/experiment1")
954
+
955
+ Load data for specific wells and positions, including position metadata:
540
956
 
957
+ >>> df, df_pos_info = load_experiment_tables(
958
+ ... "experiment_01", well_option=["A1", "B1"], position_option=[0, 1], return_pos_info=True
959
+ ... )
960
+
961
+ Use pickle files for faster loading:
962
+
963
+ >>> df = load_experiment_tables("experiment_01", load_pickle=True)
541
964
  """
542
965
 
543
966
  config = get_config(experiment)
@@ -548,6 +971,7 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
548
971
  cell_types = get_experiment_cell_types(experiment)
549
972
  antibodies = get_experiment_antibodies(experiment)
550
973
  pharmaceutical_agents = get_experiment_pharmaceutical_agents(experiment)
974
+ metadata = get_experiment_metadata(experiment) # None or dict of metadata
551
975
  well_labels = _extract_labels_from_config(config, len(wells))
552
976
 
553
977
  well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)
@@ -602,15 +1026,23 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
602
1026
  df_pos['antibody'] = well_antibody
603
1027
  df_pos['cell_type'] = well_cell_type
604
1028
  df_pos['pharmaceutical_agent'] = well_pharmaceutical_agent
1029
+ if metadata is not None:
1030
+ keys = list(metadata.keys())
1031
+ for k in keys:
1032
+ df_pos[k] = metadata[k]
605
1033
 
606
1034
  df.append(df_pos)
607
1035
  any_table = True
608
1036
 
609
- df_pos_info.append(
610
- {'pos_path': pos_path, 'pos_index': real_pos_index, 'pos_name': pos_name, 'table_path': table,
611
- 'stack_path': stack_path,
612
- 'well_path': well_path, 'well_index': real_well_index, 'well_name': well_name,
613
- 'well_number': well_number, 'well_alias': well_alias})
1037
+ pos_dict = {'pos_path': pos_path, 'pos_index': real_pos_index, 'pos_name': pos_name, 'table_path': table,
1038
+ 'stack_path': stack_path,'well_path': well_path, 'well_index': real_well_index, 'well_name': well_name,
1039
+ 'well_number': well_number, 'well_alias': well_alias}
1040
+ # if metadata is not None:
1041
+ # keys = list(metadata.keys())
1042
+ # for k in keys:
1043
+ # pos_dict.update({k: metadata[k]})
1044
+
1045
+ df_pos_info.append(pos_dict)
614
1046
 
615
1047
  real_pos_index += 1
616
1048
 
@@ -630,7 +1062,6 @@ def load_experiment_tables(experiment, population='targets', well_option='*', po
630
1062
  return df
631
1063
 
632
1064
 
633
-
634
1065
  def locate_stack(position, prefix='Aligned'):
635
1066
 
636
1067
  """
@@ -694,35 +1125,52 @@ def locate_stack(position, prefix='Aligned'):
694
1125
  def locate_labels(position, population='target', frames=None):
695
1126
 
696
1127
  """
1128
+ Locate and load label images for a given position and population in an experiment.
697
1129
 
698
- Locate and load labels for a specific population.
1130
+ This function retrieves and optionally loads labeled images (e.g., targets or effectors)
1131
+ for a specified position in an experiment. It supports loading all frames, a specific
1132
+ frame, or a list of frames.
699
1133
 
700
1134
  Parameters
701
1135
  ----------
702
1136
  position : str
703
- The position folder within the well where the stack is located.
1137
+ Path to the position directory containing label images.
704
1138
  population : str, optional
705
- The population for which to locate the labels.
706
- Valid options are 'target' and 'effector'.
707
- The default is 'target'.
1139
+ The population to load labels for. Options are `'target'` (or `'targets'`) and
1140
+ `'effector'` (or `'effectors'`). Default is `'target'`.
1141
+ frames : int, list of int, numpy.ndarray, or None, optional
1142
+ Specifies which frames to load:
1143
+ - `None`: Load all frames (default).
1144
+ - `int`: Load a single frame, identified by its index.
1145
+ - `list` or `numpy.ndarray`: Load multiple specific frames.
708
1146
 
709
1147
  Returns
710
1148
  -------
711
- labels : ndarray
712
- The loaded labels as a NumPy array.
1149
+ numpy.ndarray or list of numpy.ndarray
1150
+ If `frames` is `None` or a single integer, returns a NumPy array of the corresponding
1151
+ labels. If `frames` is a list or array, returns a list of NumPy arrays for each frame.
1152
+ If a frame is not found, `None` is returned for that frame.
713
1153
 
714
1154
  Notes
715
1155
  -----
716
- This function locates and loads the labels for a specific population based on the specified position.
717
- It assumes that the labels are stored in a directory named 'labels' or 'labels_effectors'
718
- within the specified position, depending on the population.
719
- The function loads the labels as a NumPy array.
1156
+ - The function assumes label images are stored in subdirectories named `"labels_targets"`
1157
+ or `"labels_effectors"`, with filenames formatted as `####.tif` (e.g., `0001.tif`).
1158
+ - Frame indices are zero-padded to four digits for matching.
1159
+ - If `frames` is invalid or a frame is not found, `None` is returned for that frame.
720
1160
 
721
1161
  Examples
722
1162
  --------
723
- >>> labels = locate_labels(position, population='target')
724
- # Locate and load labels for the target population.
1163
+ Load all label images for a position:
1164
+
1165
+ >>> labels = locate_labels("/path/to/position", population="target")
1166
+
1167
+ Load a single frame (frame index 3):
1168
+
1169
+ >>> label = locate_labels("/path/to/position", population="effector", frames=3)
1170
+
1171
+ Load multiple specific frames:
725
1172
 
1173
+ >>> labels = locate_labels("/path/to/position", population="target", frames=[0, 1, 2])
726
1174
  """
727
1175
 
728
1176
  if not position.endswith(os.sep):
@@ -791,7 +1239,7 @@ def fix_missing_labels(position, population='target', prefix='Aligned'):
791
1239
  position += os.sep
792
1240
 
793
1241
  stack = locate_stack(position, prefix=prefix)
794
- template = np.zeros((stack[0].shape[0], stack[0].shape[1]))
1242
+ template = np.zeros((stack[0].shape[0], stack[0].shape[1]),dtype=int)
795
1243
  all_frames = np.arange(len(stack))
796
1244
 
797
1245
  if population.lower() == "target" or population.lower() == "targets":
@@ -913,37 +1361,52 @@ def load_tracking_data(position, prefix="Aligned", population="target"):
913
1361
 
914
1362
 
915
1363
  def auto_load_number_of_frames(stack_path):
916
-
1364
+
917
1365
  """
1366
+ Automatically determine the number of frames in a TIFF image stack.
918
1367
 
919
- Automatically estimate the number of frames in a stack.
1368
+ This function extracts the number of frames (time slices) from the metadata of a TIFF file
1369
+ or infers it from the stack dimensions when metadata is unavailable. It is robust to
1370
+ variations in metadata structure and handles multi-channel images.
920
1371
 
921
1372
  Parameters
922
1373
  ----------
923
1374
  stack_path : str
924
- The file path to the stack.
1375
+ Path to the TIFF image stack file.
925
1376
 
926
1377
  Returns
927
1378
  -------
928
1379
  int or None
929
- The estimated number of frames in the stack. Returns None if the number of frames cannot be determined.
1380
+ The number of frames in the image stack. Returns `None` if the path is `None`
1381
+ or the frame count cannot be determined.
930
1382
 
931
1383
  Notes
932
1384
  -----
933
- This function attempts to estimate the number of frames in a stack by parsing the image description metadata.
934
- It reads the stack file using the TiffFile from the tifffile library.
935
- It searches for metadata fields containing information about the number of slices or frames.
936
- If the number of slices or frames is found, it returns the estimated length of the movie.
937
- If the number of slices or frames cannot be determined, it returns None.
1385
+ - The function attempts to extract the `frames` or `slices` attributes from the
1386
+ TIFF metadata, specifically the `ImageDescription` tag.
1387
+ - If metadata extraction fails, the function reads the image stack and infers
1388
+ the number of frames based on the stack dimensions.
1389
+ - Multi-channel stacks are handled by assuming the number of channels is specified
1390
+ in the metadata under the `channels` attribute.
938
1391
 
939
1392
  Examples
940
1393
  --------
941
- >>> len_movie = auto_load_number_of_frames(stack_path)
942
- # Automatically estimate the number of frames in the stack.
1394
+ Automatically detect the number of frames in a TIFF stack:
943
1395
 
944
- """
1396
+ >>> frames = auto_load_number_of_frames("experiment_stack.tif")
1397
+ Automatically detected stack length: 120...
1398
+
1399
+ Handle a single-frame TIFF:
945
1400
 
946
- # Try to estimate automatically # frames
1401
+ >>> frames = auto_load_number_of_frames("single_frame_stack.tif")
1402
+ Automatically detected stack length: 1...
1403
+
1404
+ Handle invalid or missing paths gracefully:
1405
+
1406
+ >>> frames = auto_load_number_of_frames(None)
1407
+ >>> print(frames)
1408
+ None
1409
+ """
947
1410
 
948
1411
  if stack_path is None:
949
1412
  return None
@@ -1000,6 +1463,47 @@ def auto_load_number_of_frames(stack_path):
1000
1463
 
1001
1464
 
1002
1465
  def parse_isotropic_radii(string):
1466
+
1467
+ """
1468
+ Parse a string representing isotropic radii into a structured list.
1469
+
1470
+ This function extracts integer values and ranges (denoted by square brackets)
1471
+ from a string input and returns them as a list. Single values are stored as integers,
1472
+ while ranges are represented as lists of two integers.
1473
+
1474
+ Parameters
1475
+ ----------
1476
+ string : str
1477
+ The input string containing radii and ranges, separated by commas or spaces.
1478
+ Ranges should be enclosed in square brackets, e.g., `[1 2]`.
1479
+
1480
+ Returns
1481
+ -------
1482
+ list
1483
+ A list of parsed radii where:
1484
+ - Single integers are included as `int`.
1485
+ - Ranges are included as two-element lists `[start, end]`.
1486
+
1487
+ Examples
1488
+ --------
1489
+ Parse a string with single radii and ranges:
1490
+
1491
+ >>> parse_isotropic_radii("1, [2 3], 4")
1492
+ [1, [2, 3], 4]
1493
+
1494
+ Handle inputs with mixed delimiters:
1495
+
1496
+ >>> parse_isotropic_radii("5 [6 7], 8")
1497
+ [5, [6, 7], 8]
1498
+
1499
+ Notes
1500
+ -----
1501
+ - The function splits the input string by commas or spaces.
1502
+ - It identifies ranges using square brackets and assumes that ranges are always
1503
+ two consecutive values.
1504
+ - Non-integer sections of the string are ignored.
1505
+ """
1506
+
1003
1507
  sections = re.split(',| ', string)
1004
1508
  radii = []
1005
1509
  for k, s in enumerate(sections):
@@ -1061,6 +1565,53 @@ def get_tracking_configs_list(return_path=False):
1061
1565
 
1062
1566
  def interpret_tracking_configuration(config):
1063
1567
 
1568
+ """
1569
+ Interpret and resolve the path for a tracking configuration file.
1570
+
1571
+ This function determines the appropriate configuration file path based on the input.
1572
+ If the input is a string representing an existing path or a known configuration name,
1573
+ it resolves to the correct file path. If the input is invalid or `None`, a default
1574
+ configuration is returned.
1575
+
1576
+ Parameters
1577
+ ----------
1578
+ config : str or None
1579
+ The input configuration, which can be:
1580
+ - A string representing the full path to a configuration file.
1581
+ - A short name of a configuration file without the `.json` extension.
1582
+ - `None` to use a default configuration.
1583
+
1584
+ Returns
1585
+ -------
1586
+ str
1587
+ The resolved path to the configuration file.
1588
+
1589
+ Notes
1590
+ -----
1591
+ - If `config` is a string and the specified path exists, it is returned as-is.
1592
+ - If `config` is a name, the function searches in the `tracking_configs` directory
1593
+ within the `celldetective` models folder.
1594
+ - If the file or name is not found, or if `config` is `None`, the function falls
1595
+ back to a default configuration using `cell_config()`.
1596
+
1597
+ Examples
1598
+ --------
1599
+ Resolve a full path:
1600
+
1601
+ >>> interpret_tracking_configuration("/path/to/config.json")
1602
+ '/path/to/config.json'
1603
+
1604
+ Resolve a named configuration:
1605
+
1606
+ >>> interpret_tracking_configuration("default_tracking")
1607
+ '/path/to/celldetective/models/tracking_configs/default_tracking.json'
1608
+
1609
+ Handle `None` to return the default configuration:
1610
+
1611
+ >>> interpret_tracking_configuration(None)
1612
+ '/path/to/default/config.json'
1613
+ """
1614
+
1064
1615
  if isinstance(config, str):
1065
1616
  if os.path.exists(config):
1066
1617
  return config
@@ -1180,6 +1731,61 @@ def get_pair_signal_models_list(return_path=False):
1180
1731
 
1181
1732
  def locate_signal_model(name, path=None, pairs=False):
1182
1733
 
1734
+ """
1735
+ Locate a signal detection model by name, either locally or from Zenodo.
1736
+
1737
+ This function searches for a signal detection model with the specified name in the local
1738
+ `celldetective` directory. If the model is not found locally, it attempts to download
1739
+ the model from Zenodo.
1740
+
1741
+ Parameters
1742
+ ----------
1743
+ name : str
1744
+ The name of the signal detection model to locate.
1745
+ path : str, optional
1746
+ An additional directory path to search for the model. If provided, this directory
1747
+ is also scanned for matching models. Default is `None`.
1748
+ pairs : bool, optional
1749
+ If `True`, searches for paired signal detection models in the `pair_signal_detection`
1750
+ subdirectory. If `False`, searches in the `signal_detection` subdirectory. Default is `False`.
1751
+
1752
+ Returns
1753
+ -------
1754
+ str or None
1755
+ The full path to the located model directory if found, or `None` if the model is not available
1756
+ locally or on Zenodo.
1757
+
1758
+ Notes
1759
+ -----
1760
+ - The function first searches in the `celldetective/models/signal_detection` or
1761
+ `celldetective/models/pair_signal_detection` directory based on the `pairs` argument.
1762
+ - If a `path` is specified, it is searched in addition to the default directories.
1763
+ - If the model is not found locally, the function queries Zenodo for the model. If available,
1764
+ the model is downloaded to the appropriate `celldetective` subdirectory.
1765
+
1766
+ Examples
1767
+ --------
1768
+ Search for a signal detection model locally:
1769
+
1770
+ >>> locate_signal_model("example_model")
1771
+ 'path/to/celldetective/models/signal_detection/example_model/'
1772
+
1773
+ Search for a paired signal detection model:
1774
+
1775
+ >>> locate_signal_model("paired_model", pairs=True)
1776
+ 'path/to/celldetective/models/pair_signal_detection/paired_model/'
1777
+
1778
+ Include an additional search path:
1779
+
1780
+ >>> locate_signal_model("custom_model", path="/additional/models/")
1781
+ '/additional/models/custom_model/'
1782
+
1783
+ Handle a model available only on Zenodo:
1784
+
1785
+ >>> locate_signal_model("remote_model")
1786
+ 'path/to/celldetective/models/signal_detection/remote_model/'
1787
+ """
1788
+
1183
1789
  main_dir = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective"])
1184
1790
  modelpath = os.sep.join([main_dir, "models", "signal_detection", os.sep])
1185
1791
  if pairs:
@@ -1206,6 +1812,48 @@ def locate_signal_model(name, path=None, pairs=False):
1206
1812
  return match
1207
1813
 
1208
1814
  def locate_pair_signal_model(name, path=None):
1815
+
1816
+ """
1817
+ Locate a pair signal detection model by name.
1818
+
1819
+ This function searches for a pair signal detection model in the default
1820
+ `celldetective` directory and optionally in an additional user-specified path.
1821
+
1822
+ Parameters
1823
+ ----------
1824
+ name : str
1825
+ The name of the pair signal detection model to locate.
1826
+ path : str, optional
1827
+ An additional directory path to search for the model. If provided, this directory
1828
+ is also scanned for matching models. Default is `None`.
1829
+
1830
+ Returns
1831
+ -------
1832
+ str or None
1833
+ The full path to the located model directory if found, or `None` if no matching
1834
+ model is located.
1835
+
1836
+ Notes
1837
+ -----
1838
+ - The function first searches in the default `celldetective/models/pair_signal_detection`
1839
+ directory.
1840
+ - If a `path` is specified, it is searched in addition to the default directory.
1841
+ - The function prints the search path and model name during execution.
1842
+
1843
+ Examples
1844
+ --------
1845
+ Locate a model in the default directory:
1846
+
1847
+ >>> locate_pair_signal_model("example_model")
1848
+ 'path/to/celldetective/models/pair_signal_detection/example_model/'
1849
+
1850
+ Include an additional search directory:
1851
+
1852
+ >>> locate_pair_signal_model("custom_model", path="/additional/models/")
1853
+ '/additional/models/custom_model/'
1854
+ """
1855
+
1856
+
1209
1857
  main_dir = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective"])
1210
1858
  modelpath = os.sep.join([main_dir, "models", "pair_signal_detection", os.sep])
1211
1859
  print(f'Looking for {name} in {modelpath}')
@@ -1215,74 +1863,118 @@ def locate_pair_signal_model(name, path=None):
1215
1863
  path += os.sep
1216
1864
  models += glob(path + f'*{os.sep}')
1217
1865
 
1218
- def relabel_segmentation(labels, data, properties, column_labels={'track': "track", 'frame': 'frame', 'y': 'y', 'x': 'x', 'label': 'class_id'}, threads=1):
1866
+ def relabel_segmentation(labels, df, exclude_nans=True, column_labels={'track': "TRACK_ID", 'frame': 'FRAME', 'y': 'POSITION_Y', 'x': 'POSITION_X', 'label': 'class_id'}, threads=1):
1219
1867
 
1220
1868
  """
1869
+ Relabel the segmentation labels with the tracking IDs from the tracks.
1221
1870
 
1222
- Relabel the segmentation labels based on the provided tracking data and properties.
1871
+ The function reassigns the mask value for each cell with the associated `TRACK_ID`, if it exists
1872
+ in the trajectory table (`df`). If no track uses the cell mask, a new track with a single point
1873
+ is generated on the fly (max of `TRACK_ID` values + i, for i=0 to N such cells). It supports
1874
+ multithreaded processing for faster execution on large datasets.
1223
1875
 
1224
1876
  Parameters
1225
1877
  ----------
1226
- labels : ndarray
1227
- The original segmentation labels.
1228
- data : ndarray
1229
- The tracking data containing information about tracks, frames, y-coordinates, and x-coordinates.
1230
- properties : ndarray
1231
- The properties associated with the tracking data.
1878
+ labels : np.ndarray
1879
+ A (TYX) array where each frame contains a 2D segmentation mask. Each unique
1880
+ non-zero integer represents a labeled object.
1881
+ df : pandas.DataFrame
1882
+ A DataFrame containing tracking information with columns
1883
+ specified in `column_labels`. Must include at least frame, track ID, and object ID.
1884
+ exclude_nans : bool, optional
1885
+ Whether to exclude rows in `df` with NaN values in the column specified by
1886
+ `column_labels['label']`. Default is `True`.
1232
1887
  column_labels : dict, optional
1233
- A dictionary specifying the column labels for the tracking data. The default is {'track': "track",
1234
- 'frame': 'frame', 'y': 'y', 'x': 'x', 'label': 'class_id'}.
1888
+ A dictionary specifying the column names in `df`. Default is:
1889
+ - `'track'`: Track ID column name (`"TRACK_ID"`)
1890
+ - `'frame'`: Frame column name (`"FRAME"`)
1891
+ - `'y'`: Y-coordinate column name (`"POSITION_Y"`)
1892
+ - `'x'`: X-coordinate column name (`"POSITION_X"`)
1893
+ - `'label'`: Object ID column name (`"class_id"`)
1894
+ threads : int, optional
1895
+ Number of threads to use for multithreaded processing. Default is `1`.
1235
1896
 
1236
1897
  Returns
1237
1898
  -------
1238
- ndarray
1239
- The relabeled segmentation labels.
1899
+ np.ndarray
1900
+ A new (TYX) array with the same shape as `labels`, where objects are relabeled
1901
+ according to their tracking identity in `df`.
1240
1902
 
1241
1903
  Notes
1242
1904
  -----
1243
- This function relabels the segmentation labels based on the provided tracking data and properties.
1244
- It creates a DataFrame from the tracking data and properties, merges them based on the indices, and sorts them by track and frame.
1245
- Then, it iterates over unique frames in the DataFrame, retrieves the tracks and identities at each frame,
1246
- and updates the corresponding labels with the new track values.
1905
+ - For frames where labeled objects in `labels` do not match any entries in the `df`,
1906
+ new track IDs are generated for the unmatched labels.
1907
+ - The relabeling process maintains synchronization across threads using a shared
1908
+ counter for generating unique track IDs.
1247
1909
 
1248
1910
  Examples
1249
1911
  --------
1250
- >>> relabeled = relabel_segmentation(labels, data, properties, column_labels={'track': "track", 'frame': 'frame',
1251
- ... 'y': 'y', 'x': 'x', 'label': 'class_id'})
1252
- # Relabel the segmentation labels based on the provided tracking data and properties.
1253
-
1254
- """
1912
+ Relabel segmentation using tracking data:
1255
1913
 
1914
+ >>> labels = np.random.randint(0, 5, (10, 100, 100))
1915
+ >>> df = pd.DataFrame({
1916
+ ... "TRACK_ID": [1, 2, 1, 2],
1917
+ ... "FRAME": [0, 0, 1, 1],
1918
+ ... "class_id": [1, 2, 1, 2],
1919
+ ... })
1920
+ >>> new_labels = relabel_segmentation(labels, df, threads=2)
1921
+ Done.
1256
1922
 
1257
- n_threads = threads
1258
- if data.shape[1]==4:
1259
- df = pd.DataFrame(data,columns=[column_labels['track'],column_labels['frame'],column_labels['y'],column_labels['x']])
1260
- else:
1261
- df = pd.DataFrame(data,columns=[column_labels['track'],column_labels['frame'],'z', column_labels['y'],column_labels['x']])
1262
- df = df.drop(columns=['z'])
1923
+ Use custom column labels and exclude rows with NaNs:
1924
+
1925
+ >>> column_labels = {
1926
+ ... 'track': "track_id",
1927
+ ... 'frame': "time",
1928
+ ... 'label': "object_id"
1929
+ ... }
1930
+ >>> new_labels = relabel_segmentation(labels, df, column_labels=column_labels, exclude_nans=True)
1931
+ Done.
1932
+ """
1263
1933
 
1264
- df = df.merge(pd.DataFrame(properties),left_index=True, right_index=True)
1934
+ n_threads = threads
1265
1935
  df = df.sort_values(by=[column_labels['track'],column_labels['frame']])
1266
- df.loc[df['dummy'],column_labels['label']] = np.nan
1936
+ if exclude_nans:
1937
+ df = df.dropna(subset=column_labels['label'])
1267
1938
 
1268
1939
  new_labels = np.zeros_like(labels)
1940
+ shared_data = {"s": 0}
1269
1941
 
1270
1942
  def rewrite_labels(indices):
1271
1943
 
1944
+ all_track_ids = df[column_labels['track']].unique()
1945
+
1272
1946
  for t in tqdm(indices):
1273
1947
 
1274
1948
  f = int(t)
1275
1949
  cells = df.loc[df[column_labels['frame']] == f, [column_labels['track'], column_labels['label']]].to_numpy()
1276
- tracks_at_t = cells[:,0]
1277
- identities = cells[:,1]
1950
+ tracks_at_t = list(cells[:,0])
1951
+ identities = list(cells[:,1])
1952
+
1953
+ labels_at_t = list(np.unique(labels[f]))
1954
+ if 0 in labels_at_t:
1955
+ labels_at_t.remove(0)
1956
+ labels_not_in_df = [lbl for lbl in labels_at_t if lbl not in identities]
1957
+ for lbl in labels_not_in_df:
1958
+ with threading.Lock(): # Synchronize access to `shared_data["s"]`
1959
+ track_id = max(all_track_ids) + shared_data["s"]
1960
+ shared_data["s"] += 1
1961
+ tracks_at_t.append(track_id)
1962
+ identities.append(lbl)
1278
1963
 
1279
1964
  # exclude NaN
1965
+ tracks_at_t = np.array(tracks_at_t)
1966
+ identities = np.array(identities)
1967
+
1280
1968
  tracks_at_t = tracks_at_t[identities == identities]
1281
1969
  identities = identities[identities == identities]
1282
1970
 
1283
1971
  for k in range(len(identities)):
1972
+
1973
+ # need routine to check values from labels not in class_id of this frame and add new track id
1974
+
1284
1975
  loc_i, loc_j = np.where(labels[f] == identities[k])
1285
- new_labels[f, loc_i, loc_j] = round(tracks_at_t[k])
1976
+ track_id = tracks_at_t[k]
1977
+ new_labels[f, loc_i, loc_j] = round(track_id)
1286
1978
 
1287
1979
  # Multithreading
1288
1980
  indices = list(df[column_labels['frame']].unique())
@@ -1296,90 +1988,308 @@ def relabel_segmentation(labels, data, properties, column_labels={'track': "trac
1296
1988
  return new_labels
1297
1989
 
1298
1990
 
1299
- def control_tracking_btrack(position, prefix="Aligned", population="target", relabel=True, flush_memory=True, threads=1):
1991
+ def control_tracks(position, prefix="Aligned", population="target", relabel=True, flush_memory=True, threads=1):
1300
1992
 
1301
1993
  """
1302
- Load the necessary data for visualization of bTrack trajectories in napari.
1994
+ Controls the tracking of cells or objects within a given position by locating the relevant image stack and label data,
1995
+ and then visualizing and managing the tracks in the Napari viewer.
1303
1996
 
1304
1997
  Parameters
1305
1998
  ----------
1306
1999
  position : str
1307
- The path to the position directory.
1308
- prefix : str, optional
1309
- The prefix used to identify the movie file. The default is "Aligned".
1310
- population : str, optional
1311
- The population type to load, either "target" or "effector". The default is "target".
2000
+ The path to the directory containing the position's data. The function will ensure the path uses forward slashes.
2001
+
2002
+ prefix : str, optional, default="Aligned"
2003
+ The prefix of the file names for the image stack and labels. This parameter helps locate the relevant data files.
2004
+
2005
+ population : str, optional, default="target"
2006
+ The population to be tracked, typically either "target" or "effectors". This is used to identify the group of interest for tracking.
2007
+
2008
+ relabel : bool, optional, default=True
2009
+ If True, will relabel the tracks, potentially assigning new track IDs to the detected objects.
2010
+
2011
+ flush_memory : bool, optional, default=True
2012
+ If True, will flush memory after processing to free up resources.
2013
+
2014
+ threads : int, optional, default=1
2015
+ The number of threads to use for processing. This can speed up the task in multi-threaded environments.
1312
2016
 
1313
2017
  Returns
1314
2018
  -------
1315
2019
  None
1316
- This function displays the data in Napari for visualization and analysis.
1317
-
1318
- Examples
1319
- --------
1320
- >>> control_tracking_btrack("path/to/position", population="target")
1321
- # Executes napari for visualization of target trajectories.
2020
+ The function performs visualization and management of tracks in the Napari viewer. It does not return any value.
1322
2021
 
2022
+ Notes
2023
+ -----
2024
+ - This function assumes that the necessary data for tracking (stack and labels) are located in the specified position directory.
2025
+ - The `locate_stack_and_labels` function is used to retrieve the image stack and labels from the specified directory.
2026
+ - The tracks are visualized using the `view_tracks_in_napari` function, which handles the display in the Napari viewer.
2027
+ - The function can be used for tracking biological entities (e.g., cells) and their movement across time frames in an image stack.
2028
+
2029
+ Example
2030
+ -------
2031
+ >>> control_tracks("/path/to/data/position_1", prefix="Aligned", population="target", relabel=True, flush_memory=True, threads=4)
1323
2032
  """
2033
+
2034
+ if not position.endswith(os.sep):
2035
+ position += os.sep
2036
+
2037
+ position = position.replace('\\','/')
2038
+ stack, labels = locate_stack_and_labels(position, prefix=prefix, population=population)
1324
2039
 
1325
- data, properties, graph, labels, stack = load_napari_data(position, prefix=prefix, population=population)
1326
- view_on_napari_btrack(data, properties, graph, labels=labels, stack=stack, relabel=relabel,
2040
+ view_tracks_in_napari(position, population, labels=labels, stack=stack, relabel=relabel,
1327
2041
  flush_memory=flush_memory, threads=threads)
1328
2042
 
1329
2043
 
1330
- def view_on_napari_btrack(data, properties, graph, stack=None, labels=None, relabel=True, flush_memory=True,
1331
- position=None, threads=1):
2044
+ def tracks_to_btrack(df, exclude_nans=False):
2045
+
1332
2046
  """
1333
-
1334
- Visualize btrack data, including stack, labels, points, and tracks, using the napari viewer.
2047
+ Converts a dataframe of tracked objects into the bTrack output format.
2048
+ The function prepares tracking data, properties, and an empty graph structure for further processing.
1335
2049
 
1336
2050
  Parameters
1337
2051
  ----------
1338
- data : ndarray
1339
- The btrack data containing information about tracks.
1340
- properties : ndarray
1341
- The properties associated with the btrack data.
1342
- graph : Graph
1343
- The btrack graph containing information about track connections.
1344
- stack : ndarray, optional
1345
- The stack of images to visualize. The default is None.
1346
- labels : ndarray, optional
1347
- The segmentation labels to visualize. The default is None.
1348
- relabel : bool, optional
1349
- Specify whether to relabel the segmentation labels using the provided data. The default is True.
2052
+ df : pandas.DataFrame
2053
+ A dataframe containing tracking information. The dataframe must have columns for `TRACK_ID`,
2054
+ `FRAME`, `POSITION_Y`, `POSITION_X`, and `class_id` (among others).
2055
+
2056
+ exclude_nans : bool, optional, default=False
2057
+ If True, rows with NaN values in the `class_id` column will be excluded from the dataset.
2058
+ If False, the dataframe will retain all rows, including those with NaN in `class_id`.
2059
+
2060
+ Returns
2061
+ -------
2062
+ data : numpy.ndarray
2063
+ A 2D numpy array containing the tracking data with columns `[TRACK_ID, FRAME, z, POSITION_Y, POSITION_X]`.
2064
+ The `z` column is set to zero for all rows.
2065
+
2066
+ properties : dict
2067
+ A dictionary where keys are property names (e.g., 'FRAME', 'state', 'generation', etc.) and values are numpy arrays
2068
+ containing the corresponding values from the dataframe.
1350
2069
 
2070
+ graph : dict
2071
+ An empty dictionary intended to store graph-related information for the tracking data. It can be extended
2072
+ later to represent relationships between different tracking objects.
2073
+
1351
2074
  Notes
1352
2075
  -----
1353
- This function visualizes btrack data using the napari viewer. It adds the stack, labels, points,
1354
- and tracks to the viewer for visualization. If `relabel` is True and labels are provided, it calls
1355
- the `relabel_segmentation` function to relabel the segmentation labels based on the provided data.
2076
+ - The function assumes that the dataframe contains specific columns: `TRACK_ID`, `FRAME`, `POSITION_Y`, `POSITION_X`,
2077
+ and `class_id`. These columns are used to construct the tracking data and properties.
2078
+ - The `z` coordinate is set to 0 for all tracks since the function does not process 3D data.
2079
+ - This function is useful for transforming tracking data into a format that can be used by tracking graph algorithms.
2080
+
2081
+ Example
2082
+ -------
2083
+ >>> data, properties, graph = tracks_to_btrack(df, exclude_nans=True)
2084
+ """
2085
+
2086
+ graph = {}
2087
+ if exclude_nans:
2088
+ df.dropna(subset='class_id',inplace=True)
2089
+
2090
+ df["z"] = 0.
2091
+ data = df[["TRACK_ID","FRAME","z","POSITION_Y","POSITION_X"]].to_numpy()
2092
+
2093
+ df['dummy'] = False
2094
+ prop_cols = ['FRAME','state','generation','root','parent','dummy','class_id']
2095
+ properties = {}
2096
+ for col in prop_cols:
2097
+ properties.update({col: df[col].to_numpy()})
2098
+
2099
+ return data, properties, graph
2100
+
2101
+ def tracks_to_napari(df, exclude_nans=False):
2102
+
2103
+ data, properties, graph = tracks_to_btrack(df, exclude_nans=exclude_nans)
2104
+ vertices = data[:, [1,-2,-1]]
2105
+ if data.shape[1]==4:
2106
+ tracks = data
2107
+ else:
2108
+ tracks = data[:,[0,1,3,4]]
2109
+ return vertices, tracks, properties, graph
1356
2110
 
1357
- Examples
1358
- --------
1359
- >>> view_on_napari_btrack(data, properties, graph, stack=stack, labels=labels, relabel=True)
1360
- # Visualize btrack data, including stack, labels, points, and tracks, using the napari viewer.
1361
2111
 
2112
+ def view_tracks_in_napari(position, population, stack=None, labels=None, relabel=True, flush_memory=True, threads=1):
2113
+
2114
+ """
2115
+ Updated
1362
2116
  """
1363
2117
 
2118
+ df, df_path = get_position_table(position, population=population, return_path=True)
2119
+ if df is None:
2120
+ print('Please compute trajectories first... Abort...')
2121
+ return None
2122
+ shared_data = {"df": df, "path": df_path, "position": position, "population": population, 'selected_frame': None}
2123
+
1364
2124
  if (labels is not None) * relabel:
1365
2125
  print('Replacing the cell mask labels with the track ID...')
1366
- labels = relabel_segmentation(labels, data, properties, threads=threads)
2126
+ labels = relabel_segmentation(labels, df, exclude_nans=True, threads=threads)
1367
2127
 
1368
- vertices = data[:, [1,-2,-1]]
2128
+ vertices, tracks, properties, graph = tracks_to_napari(df, exclude_nans=True)
1369
2129
 
1370
2130
  viewer = napari.Viewer()
1371
2131
  if stack is not None:
1372
2132
  viewer.add_image(stack, channel_axis=-1, colormap=["gray"] * stack.shape[-1])
1373
2133
  if labels is not None:
1374
- viewer.add_labels(labels.astype(int), name='segmentation', opacity=0.4)
2134
+ labels_layer = viewer.add_labels(labels.astype(int), name='segmentation', opacity=0.4)
1375
2135
  viewer.add_points(vertices, size=4, name='points', opacity=0.3)
1376
- if data.shape[1]==4:
1377
- viewer.add_tracks(data, properties=properties, graph=graph, name='tracks')
1378
- else:
1379
- viewer.add_tracks(data[:,[0,1,3,4]], properties=properties, graph=graph, name='tracks')
2136
+ viewer.add_tracks(tracks, properties=properties, graph=graph, name='tracks')
2137
+
2138
+ def lock_controls(layer, widgets=(), locked=True):
2139
+ qctrl = viewer.window.qt_viewer.controls.widgets[layer]
2140
+ for wdg in widgets:
2141
+ try:
2142
+ getattr(qctrl, wdg).setEnabled(not locked)
2143
+ except:
2144
+ pass
2145
+
2146
# Lock the interactive editing tools of every layer: the only supported
# edit path in this viewer is the double-click track reassignment below.
label_widget_list = ['paint_button', 'erase_button', 'fill_button', 'polygon_button', 'transform_button']
lock_controls(viewer.layers['segmentation'], label_widget_list)

point_widget_list = ['addition_button', 'delete_button', 'select_button', 'transform_button']
lock_controls(viewer.layers['points'], point_widget_list)

track_widget_list = ['transform_button']
lock_controls(viewer.layers['tracks'], track_widget_list)

# Initialize selected frame
# (current_step[0] is the time axis; kept in shared_data so callbacks
# can refer to the frame that was active when a label was selected)
selected_frame = viewer.dims.current_step[0]
shared_data['selected_frame'] = selected_frame
2158
+
2159
def export_modifications():
	"""Persist the edited trajectory table to disk.

	Recomputes per-track velocities and the first-detection class on the
	in-memory table (``shared_data['df']``), re-applies the experiment's
	saved tracking post-processing options when a
	``tracking_instructions_<population>.json`` config file exists, drops
	leftover ``Unnamed`` columns, and writes the result as CSV to
	``shared_data['path']``.

	NOTE(review): relies on the enclosing scope for ``shared_data``,
	``labels`` and ``extract_experiment_from_position``.
	"""

	# Local imports to avoid a circular dependency at module import time.
	from celldetective.tracking import write_first_detection_class, clean_trajectories
	from celldetective.utils import velocity_per_track

	df = shared_data['df']
	position = shared_data['position']
	population = shared_data['population']
	# Recompute derived columns invalidated by the manual track edits.
	df = velocity_per_track(df, window_size=3, mode='bi')
	df = write_first_detection_class(df, img_shape=labels[0].shape)

	experiment = extract_experiment_from_position(position)
	instruction_file = "/".join([experiment,"configs", f"tracking_instructions_{population}.json"])
	print(f"{instruction_file=}")
	if os.path.exists(instruction_file):
		print('Tracking configuration file found...')
		with open(instruction_file, 'r') as f:
			instructions = json.load(f)
		if 'post_processing_options' in instructions:
			# Re-apply the same cleaning the tracking pipeline used,
			# so the exported table matches the pipeline's conventions.
			post_processing_options = instructions['post_processing_options']
			print(f'Applying the following track postprocessing: {post_processing_options}...')
			df = clean_trajectories(df.copy(),**post_processing_options)
	# Drop pandas round-trip artifacts before writing.
	unnamed_cols = [c for c in list(df.columns) if c.startswith('Unnamed')]
	df = df.drop(unnamed_cols, axis=1)
	print(f"{list(df.columns)=}")
	df.to_csv(shared_data['path'], index=False)
	print('Done...')
2186
+
2187
@magicgui(call_button='Export the modified\ntracks...')
def export_table_widget():
	# magicgui dock widget: clicking the button saves the edited table
	# via export_modifications() (defined in the enclosing scope).
	return export_modifications()
2190
+
2191
def label_changed(event):
	"""Record the frame on which a non-background label was picked.

	The double-click reassignment callback reads this frame back from
	``shared_data['selected_frame']``.
	"""
	picked = viewer.layers['segmentation'].selected_label
	if picked == 0:
		# Background selection: keep the previously stored frame.
		return
	shared_data['selected_frame'] = viewer.dims.current_step[0]


viewer.layers['segmentation'].events.selected_label.connect(label_changed)

viewer.window.add_dock_widget(export_table_widget, area='right')
2202
+
2203
@labels_layer.mouse_double_click_callbacks.append
def on_second_click_of_double_click(layer, event):
	"""Propagate the currently selected track ID onto the double-clicked cell.

	Workflow: the user selects a track label in the segmentation layer
	(recorded by ``label_changed``), then double-clicks a target cell.
	After confirmation, the target cell's masks and table rows from the
	clicked frame onward are reassigned to the selected track; any
	pre-existing rows of the selected track past that frame are moved to
	a brand-new track ID. Updates the tracks/points layers and
	``shared_data['df']`` in place.
	"""

	df = shared_data['df']
	position = shared_data['position']
	population = shared_data['population']

	# NOTE(review): event.position is unpacked as (frame, x, y) and then
	# used directly as (t, row, col) indices below — confirm this matches
	# napari's (t, y, x) world-coordinate order.
	frame, x, y = event.position
	try:
		value_under = viewer.layers['segmentation'].data[int(frame), int(x), int(y)] #labels[0,int(y),int(x)]
		if value_under==0:
			# Double-clicked on background: nothing to reassign.
			return None
	except:
		print('Invalid mask value...')
		return None

	# Labels were relabeled with track IDs, so the selected label IS a track ID.
	target_track_id = viewer.layers['segmentation'].selected_label

	# Ask for confirmation before mutating masks and the table.
	msgBox = QMessageBox()
	msgBox.setIcon(QMessageBox.Question)
	msgBox.setText(f"Do you want to propagate track {target_track_id} to the cell under the mouse, track {value_under}?")
	msgBox.setWindowTitle("Info")
	msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
	returnValue = msgBox.exec()
	if returnValue == QMessageBox.No:
		return None
	else:

		if target_track_id not in df['TRACK_ID'].unique() and target_track_id in np.unique(viewer.layers['segmentation'].data[shared_data['selected_frame']]):
			# the selected cell in frame -1 is not in the table... we can add it to DataFrame
			current_labelm1 = viewer.layers['segmentation'].data[shared_data['selected_frame']]
			original_labelm1 = locate_labels(position, population=population, frames=shared_data['selected_frame'])
			# Keep only the pixels of the selected cell, then measure its centroid.
			original_labelm1[current_labelm1!=target_track_id] = 0
			props = regionprops_table(original_labelm1, intensity_image=None, properties=['centroid', 'label'])
			props = pd.DataFrame(props)
			new_cell = props[['centroid-1', 'centroid-0','label']].copy()
			new_cell.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y', 'label': 'class_id'},inplace=True)
			new_cell['FRAME'] = shared_data['selected_frame']
			new_cell['TRACK_ID'] = target_track_id
			df = pd.concat([df, new_cell], ignore_index=True)

		if value_under not in df['TRACK_ID'].unique():
			# the cell to add is not currently part of DataFrame, need to add measurement

			current_label = viewer.layers['segmentation'].data[int(frame)]
			original_label = locate_labels(position, population=population, frames=int(frame))

			# NOTE(review): new_datapoint is built but never used — dead code?
			new_datapoint = {'TRACK_ID': value_under, 'FRAME': frame, 'POSITION_X': np.nan, 'POSITION_Y': np.nan, 'class_id': np.nan}

			# Isolate the clicked cell's pixels, measure centroid from the
			# original (non-relabeled) mask so class_id keeps the raw label.
			original_label[current_label!=value_under] = 0

			props = regionprops_table(original_label, intensity_image=None, properties=['centroid', 'label'])
			props = pd.DataFrame(props)

			new_cell = props[['centroid-1', 'centroid-0','label']].copy()
			new_cell.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y', 'label': 'class_id'},inplace=True)
			new_cell['FRAME'] = int(frame)
			new_cell['TRACK_ID'] = value_under
			df = pd.concat([df, new_cell], ignore_index=True)

		# New track ID for the displaced tail of the selected track.
		# NOTE(review): this local `relabel` shadows the enclosing function's
		# `relabel` parameter; also np.amax over unique() can yield NaN if
		# any TRACK_ID is NaN — verify upstream guarantees.
		relabel = np.amax(df['TRACK_ID'].unique()) + 1
		# Rewrite masks from the clicked frame onward: move the old tail of
		# the selected track aside, then give the clicked cell the track ID.
		for f in viewer.layers['segmentation'].data[int(frame):]:
			if target_track_id!=0:
				f[np.where(f==target_track_id)] = relabel
			f[np.where(f==value_under)] = target_track_id

		# Mirror the same reassignment in the table.
		if target_track_id!=0:
			df.loc[(df['FRAME']>=frame)&(df['TRACK_ID']==target_track_id),'TRACK_ID'] = relabel
		df.loc[(df['FRAME']>=frame)&(df['TRACK_ID']==value_under),'TRACK_ID'] = target_track_id
		# Track ID 0 is background/deleted; drop those rows.
		df = df.loc[~(df['TRACK_ID']==0),:]
		df = df.sort_values(by=['TRACK_ID','FRAME'])

		# Rebuild the napari track/point representations from the edited table.
		vertices, tracks, properties, graph = tracks_to_napari(df, exclude_nans=True)

		viewer.layers['tracks'].data = tracks
		viewer.layers['tracks'].properties = properties
		viewer.layers['tracks'].graph = graph

		viewer.layers['points'].data = vertices

		viewer.layers['segmentation'].refresh()
		viewer.layers['tracks'].refresh()
		viewer.layers['points'].refresh()

		shared_data['df'] = df
2288
+
1380
2289
  viewer.show(block=True)
1381
2290
 
1382
2291
  if flush_memory:
2292
+
1383
2293
  # temporary fix for slight napari memory leak
1384
2294
  for i in range(10000):
1385
2295
  try:
@@ -1394,6 +2304,7 @@ def view_on_napari_btrack(data, properties, graph, stack=None, labels=None, rela
1394
2304
 
1395
2305
 
1396
2306
  def load_napari_data(position, prefix="Aligned", population="target", return_stack=True):
2307
+
1397
2308
  """
1398
2309
  Load the necessary data for visualization in napari.
1399
2310
 
@@ -1417,6 +2328,10 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
1417
2328
  # Load the necessary data for visualization of target trajectories.
1418
2329
 
1419
2330
  """
2331
+
2332
+ if not position.endswith(os.sep):
2333
+ position += os.sep
2334
+
1420
2335
  position = position.replace('\\','/')
1421
2336
  if population.lower()=="target" or population.lower()=="targets":
1422
2337
  if os.path.exists(position+os.sep.join(['output','tables','napari_target_trajectories.npy'])):
@@ -1428,6 +2343,7 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
1428
2343
  napari_data = np.load(position+os.sep.join(['output', 'tables', 'napari_effector_trajectories.npy']), allow_pickle=True)
1429
2344
  else:
1430
2345
  napari_data = None
2346
+
1431
2347
  if napari_data is not None:
1432
2348
  data = napari_data.item()['data']
1433
2349
  properties = napari_data.item()['properties']
@@ -1444,9 +2360,6 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
1444
2360
  return data, properties, graph, labels, stack
1445
2361
 
1446
2362
 
1447
- from skimage.measure import label
1448
-
1449
-
1450
2363
  def auto_correct_masks(masks, bbox_factor = 1.75, min_area=9, fill_labels=False):
1451
2364
 
1452
2365
  """
@@ -1621,7 +2534,7 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
1621
2534
  squares = np.array(squares)
1622
2535
  squares = squares[test_in_frame]
1623
2536
  nbr_squares = len(squares)
1624
- print(f"Found {nbr_squares} ROIS")
2537
+ print(f"Found {nbr_squares} ROIs...")
1625
2538
  if nbr_squares > 0:
1626
2539
  # deactivate field of view mode
1627
2540
  fov_export = False
@@ -1710,6 +2623,19 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
1710
2623
  viewer.add_labels(labels.astype(int), name='segmentation', opacity=0.4)
1711
2624
  viewer.window.add_dock_widget(save_widget, area='right')
1712
2625
  viewer.window.add_dock_widget(export_widget, area='right')
2626
+
2627
def lock_controls(layer, widgets=(), locked=True):
	"""Disable (or re-enable) Qt control widgets of a napari layer.

	Parameters
	----------
	layer : napari.layers.Layer
		The layer whose Qt controls should be locked.
	widgets : tuple or list of str, optional
		Attribute names of the control buttons to toggle.
	locked : bool, optional
		If True (default), the widgets are disabled.
	"""
	qctrl = viewer.window.qt_viewer.controls.widgets[layer]
	for wdg in widgets:
		try:
			getattr(qctrl, wdg).setEnabled(not locked)
		except (AttributeError, RuntimeError):
			# The button may not exist in this napari version, or its
			# underlying Qt object may already be deleted; skip silently.
			pass

# Block the tools whose edits the save/export path does not support.
label_widget_list = ['polygon_button', 'transform_button']
lock_controls(viewer.layers['segmentation'], label_widget_list)
2637
+
2638
+
1713
2639
  viewer.show(block=True)
1714
2640
 
1715
2641
  if flush_memory: