celldetective 1.2.2.post1__py3-none-any.whl → 1.2.2.post2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,46 @@
+ {
+ "Are you interested in transient cells (existing on the image for only a short amount of time)?": {
+ "yes": {
+ "Are all the cells of interest visible (and segmented) on the last frame of the movie?": {
+ "yes": {
+ "Are the cells quasi-static?": {
+ "yes": "Set a low minimum tracklength to keep transient tracks. If you are confident that all the cells of interest are visible and properly segmented on the last image, you may tick the option to 'remove the tracks that do not end at the end'. With quasi-static cells, you may tick the option to 'sustain the first position from the beginning of the movie', to start measuring intensities before cell arrival, giving a contrast on the signals. Tick the option to 'interpolate missed detections within tracks'.",
+ "no": "Set a low minimum tracklength to keep transient tracks. If you are confident that all the cells of interest are visible and properly segmented on the last image, you may tick the option to 'remove the tracks that do not end at the end'. Tick the option to 'interpolate missed detections within tracks'."
+ }
+ },
+ "no": {
+ "Are the cells quasi-static?": {
+ "yes": "Set a low minimum tracklength to keep transient tracks. Tick the option to 'interpolate missed detections within tracks'.",
+ "no": "Set a low minimum tracklength to keep transient tracks. Tick the option to 'interpolate missed detections within tracks'."
+ }
+ }
+ }
+ },
+ "no": {
+ "Are all the cells of interest visible (and segmented) on the first frame of the movie?": {
+ "yes": {
+ "Are the cells quasi-static?": {
+ "yes": "Set a high minimum tracklength to filter out most false positive tracks. If you are confident that all the cells of interest are visible and properly segmented on the first frame, you may tick the option to 'remove the tracks that do not start at the beginning'. With quasi-static cells, you may tick the option to 'sustain the last position until the end of the movie', to continue measuring intensities locally when cell track is interrupted. Tick the option to 'interpolate missed detections within tracks'.",
+ "no": "Set a high minimum tracklength to filter out most false positive tracks. If you are confident that all the cells of interest are visible and properly segmented on the first frame, you may tick the option to 'remove the tracks that do not start at the beginning'. Tick the option to 'interpolate missed detections within tracks'."
+ }
+ },
+ "no": {
+ "Are all the cells of interest visible (and segmented) on the last frame of the movie?": {
+ "yes": {
+ "Are the cells quasi-static?": {
+ "yes": "Set a moderate minimum tracklength to filter out false positive tracks. If you are confident that all the cells of interest are visible and properly segmented on the last image, you may tick the option to 'remove the tracks that do not end at the end'. With quasi-static cells, you may tick the option to 'sustain the first position from the beginning of the movie', to start measuring intensities before cell arrival, giving a contrast on the signals. Tick the option to 'interpolate missed detections within tracks'.",
+ "no": "Set a moderate minimum tracklength to filter out false positive tracks. Tick the option to 'interpolate missed detections within tracks'."
+ }
+ },
+ "no": {
+ "Are the cells quasi-static?": {
+ "yes": "Set a moderate minimum tracklength to filter out false positive tracks. Tick the option to 'interpolate missed detections within tracks'.",
+ "no": "Set a moderate minimum tracklength to filter out false positive tracks. Tick the option to 'interpolate missed detections within tracks'."
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
@@ -0,0 +1,11 @@
+ {
+ "Is the time resolution high enough to unambiguously identify visually cells from one frame to the next?": {
+ "yes": {
+ "Did you successfully segment the vast majority of the cells of interest?": {
+ "yes": "You should be able to track your cells. Do not forget to tune the tracking parameters for your cell population for optimal performance.",
+ "no": "With a non-negligible segmentation miss rate, tracking can be much noisier. If possible, try to improve the segmentation by using a different model or performing manual corrections. Otherwise, consider skipping the tracking and signal analysis steps and measure directly the cells."
+ }
+ },
+ "no": "Skip the tracking and signal analysis steps. Proceed directly with measurements."
+ }
+ }
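
The two hunks above add nested yes/no decision trees that back the GUI's contextual help (judging from the RECORD further below, most likely celldetective/gui/help/track-postprocessing.json and tracking.json). A minimal sketch of how such a tree could be walked is shown below; the `walk_help_tree` helper and the hard-coded file path are illustrative, not part of the celldetective API.

``` python
import json

def walk_help_tree(node):
    # Each level is a single-question dict: {question: {"yes": ..., "no": ...}};
    # leaves are plain advice strings.
    while isinstance(node, dict):
        question, branches = next(iter(node.items()))
        answer = ""
        while answer not in ("yes", "no"):
            answer = input(f"{question} [yes/no] ").strip().lower()
        node = branches[answer]
    return node

# Hypothetical path; the RECORD below lists the new help files under celldetective/gui/help/
with open("celldetective/gui/help/tracking.json") as f:
    tree = json.load(f)

print(walk_help_tree(tree))
```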
celldetective/io.py CHANGED
@@ -22,6 +22,7 @@ from celldetective.utils import _estimate_scale_factor, _extract_channel_indices
  from celldetective.utils import interpolate_nan
  import concurrent.futures
  from tifffile import imwrite
+ from stardist import fill_label_holes

  def get_experiment_wells(experiment):

@@ -1069,8 +1070,8 @@ def get_pair_signal_models_list(return_path=False):
  available_models = glob(modelpath + f'*{os.sep}')
  available_models = [m.replace('\\', '/').split('/')[-2] for m in available_models]
  #for rm in repository_models:
- # if rm not in available_models:
- # available_models.append(rm)
+ # if rm not in available_models:
+ # available_models.append(rm)

  if not return_path:
  return available_models
@@ -1276,7 +1277,7 @@ def view_on_napari_btrack(data, properties, graph, stack=None, labels=None, rela
  if data.shape[1]==4:
  viewer.add_tracks(data, properties=properties, graph=graph, name='tracks')
  else:
- viewer.add_tracks(data[:,[0,1,3,4]], properties=properties, graph=graph, name='tracks')
+ viewer.add_tracks(data[:,[0,1,3,4]], properties=properties, graph=graph, name='tracks')
  viewer.show(block=True)

  if flush_memory:
@@ -1347,47 +1348,63 @@ def load_napari_data(position, prefix="Aligned", population="target", return_sta
  from skimage.measure import label


- def auto_correct_masks(masks):
+ def auto_correct_masks(masks, bbox_factor = 1.75, min_area=9, fill_labels=False):

  """
- Automatically corrects segmentation masks by splitting disconnected objects sharing the same label.
+ Correct segmentation masks to ensure consistency and remove anomalies.

- This function examines each labeled object in the input masks and splits objects whose bounding box
- area is significantly larger than their actual area, indicating potential merging of multiple objects.
- It uses geometric properties to identify such cases and applies a relabeling to separate merged objects.
+ This function processes a labeled mask image to correct anomalies and reassign labels.
+ It performs the following operations:
+
+ 1. Corrects negative mask values by taking their absolute values.
+ 2. Identifies and corrects segmented objects with a bounding box area that is disproportionately
+ larger than the actual object area. This indicates potential segmentation errors where separate objects
+ share the same label.
+ 3. Removes small objects that are considered noise (default threshold is an area of less than 9 pixels).
+ 4. Reorders the labels so they are consecutive from 1 up to the number of remaining objects (to avoid encoding errors).

  Parameters
  ----------
- masks : ndarray
- A 2D numpy array representing the segmentation masks, where each object is labeled with a unique integer.
+ masks : np.ndarray
+ A 2D array representing the segmented mask image with labeled regions. Each unique value
+ in the array represents a different object or cell.

  Returns
  -------
- ndarray
- A 2D numpy array of the corrected segmentation masks with potentially merged objects separated and
- relabeled.
+ clean_labels : np.ndarray
+ A corrected version of the input mask, with anomalies corrected, small objects removed,
+ and labels reordered to be consecutive integers.

  Notes
  -----
- - The function uses bounding box area and actual object area to identify potentially merged objects.
- Objects are considered potentially merged if their bounding box area is more than twice their actual area.
- - Relabeling of objects is done sequentially, adding to the maximum label number found in the original
- masks to ensure new labels do not overlap with existing ones.
- - This function relies on `skimage.measure.label` for relabeling and `skimage.measure.regionprops_table`
- for calculating object properties.
+ - This function is useful for post-processing segmentation outputs to ensure high-quality
+ object detection, particularly in applications such as cell segmentation in microscopy images.
+ - The function assumes that the input masks contain integer labels and that the background
+ is represented by 0.

+ Examples
+ --------
+ >>> masks = np.array([[0, 0, 1, 1], [0, 2, 2, 1], [0, 2, 0, 0]])
+ >>> corrected_masks = auto_correct_masks(masks)
+ >>> corrected_masks
+ array([[0, 0, 1, 1],
+ [0, 2, 2, 1],
+ [0, 2, 0, 0]])
  """

+ # Avoid negative mask values
+ masks[masks<0] = np.abs(masks[masks<0])
+
  props = pd.DataFrame(regionprops_table(masks, properties=('label', 'area', 'area_bbox')))
  max_lbl = props['label'].max()
- corrected_lbl = masks.copy().astype(int)
+ corrected_lbl = masks.copy() #.astype(int)

  for cell in props['label'].unique():

  bbox_area = props.loc[props['label'] == cell, 'area_bbox'].values
  area = props.loc[props['label'] == cell, 'area'].values

- if bbox_area > 1.75 * area: # condition for anomaly
+ if bbox_area > bbox_factor * area: # condition for anomaly

  lbl = masks == cell
  lbl = lbl.astype(int)
@@ -1400,7 +1417,27 @@ def auto_correct_masks(masks):

  max_lbl = np.amax(corrected_lbl)

- return corrected_lbl
+ # Second routine to eliminate objects too small
+ props2 = pd.DataFrame(regionprops_table(corrected_lbl, properties=('label', 'area', 'area_bbox')))
+ for cell in props2['label'].unique():
+ area = props2.loc[props2['label'] == cell, 'area'].values
+ lbl = corrected_lbl == cell
+ if area < min_area:
+ corrected_lbl[lbl] = 0
+
+ # Additional routine to reorder labels from 1 to number of cells
+ label_ids = np.unique(corrected_lbl)[1:]
+ clean_labels = corrected_lbl.copy()
+
+ for k,lbl in enumerate(label_ids):
+ clean_labels[corrected_lbl==lbl] = k+1
+
+ clean_labels = clean_labels.astype(int)
+
+ if fill_labels:
+ clean_labels = fill_label_holes(clean_labels)
+
+ return clean_labels



@@ -1529,7 +1566,7 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
  multichannel.append(frame)
  except:
  pass
- multichannel = np.array(multichannel)
+ multichannel = np.array(multichannel)
  save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_labelled.tif", labels_layer, axes='YX')
  save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}.tif", multichannel, axes='CYX')
  info = {"spatial_calibration": spatial_calibration, "channels": list(channel_names), 'cell_type': ct, 'antibody': ab, 'concentration': conc, 'pharmaceutical_agent': pa}
celldetective/utils.py CHANGED
@@ -1961,7 +1961,7 @@ def noise(x, apply_probability=0.5, clip_option=False):


  def augmenter(x, y, flip=True, gauss_blur=True, noise_option=True, shift=True,
- channel_extinction=False, extinction_probability=0.1, clip=False, max_sigma_blur=4,
+ channel_extinction=True, extinction_probability=0.1, clip=False, max_sigma_blur=4,
  apply_noise_probability=0.5, augment_probability=0.9):

  """
@@ -2023,6 +2023,7 @@ def augmenter(x, y, flip=True, gauss_blur=True, noise_option=True, shift=True,
  >>> y = np.random.randint(2, size=(128, 128)) # Sample binary mask
  >>> x_aug, y_aug = augmenter(x, y)
  # The returned `x_aug` and `y_aug` are augmented versions of `x` and `y`.
+
  """

  r = random.random()
@@ -2042,9 +2043,9 @@ def augmenter(x, y, flip=True, gauss_blur=True, noise_option=True, shift=True,

  if channel_extinction:
  assert extinction_probability <= 1.,'The extinction probability must be a number between 0 and 1.'
- for i in range(x.shape[-1]):
- if np.random.random() > (1 - extinction_probability):
- x[:,:,i] = 0.
+ channel_off = [np.random.random() < extinction_probability for i in range(x.shape[-1])]
+ if not np.all(channel_off):
+ x[:,:,np.array(channel_off, dtype=bool)] = 0.

  return x, y

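
The last hunk rewrites the channel-extinction branch of `augmenter`: channels are now drawn up front, and nothing is zeroed if every channel happens to be drawn, so a sample can never go fully blank. The standalone sketch below mirrors that logic with plain NumPy; the `extinguish_channels` helper is illustrative and not part of celldetective.

``` python
import numpy as np

def extinguish_channels(x, extinction_probability=0.1, rng=np.random):
    """Zero out randomly drawn channels of an (H, W, C) image, mirroring the
    updated augmenter branch above: if every channel is drawn, nothing is
    zeroed, so at least one channel always survives."""
    channel_off = np.array([rng.random() < extinction_probability
                            for _ in range(x.shape[-1])], dtype=bool)
    if not np.all(channel_off):
        x[:, :, channel_off] = 0.
    return x

x = np.random.rand(64, 64, 3).astype(np.float32)
x_aug = extinguish_channels(x.copy(), extinction_probability=0.5)
print([bool(np.any(x_aug[:, :, c])) for c in range(3)])  # at least one channel stays non-zero
```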
@@ -0,0 +1,214 @@
+ Metadata-Version: 2.1
+ Name: celldetective
+ Version: 1.2.2.post2
+ Summary: description
+ Home-page: http://github.com/remyeltorro/celldetective
+ Author: Rémy Torro
+ Author-email: remy.torro@inserm.fr
+ License: GPL-3.0
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: wheel
+ Requires-Dist: nbsphinx
+ Requires-Dist: nbsphinx-link
+ Requires-Dist: sphinx-rtd-theme
+ Requires-Dist: sphinx
+ Requires-Dist: jinja2
+ Requires-Dist: ipykernel
+ Requires-Dist: stardist
+ Requires-Dist: cellpose<3
+ Requires-Dist: scikit-learn
+ Requires-Dist: btrack
+ Requires-Dist: tensorflow~=2.15.0
+ Requires-Dist: napari
+ Requires-Dist: tqdm
+ Requires-Dist: mahotas
+ Requires-Dist: fonticon-materialdesignicons6
+ Requires-Dist: art
+ Requires-Dist: lifelines
+ Requires-Dist: setuptools
+ Requires-Dist: scipy
+ Requires-Dist: seaborn
+ Requires-Dist: opencv-python-headless==4.7.0.72
+ Requires-Dist: liblapack
+ Requires-Dist: gputools
+ Requires-Dist: lmfit
+ Requires-Dist: superqt[cmap]
+ Requires-Dist: matplotlib-scalebar
+ Requires-Dist: numpy==1.26.4
+ Requires-Dist: pytest
+ Requires-Dist: pytest-qt
+
+ # Celldetective
+
+ <embed>
+ <p align="center">
+ <img src="https://github.com/remyeltorro/celldetective/blob/main/celldetective/icons/logo-large.png" width="33%" />
+ </p>
+ </embed>
+
+ ![ico1](https://img.shields.io/readthedocs/celldetective?link=https%3A%2F%2Fcelldetective.readthedocs.io%2Fen%2Flatest%2Findex.html)
+ ![ico17](https://github.com/remyeltorro/celldetective/actions/workflows/test.yml/badge.svg)
+ ![ico4](https://img.shields.io/pypi/v/celldetective)
+ ![ico6](https://img.shields.io/github/downloads/remyeltorro/celldetective/total)
+ ![ico5](https://img.shields.io/pypi/dm/celldetective)
+ ![GitHub repo size](https://img.shields.io/github/repo-size/remyeltorro/celldetective)
+ ![GitHub License](https://img.shields.io/github/license/remyeltorro/celldetective?link=https%3A%2F%2Fgithub.com%2Fremyeltorro%2Fcelldetective%2Fblob%2Fmain%2FLICENSE)
+ ![ico2](https://img.shields.io/github/forks/remyeltorro/celldetective?link=https%3A%2F%2Fgithub.com%2Fremyeltorro%2Fcelldetective%2Fforks)
+ ![ico3](https://img.shields.io/github/stars/remyeltorro/celldetective?link=https%3A%2F%2Fgithub.com%2Fremyeltorro%2Fcelldetective%2Fstargazers)
+
+ Celldetective is a Python package and graphical user interface for performing single-cell
+ analysis on multimodal time-lapse microscopy images.
+
+ - [Check the full documentation](https://celldetective.readthedocs.io)
+ - [Report a bug or request a new feature](https://github.com/remyeltorro/celldetective/issues/new/choose)
+ - [Explore the datasets, models and demos](https://zenodo.org/records/10650279)
+
+ ## Overview
+
+ ![Pipeline](https://github.com/remyeltorro/celldetective/raw/main/docs/source/_static/celldetective-blocks.png)
+
+
+ Celldetective was designed to analyze time-lapse microscopy images in difficult situations: mixed cell populations that are only separable through multimodal information. This software provides a toolkit for the analysis of cell population interactions.
+
+
+ **Key features**:
+ - Achieve single-cell description (segment / track / measure) for up to two populations of interest
+ - Signal annotation and traditional or Deep learning automation
+ - Mask annotation in napari[^5] and retraining of Deep learning models
+ - Neighborhood linking within and across populations and interaction annotations
+ - Everything is done graphically, no coding is required!
+
+ Check out the [highlights](https://celldetective.readthedocs.io/en/latest/overview.html#description) in the documentation!
+
+ Instead of reinventing the wheel and out of respect for the amazing work done by these teams, we chose to build around StarDist[^1] & Cellpose[^2][^3] (BSD-3 license) for the Deep-learning segmentation and the Bayesian tracker bTrack[^4] (MIT license) for tracking. If you use these models or methods in your Celldetective workflow, don't forget to cite the respective papers!
+
+ **Target Audience**: The software is aimed at scientists who want to quantify cell populations, dynamically or not, from microscopy images. Experimental scientists who produce such images can analyze their data directly, thanks to the graphical interface, which completely removes the need for coding, and to the many helper functions that guide the user through the analysis steps. Finally, the modular structure of Celldetective welcomes users who only need part of the pipeline.
+
+ ![Signal analysis](https://github.com/remyeltorro/celldetective/raw/main/docs/source/_static/signal-annotator.gif)
+
+
+ # System requirements
+
+ ## Hardware requirements
+
+ The software was tested on several machines, including:
+
+ - An Intel(R) Core(TM) i9-10850K CPU @ 3.60GHz, with a single NVIDIA
+ GeForce RTX 3070 (8 Gb of memory) and 16 Gb of memory
+ - An Intel(R) Core(TM) i7-9750H CPU @ 2.60 GHz, with 16 Gb of memory
+
+ In GPU mode, successive segmentation and DL signal analysis could be
+ performed without saturating the GPU memory thanks to the subprocess
+ formulation for the different modules. The GPU can be disabled in the
+ startup window. The software does not require a GPU (but model inference
+ will take longer).
+
+ A typical analysis of a single movie with a GPU takes
+ between 3 and 15 minutes. Depending on the number of cells and frames on
+ the images, this computation time can increase to the order of half an
+ hour on a CPU.
+
+ Processing is performed frame by frame, so the memory requirement is extremely low. The main bottleneck is in the visualization of segmentation and tracking output. Whole stacks (typically 1-9 Gb) have to be loaded in memory at once to be viewed in napari.
+
+ ## Software requirements
+
+ The software was developed simultaneously on Ubuntu 20.04 and Windows 11.
+ It was tested on MacOS, but Tensorflow installation can require extra
+ steps.
+
+ - Linux: Ubuntu 20.04.6 LTS (Focal Fossa)
+ - Windows: Windows 11 Home 23H2
+
+ To use the software, you must install Python, *e.g.* through
+ [Anaconda](https://www.anaconda.com/download). Celldetective is routinely tested on both Ubuntu and Windows for Python versions 3.9, 3.10 and 3.11.
+
+ # Installation
+
+ ## Stable release
+
+ Celldetective requires a version of Python between 3.9 and 3.11 (inclusive). If your Python version is older or more recent, consider using `conda` to create an environment as described below.
+
+ With the proper Python version, Celldetective can be directly installed with `pip`:
+
+ ``` bash
+ pip install celldetective
+ ```
+
+ We recommend that you create an environment to use Celldetective, to protect your package versions and fix the Python version, *e.g.*
+ with `conda`:
+
+ ``` bash
+ conda create -n celldetective python=3.11 pyqt
+ conda activate celldetective
+ pip install celldetective
+ ```
+
+ Need an update? Simply type the following in the terminal (in your
+ environment):
+
+ ``` bash
+ pip install --upgrade celldetective
+ ```
+
+ For more installation options, please check the [documentation](https://celldetective.readthedocs.io/en/latest/get-started.html#installation).
+
+
+ # Quick start
+
+ You can launch the GUI by 1) opening a terminal and 2) typing the
+ following:
+
+ ``` bash
+ # conda activate celldetective
+ python -m celldetective
+ ```
+
+ For more information about how to get started, please check the [documentation](https://celldetective.readthedocs.io/en/latest/get-started.html#launching-the-gui).
+
+ # How to cite?
+
+ If you use this software in your research, please cite the
+ [Celldetective](https://www.biorxiv.org/content/10.1101/2024.03.15.585250v1)
+ paper (currently preprint):
+
+ ``` raw
+ @article {Torro2024.03.15.585250,
+ author = {R{\'e}my Torro and Beatriz D{\`\i}az-Bello and Dalia El Arawi and Lorna Ammer and Patrick Chames and Kheya Sengupta and Laurent Limozin},
+ title = {Celldetective: an AI-enhanced image analysis tool for unraveling dynamic cell interactions},
+ elocation-id = {2024.03.15.585250},
+ year = {2024},
+ doi = {10.1101/2024.03.15.585250},
+ publisher = {Cold Spring Harbor Laboratory},
+ abstract = {A current key challenge in bioimaging is the analysis of multimodal and multidimensional data reporting dynamic interactions between diverse cell populations. We developed Celldetective, a software that integrates AI-based segmentation and tracking algorithms and automated signal analysis into a user-friendly graphical interface. It offers complete interactive visualization, annotation, and training capabilities. We demonstrate it by analyzing original experimental data of spreading immune effector cells as well as antibody-dependent cell cytotoxicity events using multimodal fluorescence microscopy.Competing Interest StatementThe authors have declared no competing interest.},
+ URL = {https://www.biorxiv.org/content/early/2024/03/17/2024.03.15.585250},
+ eprint = {https://www.biorxiv.org/content/early/2024/03/17/2024.03.15.585250.full.pdf},
+ journal = {bioRxiv}
+ }
+ ```
+
+ Make sure to cite the papers of any segmentation model (StarDist,
+ Cellpose) or tracker (bTrack) you used through Celldetective.
+
+ # Bibliography
+
+ [^1]: Schmidt, U., Weigert, M., Broaddus, C. & Myers, G. Cell Detection
+ with Star-Convex Polygons. in Medical Image Computing and Computer
+ Assisted Intervention -- MICCAI 2018 (eds. Frangi, A. F., Schnabel,
+ J. A., Davatzikos, C., Alberola-López, C. & Fichtinger, G.) 265--273
+ (Springer International Publishing, Cham, 2018).
+ <doi:10.1007/978-3-030-00934-2_30>.
+
+ [^2]: Stringer, C., Wang, T., Michaelos, M. & Pachitariu, M. Cellpose: a
+ generalist algorithm for cellular segmentation. Nat Methods 18,
+ 100--106 (2021).
+
+ [^3]: Pachitariu, M. & Stringer, C. Cellpose 2.0: how to train your own
+ model. Nat Methods 19, 1634--1641 (2022).
+
+ [^4]: Ulicna, K., Vallardi, G., Charras, G. & Lowe, A. R. Automated Deep
+ Lineage Tree Analysis Using a Bayesian Single Cell Tracking
+ Approach. Frontiers in Computer Science 3, (2021).
+
+ [^5]: Ahlers, J. et al. napari: a multi-dimensional image viewer for
+ Python. Zenodo <https://doi.org/10.5281/zenodo.8115575> (2023).
@@ -1,9 +1,9 @@
  celldetective/__init__.py,sha256=PH25g2_AZt7N5Jg1KB3YT9wFvfMr9lIb2WxYzmDqlX4,105
- celldetective/__main__.py,sha256=Ua_3mAlW5Y53ih3rkZrAeYfvOc8-Xz6sZosyUCFtTZI,14842
+ celldetective/__main__.py,sha256=_H9B620ntENFx9RBvlV6ybxpvtHzCbv5NnIPmDBr9Z0,1127
  celldetective/events.py,sha256=D07LyzBerq4QkXKRhMj0ZChzNkqBrW45E9TgBtEa3tk,4735
  celldetective/extra_properties.py,sha256=8DkxTvVs7gASsnnGurVZ3_zt6uR0pvvJhBKO2LC6hGk,5118
  celldetective/filters.py,sha256=b0qKwHor1fvNA_dHovP17nQz8EsW5YlyhT2TJnayn08,3615
- celldetective/io.py,sha256=ZenCko4gdyJE0hwdTb2DWEXhUJRoF5FwAcpgZRdZtAo,85024
+ celldetective/io.py,sha256=Jld2Nt1cRZyJxUz2wdX-izrd0mp4DJ2P_AMwCDOLUfs,86273
  celldetective/measure.py,sha256=7bfCUdLfJbGmFWWwhu_C9-l1buj_-We7izBaFWTe4ok,54910
  celldetective/neighborhood.py,sha256=GjF_fTmtcU8jq5XZT-DnDSp28VDTrsaHjIGJG7W2Ppk,52799
  celldetective/preprocessing.py,sha256=iV5or20s8XPJXQZDsWzis7OBaqzPDyAo_cwcDVDuX5A,38188
@@ -11,9 +11,10 @@ celldetective/relative_measurements.py,sha256=qPHC6gtUaICNGKh6Qfh8y6NA4OUJauGcaj
  celldetective/segmentation.py,sha256=H_6BH_RKaG6injUPZ3gQzQ112pMyzQ33u5GEpLE9dsQ,30426
  celldetective/signals.py,sha256=485axzJwE3xsSsN4AmBQlZ2at7uWu9QRg0XrUHvLBUE,120552
  celldetective/tracking.py,sha256=HGeOtfQ-nmpaea9_Q-sq2WcwZfdPXRH028JyFlt6eUs,37901
- celldetective/utils.py,sha256=UkRHPHWZixj8HOI8pc79jgsRjWvpCkF-2lfJ9JfddgA,86311
+ celldetective/utils.py,sha256=yuPsRGsw2tYE1IApGulC83dtHOhsIhbts-46b6V83QA,86375
  celldetective/datasets/segmentation_annotations/blank,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  celldetective/datasets/signal_annotations/blank,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ celldetective/gui/InitWindow.py,sha256=M0v58BmOuZ05em2WSXHpDar3tp2AeS6pr44ZBtiYz-4,13683
  celldetective/gui/__init__.py,sha256=2_r2xfOj4_2xj0yBkCTIfzlF94AHKm-j6Pvpd7DddQc,989
  celldetective/gui/about.py,sha256=fed6nh-WwKy6bEbavy6L62P0CQB8VsOO0PajOjHsnFk,1694
  celldetective/gui/analyze_block.py,sha256=sat8RECEeZxlaconZZIxI0IrIjwJo121PcBXqJmA1-o,24756
@@ -41,6 +42,17 @@ celldetective/gui/survival_ui.py,sha256=tzvtI0eC0bgd7Lfm20HqU7rfCJ60gy9a_Sc9cZLS
  celldetective/gui/tableUI.py,sha256=9yJ4CjeNAQoYdHXfsVvD0qr_IdYWuQwIBhvfU3QoNtc,40602
  celldetective/gui/thresholds_gui.py,sha256=t1hTDdaJTJWHBt9vtRCE4B1TsVHhobQwCpKfntVgaQI,50510
  celldetective/gui/viewers.py,sha256=mCyE9DaUX2rGq65oYEwyEfNtEUPrHdoFNg2MRp1kXs4,27701
+ celldetective/gui/help/DL-segmentation-strategy.json,sha256=59jVtn8pECbCqPQwJifgViVYTF1AxLQDIkNJMS7CJuk,1143
+ celldetective/gui/help/Threshold-vs-DL.json,sha256=SELQk3qF8xcwmkX686Bqrr7i5KiXqN7r4jbUydCLRDU,603
+ celldetective/gui/help/cell-populations.json,sha256=wP0ekhokb9oHE3XqOQrC5ARewNgOlv0GJrQeVQyMJzg,1075
+ celldetective/gui/help/exp-structure.json,sha256=Q3z9-76VkQK77qxHRt2nUzE-nnM3yCKFXK3RMcjkeSI,1737
+ celldetective/gui/help/feature-btrack.json,sha256=2uU5R8HAvXfqjTieTwl3VzUb8Y5UGO3Qsb3_6UoUbs0,589
+ celldetective/gui/help/neighborhood.json,sha256=aSGMwK3ZeGGXcoAgvmJMs9RpUWXOdxws2ajJ4MdQBOM,1096
+ celldetective/gui/help/prefilter-for-segmentation.json,sha256=1-hgr3H5Sr91bL-3o_dQKFibmxfTFuO6btkNhAS6MGY,610
+ celldetective/gui/help/preprocessing.json,sha256=k4gWxCIE9CMXtBQl9W0ZrbQwhPF1QIxJdnDqtTiYN-c,5252
+ celldetective/gui/help/propagate-classification.json,sha256=cnZpY7iVFZzbHy1uIxWwtL-hnUEBV4JibVbQG5Zu66o,1285
+ celldetective/gui/help/track-postprocessing.json,sha256=VaGd8EEkA33OL-EI3NXWZ8yHeWWyUeImDF5yAjsVYGA,3990
+ celldetective/gui/help/tracking.json,sha256=yIAoOToqCSQ_XF4gwEZCcyXcvQ3mROju263ZPDvlUyY,776
  celldetective/icons/logo-large.png,sha256=FXSwV3u6zEKcfpuSn4unnqB0oUnN9cHqQ9BCKWytrpg,36631
  celldetective/icons/logo.png,sha256=wV2OS8_dU5Td5cgdPbCOU3JpMpTwNuYLnfVcnQX0tJA,2437
  celldetective/icons/signals_icon.png,sha256=vEiKoqWTtN0-uJgVqtAlwCuP-f4QeWYOlO3sdp2tg2w,3969
@@ -74,13 +86,14 @@ tests/test_io.py,sha256=gk5FmoI7ANEczUtNXYRxc48KzkfYzemwS_eYaLq4_NI,2093
  tests/test_measure.py,sha256=FEUAs1rVHylvIvubCb0bJDNGZLVmkgXNgI3NaGQ1dA8,4542
  tests/test_neighborhood.py,sha256=gk5FmoI7ANEczUtNXYRxc48KzkfYzemwS_eYaLq4_NI,2093
  tests/test_preprocessing.py,sha256=FI-Wk-kc4wWmOQg_NLCUIZC1oti396wr5cC-BauBai0,1436
+ tests/test_qt.py,sha256=eYqOoff-vfvZAZM6H_IY19IqK7qzyETcyj54f9T0bQA,3906
  tests/test_segmentation.py,sha256=_HB8CCq-Ci6amf0xAmDIUuwtBUU_EGpgqLvcvSHrGug,3427
  tests/test_signals.py,sha256=No4cah6KxplhDcKXnU8RrA7eDla4hWw6ccf7xGnBokU,3599
  tests/test_tracking.py,sha256=8hebWSqEIuttD1ABn-6dKCT7EXKRR7-4RwyFWi1WPFo,8800
  tests/test_utils.py,sha256=NKRCAC1d89aBK5cWjTb7-pInYow901RrT-uBlIdz4KI,3692
- celldetective-1.2.2.post1.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- celldetective-1.2.2.post1.dist-info/METADATA,sha256=TUIJ7f5xLHoW8DMdQYzA3TQjU6fIm83Xo8DHV-99-QA,12839
- celldetective-1.2.2.post1.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- celldetective-1.2.2.post1.dist-info/entry_points.txt,sha256=2NU6_EOByvPxqBbCvjwxlVlvnQreqZ3BKRCVIKEv3dg,62
- celldetective-1.2.2.post1.dist-info/top_level.txt,sha256=6rsIKKfGMKgud7HPuATcpq6EhdXwcg_yknBVWn9x4C4,20
- celldetective-1.2.2.post1.dist-info/RECORD,,
+ celldetective-1.2.2.post2.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ celldetective-1.2.2.post2.dist-info/METADATA,sha256=WT8GY_72_xK3YgqdZ97FwTWh857CGSzzNPJLKBJRIhI,9923
+ celldetective-1.2.2.post2.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ celldetective-1.2.2.post2.dist-info/entry_points.txt,sha256=2NU6_EOByvPxqBbCvjwxlVlvnQreqZ3BKRCVIKEv3dg,62
+ celldetective-1.2.2.post2.dist-info/top_level.txt,sha256=6rsIKKfGMKgud7HPuATcpq6EhdXwcg_yknBVWn9x4C4,20
+ celldetective-1.2.2.post2.dist-info/RECORD,,
tests/test_qt.py ADDED
@@ -0,0 +1,101 @@
+ import pytest
+ from PyQt5 import QtCore
+ from celldetective.gui.InitWindow import AppInitWindow
+ import time
+ import os
+
+ abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]])
+ print(abs_path)
+
+ @pytest.fixture
+ def app(qtbot):
+ test_app = AppInitWindow()
+ qtbot.addWidget(test_app)
+ return test_app
+
+ # def test_launch_demo(app, qtbot):
+ # app.experiment_path_selection.setText(abs_path + os.sep + 'examples/demo')
+ # qtbot.mouseClick(app.validate_button, QtCore.Qt.LeftButton)
+
+ # def test_preprocessing_panel(app, qtbot):
+
+ # app.experiment_path_selection.setText(abs_path + os.sep + 'examples/demo')
+ # qtbot.mouseClick(app.validate_button, QtCore.Qt.LeftButton)
+
+ # qtbot.mouseClick(app.control_panel.PreprocessingPanel.collapse_btn, QtCore.Qt.LeftButton)
+ # qtbot.mouseClick(app.control_panel.PreprocessingPanel.fit_correction_layout.add_correction_btn, QtCore.Qt.LeftButton)
+ # qtbot.mouseClick(app.control_panel.PreprocessingPanel.collapse_btn, QtCore.Qt.LeftButton)
+
+ def test_app(app, qtbot):
+
+ # Set an experiment folder and open
+ app.experiment_path_selection.setText(os.sep.join([abs_path,'examples','demo']))
+ qtbot.mouseClick(app.validate_button, QtCore.Qt.LeftButton)
+
+ # Set a position
+ app.control_panel.position_list.setCurrentIndex(1)
+ app.control_panel.update_position_options()
+
+ # View stack
+ qtbot.mouseClick(app.control_panel.view_stack_btn, QtCore.Qt.LeftButton)
+ #qtbot.wait(1000)
+ app.control_panel.viewer.close()
+
+ # Expand process block
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.collapse_btn, QtCore.Qt.LeftButton)
+
+ # Use Threshold Config Wizard
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.upload_model_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.SegModelLoader.threshold_config_button, QtCore.Qt.LeftButton)
+ app.control_panel.ProcessEffectors.SegModelLoader.ThreshWizard.close()
+ app.control_panel.ProcessEffectors.SegModelLoader.close()
+
+ # Check segmentation with napari
+ #qtbot.mouseClick(app.control_panel.ProcessEffectors.check_seg_btn, QtCore.Qt.LeftButton)
+ # close napari?
+
+ # Train model
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.train_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ app.control_panel.ProcessEffectors.ConfigSegmentationTrain.close()
+
+ # Config tracking
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.track_config_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ app.control_panel.ProcessEffectors.ConfigTracking.close()
+
+ # Config measurements
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.measurements_config_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ app.control_panel.ProcessEffectors.ConfigMeasurements.close()
+
+ # Classifier widget
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.classify_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ app.control_panel.ProcessEffectors.ClassifierWidget.close()
+
+ # Config signal annotator
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.config_signal_annotator_btn, QtCore.Qt.LeftButton)
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.ConfigSignalAnnotator.rgb_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ app.control_panel.ProcessEffectors.ConfigSignalAnnotator.close()
+
+ # Signal annotator widget
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.check_signals_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ app.control_panel.ProcessEffectors.SignalAnnotator.close()
+
+ # Table widget
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.view_tab_btn, QtCore.Qt.LeftButton)
+ qtbot.wait(1000)
+ app.control_panel.ProcessEffectors.tab_ui.close()
+
+ #qtbot.mouseClick(app.control_panel.PreprocessingPanel.fit_correction_layout.add_correction_btn, QtCore.Qt.LeftButton)
+ qtbot.mouseClick(app.control_panel.ProcessEffectors.collapse_btn, QtCore.Qt.LeftButton)
+
+
+
+ # def test_click(app, qtbot):
+ # qtbot.mouseClick(app.new_exp_button, QtCore.Qt.LeftButton)
+ # qtbot.wait(10000)
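
The new tests/test_qt.py drives the GUI through pytest-qt's qtbot fixture. One way to run just this smoke test on a machine without a display server is sketched below; the runner script is illustrative and not shipped with the package.

``` python
# run_qt_test.py -- illustrative helper, not part of celldetective
import os
import sys

import pytest

# Qt honours this environment variable; "offscreen" avoids the need for an X server.
os.environ.setdefault("QT_QPA_PLATFORM", "offscreen")

# -x stops at the first failure; pytest-qt (listed in the requirements above)
# provides the qtbot fixture used by tests/test_qt.py.
sys.exit(pytest.main(["-x", "tests/test_qt.py"]))
```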