spacr 0.0.17.tar.gz → 0.0.20.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {spacr-0.0.17/spacr.egg-info → spacr-0.0.20}/PKG-INFO +28 -26
- {spacr-0.0.17 → spacr-0.0.20}/README.md +4 -2
- {spacr-0.0.17 → spacr-0.0.20}/setup.py +24 -24
- {spacr-0.0.17 → spacr-0.0.20}/spacr/__init__.py +2 -0
- spacr-0.0.20/spacr/alpha.py +18 -0
- spacr-0.0.20/spacr/cli.py +41 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/core.py +267 -56
- spacr-0.0.20/spacr/graph_learning.py +276 -0
- spacr-0.0.20/spacr/graph_learning_lap.py +84 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/gui_classify_app.py +6 -21
- {spacr-0.0.17 → spacr-0.0.20}/spacr/gui_mask_app.py +9 -43
- {spacr-0.0.17 → spacr-0.0.20}/spacr/gui_measure_app.py +10 -24
- spacr-0.0.20/spacr/gui_sim_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/gui_utils.py +84 -66
- {spacr-0.0.17 → spacr-0.0.20}/spacr/io.py +258 -110
- {spacr-0.0.17 → spacr-0.0.20}/spacr/measure.py +11 -17
- spacr-0.0.20/spacr/old_code.py +290 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/plot.py +92 -87
- {spacr-0.0.17 → spacr-0.0.20}/spacr/timelapse.py +213 -52
- {spacr-0.0.17 → spacr-0.0.20}/spacr/utils.py +219 -118
- {spacr-0.0.17 → spacr-0.0.20/spacr.egg-info}/PKG-INFO +28 -26
- {spacr-0.0.17 → spacr-0.0.20}/spacr.egg-info/SOURCES.txt +2 -0
- spacr-0.0.20/spacr.egg-info/requires.txt +32 -0
- spacr-0.0.17/spacr/cli.py +0 -240
- spacr-0.0.17/spacr/graph_learning.py +0 -95
- spacr-0.0.17/spacr/gui_sim_app.py +0 -213
- spacr-0.0.17/spacr/old_code.py +0 -104
- spacr-0.0.17/spacr.egg-info/requires.txt +0 -32
- {spacr-0.0.17 → spacr-0.0.20}/LICENSE +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/setup.cfg +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/__main__.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/annotate_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/logger.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/mask_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/sim.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/train.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/umap.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr/version.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr.egg-info/dependency_links.txt +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr.egg-info/entry_points.txt +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/spacr.egg-info/top_level.txt +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_annotate_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_core.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_gui_classify_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_gui_mask_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_gui_measure_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_gui_sim_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_gui_utils.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_io.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_mask_app.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_measure.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_plot.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_sim.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_timelapse.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_train.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_umap.py +0 -0
- {spacr-0.0.17 → spacr-0.0.20}/tests/test_utils.py +0 -0
{spacr-0.0.17/spacr.egg-info → spacr-0.0.20}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: spacr
-Version: 0.0.17
+Version: 0.0.20
 Summary: Spatial phenotype analysis of crisp screens (SpaCr)
 Home-page: https://github.com/EinarOlafsson/spacr
 Author: Einar Birnir Olafsson
@@ -9,29 +9,29 @@ Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 License-File: LICENSE
-Requires-Dist: torch
-Requires-Dist: torchvision
-Requires-Dist: torch-geometric
-Requires-Dist: numpy
-Requires-Dist: pandas
-Requires-Dist: statsmodels
-Requires-Dist: scikit-image
-Requires-Dist: scikit-learn
-Requires-Dist: seaborn
-Requires-Dist: matplotlib
-Requires-Dist: pillow
-Requires-Dist: imageio
-Requires-Dist: scipy
-Requires-Dist: ipywidgets
-Requires-Dist: mahotas
-Requires-Dist: btrack
-Requires-Dist: trackpy
-Requires-Dist: cellpose
-Requires-Dist: IPython
-Requires-Dist: opencv-python-headless
-Requires-Dist: umap
-Requires-Dist: ttkthemes
-Requires-Dist: lxml
+Requires-Dist: torch>=2.2.1
+Requires-Dist: torchvision>=0.17.1
+Requires-Dist: torch-geometric>=2.5.1
+Requires-Dist: numpy>=1.26.4
+Requires-Dist: pandas>=2.2.1
+Requires-Dist: statsmodels>=0.14.1
+Requires-Dist: scikit-image>=0.22.0
+Requires-Dist: scikit-learn>=1.4.1
+Requires-Dist: seaborn>=0.13.2
+Requires-Dist: matplotlib>=3.8.3
+Requires-Dist: pillow>=10.2.0
+Requires-Dist: imageio>=2.34.0
+Requires-Dist: scipy>=1.12.0
+Requires-Dist: ipywidgets>=8.1.2
+Requires-Dist: mahotas>=1.4.13
+Requires-Dist: btrack>=0.6.5
+Requires-Dist: trackpy>=0.6.2
+Requires-Dist: cellpose>=3.0.6
+Requires-Dist: IPython>=8.18.1
+Requires-Dist: opencv-python-headless>=4.9.0.80
+Requires-Dist: umap>=0.1.1
+Requires-Dist: ttkthemes>=3.2.2
+Requires-Dist: lxml>=5.1.0
 Provides-Extra: dev
 Requires-Dist: pytest>=3.9; extra == "dev"
 Provides-Extra: headless
@@ -68,16 +68,18 @@ Spatial phenotype analysis of crisp screens (SpaCr). A collection of functions f
 
 - **Crop Images:** Objects (e.g. cells) can be saved as PNGs from the object area or bounding box area of each object. Object paths are saved in an sql database that can be annotated and used to train CNNs/Transformer models for classefication tasks.
 
-- **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing
+- **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing.
 
 - **Manual Annotation:** Supports manual annotation of single cell images and segmentation to refine training datasets for training CNNs/Transformers or cellpose, respectively.
 
 - **Finetune Cellpose Models:** Adjust pre-existing Cellpose models to your specific dataset for improved performance.
 
-- **Timelapse Data Support:**
+- **Timelapse Data Support:** Track objects in timelapse image data.
 
 - **Simulations:** Simulate spatial phenotype screens.
 
+- **Misc:** Analyze Ca oscillation, recruitment, infection rate, plaque size/count.
+
 ## Installation
 
 spacr requires Tkinter for its graphical user interface features.
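Every runtime dependency moves from an unpinned name to a lower-bounded requirement, so installing 0.0.20 will now pull versions at or above these bounds. A minimal sketch of checking an existing environment against a few of the new bounds (illustrative only, not part of spacr; it assumes the third-party packaging distribution is importable):

    # Illustrative check, not part of spacr: compare installed versions against
    # a few of the lower bounds that 0.0.20 now declares.
    from importlib.metadata import version, PackageNotFoundError
    from packaging.version import Version  # assumes the 'packaging' package is installed

    lower_bounds = {"torch": "2.2.1", "numpy": "1.26.4", "cellpose": "3.0.6"}

    for name, minimum in lower_bounds.items():
        try:
            installed = Version(version(name))
        except PackageNotFoundError:
            print(f"{name}: not installed")
            continue
        status = "ok" if installed >= Version(minimum) else "older than " + minimum
        print(f"{name} {installed}: {status}")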
{spacr-0.0.17 → spacr-0.0.20}/README.md

@@ -27,16 +27,18 @@ Spatial phenotype analysis of crisp screens (SpaCr). A collection of functions f
 
 - **Crop Images:** Objects (e.g. cells) can be saved as PNGs from the object area or bounding box area of each object. Object paths are saved in an sql database that can be annotated and used to train CNNs/Transformer models for classefication tasks.
 
-- **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing
+- **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing.
 
 - **Manual Annotation:** Supports manual annotation of single cell images and segmentation to refine training datasets for training CNNs/Transformers or cellpose, respectively.
 
 - **Finetune Cellpose Models:** Adjust pre-existing Cellpose models to your specific dataset for improved performance.
 
-- **Timelapse Data Support:**
+- **Timelapse Data Support:** Track objects in timelapse image data.
 
 - **Simulations:** Simulate spatial phenotype screens.
 
+- **Misc:** Analyze Ca oscillation, recruitment, infection rate, plaque size/count.
+
 ## Installation
 
 spacr requires Tkinter for its graphical user interface features.
{spacr-0.0.17 → spacr-0.0.20}/setup.py

@@ -5,34 +5,34 @@ with open("README.md", "r", encoding="utf-8") as fh:
     long_description = fh.read()
 
 dependencies = [
-    'torch',
-    'torchvision',
-    'torch-geometric',
-    'numpy',
-    'pandas',
-    'statsmodels',
-    'scikit-image',
-    'scikit-learn',
-    'seaborn',
-    'matplotlib',
-    'pillow',
-    'imageio',
-    'scipy',
-    'ipywidgets',
-    'mahotas',
-    'btrack',
-    'trackpy',
-    'cellpose',
-    'IPython',
-    'opencv-python-headless',
-    'umap',
-    'ttkthemes',
-    'lxml'
+    'torch>=2.2.1',
+    'torchvision>=0.17.1',
+    'torch-geometric>=2.5.1',
+    'numpy>=1.26.4',
+    'pandas>=2.2.1',
+    'statsmodels>=0.14.1',
+    'scikit-image>=0.22.0',
+    'scikit-learn>=1.4.1',
+    'seaborn>=0.13.2',
+    'matplotlib>=3.8.3',
+    'pillow>=10.2.0',
+    'imageio>=2.34.0',
+    'scipy>=1.12.0',
+    'ipywidgets>=8.1.2',
+    'mahotas>=1.4.13',
+    'btrack>=0.6.5',
+    'trackpy>=0.6.2',
+    'cellpose>=3.0.6',
+    'IPython>=8.18.1',
+    'opencv-python-headless>=4.9.0.80',
+    'umap>=0.1.1',
+    'ttkthemes>=3.2.2',
+    'lxml>=5.1.0'
 ]
 
 setup(
     name="spacr",
-    version="0.0.17",
+    version="0.0.20",
     author="Einar Birnir Olafsson",
     author_email="olafsson@med.umich.com",
     description="Spatial phenotype analysis of crisp screens (SpaCr)",
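The hunk ends at the description line, so the rest of the setup() call is not visible here; presumably the dependencies list is passed to install_requires, and the dev/headless extras seen in PKG-INFO come from extras_require. A hedged sketch of that packaging pattern, not spacr's verbatim setup.py:

    # Sketch of the packaging pattern implied by the metadata above -- not the
    # verbatim spacr setup.py, whose remaining arguments fall outside this hunk.
    from setuptools import setup, find_packages

    dependencies = [
        'torch>=2.2.1',
        'numpy>=1.26.4',
        # ...the remaining lower-bounded requirements listed in the diff
    ]

    setup(
        name="spacr",
        version="0.0.20",
        packages=find_packages(),
        install_requires=dependencies,
        extras_require={
            "dev": ["pytest>=3.9"],  # matches "Provides-Extra: dev" in PKG-INFO
            # a "headless" extra also exists in PKG-INFO, but its contents are not shown here
        },
    )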
{spacr-0.0.17 → spacr-0.0.20}/spacr/__init__.py

@@ -11,6 +11,7 @@ from . import timelapse
 from . import train
 from . import mask_app
 from . import annotate_app
+from . import graph_learning
 from . import gui_utils
 from . import gui_mask_app
 from . import gui_measure_app
@@ -27,6 +28,7 @@ __all__ = [
     "timelapse",
     "train",
     "annotate_app",
+    "graph_learning",
     "gui_utils",
     "mask_app",
     "gui_mask_app",
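With these two __init__.py additions, the new module is exposed like the other spacr submodules. A quick check, assuming 0.0.20 and its dependencies are installed:

    # graph_learning is now imported in spacr/__init__.py and listed in __all__.
    import spacr

    print("graph_learning" in spacr.__all__)   # True in 0.0.20
    from spacr import graph_learning           # importable like the other submodules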
spacr-0.0.20/spacr/alpha.py (new)

@@ -0,0 +1,18 @@
+def gui_mask():
+    from .cli import get_arg_parser
+    from .version import version_str
+
+    args = get_arg_parser().parse_args()
+
+    if args.version:
+        print(version_str)
+        return
+
+    if args.headless:
+        settings = {}
+        spacr.core.preprocess_generate_masks(settings['src'], settings=settings, advanced_settings={})
+        return
+
+    global vars_dict, root
+    root, vars_dict = initiate_mask_root(1000, 1500)
+    root.mainloop()
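alpha.py sketches a command-line entry point: print the version, run the core pipeline headless, or launch the Tk GUI. As committed, the headless branch indexes an empty settings dict and references spacr and initiate_mask_root without importing them, so a direct headless call would look closer to this hedged sketch (only the 'src' key is visible in this diff; the path is hypothetical):

    # Hedged sketch of the headless path that alpha.py appears to intend.
    # '/data/plate1' is a hypothetical input folder; any other settings keys
    # are not shown in this diff.
    import spacr

    settings = {"src": "/data/plate1"}
    spacr.core.preprocess_generate_masks(settings["src"], settings=settings, advanced_settings={})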
spacr-0.0.20/spacr/cli.py (new)

@@ -0,0 +1,41 @@
+"""
+Copyright © 2023 Howard Hughes Medical Institute, Authored by Carsen Stringer and Marius Pachitariu and Michael Rariden.
+"""
+
+import argparse
+
+import argparse
+
+
+def get_arg_parser():
+    """ Parses command line arguments for spacr main functions
+
+    Note: this function has to be in a separate file to allow autodoc to work for CLI.
+    The autodoc_mock_imports in conf.py does not work for sphinx-argparse sometimes,
+    see https://github.com/ashb/sphinx-argparse/issues/9#issue-1097057823
+    """
+
+    parser = argparse.ArgumentParser(description="SPACR Mask App Command Line Parameters")
+    hardware_args = parser.add_argument_group("Hardware Arguments")
+    input_img_args = parser.add_argument_group("Input Image Arguments")
+    #model_args = parser.add_argument_group("Model Arguments")
+    #algorithm_args = parser.add_argument_group("Algorithm Arguments")
+    #training_args = parser.add_argument_group("Training Arguments")
+    #output_args = parser.add_argument_group("Output Arguments")
+
+    # misc settings
+    parser.add_argument("--version", action="store_true",
+                        help="show version info")
+    # misc settings
+    parser.add_argument("--headless", action="store_true",
+                        help="run the app without the gui")
+
+    parser.add_argument("--verbose", action="store_true",
+                        help="show information about running and settings and save to log")
+
+    hardware_args.add_argument("--gpu_device", required=False, default="0", type=str,
+                        help="which gpu device to use, use an integer for torch, or mps for M1")
+
+    input_img_args.add_argument("--src", default=[], type=str,
+                        help="folder containing data to run or train on.")
+    return parser
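The parser exposes --version, --headless, --verbose, --gpu_device, and --src. A short usage illustration, assuming spacr 0.0.20 is installed (the input path is hypothetical):

    # Parse a sample command line with the new spacr.cli parser.
    from spacr.cli import get_arg_parser

    parser = get_arg_parser()
    args = parser.parse_args(["--headless", "--gpu_device", "0", "--src", "/data/plate1"])
    print(args.headless, args.gpu_device, args.src)   # True 0 /data/plate1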