spacr 0.4.60__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/__init__.py +2 -4
- spacr/__main__.py +3 -3
- spacr/core.py +13 -107
- spacr/gui.py +0 -1
- spacr/gui_core.py +2 -2
- spacr/gui_utils.py +5 -14
- spacr/io.py +189 -200
- spacr/mediar.py +12 -8
- spacr/plot.py +50 -13
- spacr/settings.py +71 -14
- spacr/submodules.py +21 -14
- spacr/timelapse.py +192 -6
- spacr/utils.py +180 -56
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/METADATA +64 -62
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/RECORD +20 -72
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/WHEEL +1 -1
- spacr/resources/MEDIAR/.gitignore +0 -18
- spacr/resources/MEDIAR/LICENSE +0 -21
- spacr/resources/MEDIAR/README.md +0 -189
- spacr/resources/MEDIAR/SetupDict.py +0 -39
- spacr/resources/MEDIAR/config/baseline.json +0 -60
- spacr/resources/MEDIAR/config/mediar_example.json +0 -72
- spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -17
- spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -55
- spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -58
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -66
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -66
- spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -16
- spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -23
- spacr/resources/MEDIAR/core/BasePredictor.py +0 -120
- spacr/resources/MEDIAR/core/BaseTrainer.py +0 -240
- spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -59
- spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -113
- spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -2
- spacr/resources/MEDIAR/core/Baseline/utils.py +0 -80
- spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -105
- spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -234
- spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -172
- spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -3
- spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -429
- spacr/resources/MEDIAR/core/__init__.py +0 -2
- spacr/resources/MEDIAR/core/utils.py +0 -40
- spacr/resources/MEDIAR/evaluate.py +0 -71
- spacr/resources/MEDIAR/generate_mapping.py +0 -121
- spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
- spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
- spacr/resources/MEDIAR/image/failure_cases.png +0 -0
- spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
- spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
- spacr/resources/MEDIAR/image/mediar_results.png +0 -0
- spacr/resources/MEDIAR/main.py +0 -125
- spacr/resources/MEDIAR/predict.py +0 -70
- spacr/resources/MEDIAR/requirements.txt +0 -14
- spacr/resources/MEDIAR/train_tools/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -88
- spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -161
- spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -77
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -208
- spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -148
- spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -84
- spacr/resources/MEDIAR/train_tools/measures.py +0 -200
- spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -102
- spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/utils.py +0 -70
- spacr/stats.py +0 -221
- /spacr/{cellpose.py → spacr_cellpose.py} +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/LICENSE +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/entry_points.txt +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/top_level.txt +0 -0
spacr/__init__.py
CHANGED
@@ -21,12 +21,11 @@ from . import app_measure
 from . import app_classify
 from . import app_sequencing
 from . import app_umap
-from . import mediar
 from . import submodules
 from . import openai
 from . import ml
 from . import toxo
-from . import cellpose
+from . import spacr_cellpose
 from . import sp_stats
 from . import logger
 
@@ -52,12 +51,11 @@ __all__ = [
     "app_classify",
     "app_sequencing",
     "app_umap",
-    "mediar",
     "submodules",
     "openai",
     "ml",
     "toxo",
-    "cellpose",
+    "spacr_cellpose",
    "sp_stats",
    "logger"
]
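The practical upshot for downstream code: the Cellpose helpers previously importable as `spacr.cellpose` now live in `spacr.spacr_cellpose` (the module itself is unchanged; see the `cellpose.py → spacr_cellpose.py` rename with +0 -0 in the file list), and `spacr.mediar` is no longer imported at package load. A minimal, version-tolerant import sketch; `identify_masks_finetune` is taken from the GUI wiring shown further down in this diff, and treating it as present in both layouts is an assumption:

```python
# Sketch: import the renamed Cellpose helper module across spacr versions.
try:
    from spacr.spacr_cellpose import identify_masks_finetune  # spacr >= 0.9.0
except ImportError:
    from spacr.cellpose import identify_masks_finetune        # older layouts (assumed)
```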
spacr/__main__.py
CHANGED
spacr/core.py
CHANGED
@@ -98,15 +98,12 @@ def preprocess_generate_masks(settings):
     files_to_process = 3
     files_processed = 0
     if settings['masks']:
-        mask_src = os.path.join(src, '
+        mask_src = os.path.join(src, 'masks')
         if settings['cell_channel'] != None:
             time_ls=[]
             if check_mask_folder(src, 'cell_mask_stack'):
                 start = time.time()
-                if settings['segmentation_mode'] == 'cellpose':
-                    generate_cellpose_masks(mask_src, settings, 'cell')
-                elif settings['segmentation_mode'] == 'mediar':
-                    generate_mediar_masks(mask_src, settings, 'cell')
+                generate_cellpose_masks(mask_src, settings, 'cell')
                 stop = time.time()
                 duration = (stop - start)
                 time_ls.append(duration)
@@ -117,10 +114,7 @@ def preprocess_generate_masks(settings):
             time_ls=[]
             if check_mask_folder(src, 'nucleus_mask_stack'):
                 start = time.time()
-                if settings['segmentation_mode'] == 'cellpose':
-                    generate_cellpose_masks(mask_src, settings, 'nucleus')
-                elif settings['segmentation_mode'] == 'mediar':
-                    generate_mediar_masks(mask_src, settings, 'nucleus')
+                generate_cellpose_masks(mask_src, settings, 'nucleus')
                 stop = time.time()
                 duration = (stop - start)
                 time_ls.append(duration)
@@ -131,10 +125,7 @@ def preprocess_generate_masks(settings):
             time_ls=[]
             if check_mask_folder(src, 'pathogen_mask_stack'):
                 start = time.time()
-                if settings['segmentation_mode'] == 'cellpose':
-                    generate_cellpose_masks(mask_src, settings, 'pathogen')
-                elif settings['segmentation_mode'] == 'mediar':
-                    generate_mediar_masks(mask_src, settings, 'pathogen')
+                generate_cellpose_masks(mask_src, settings, 'pathogen')
                 stop = time.time()
                 duration = (stop - start)
                 time_ls.append(duration)
@@ -167,7 +158,6 @@ def preprocess_generate_masks(settings):
 
     if settings['plot']:
         if not settings['timelapse']:
-
             if settings['test_mode'] == True:
                 settings['examples_to_plot'] = len(os.path.join(src,'merged'))
 
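With the `segmentation_mode` branch gone, `preprocess_generate_masks` always routes mask generation through `generate_cellpose_masks` for whichever of the cell, nucleus, and pathogen channels are configured. A hypothetical minimal call, using only setting keys referenced elsewhere in this diff; values are illustrative and remaining keys are assumed to come from `set_default_settings_preprocess_generate_masks`:

```python
from spacr.core import preprocess_generate_masks

# Hypothetical settings sketch; keys mirror those referenced in the hunks above,
# values are illustrative only.
settings = {
    "src": "/path/to/plate1",   # experiment folder with the preprocessed stacks
    "masks": True,               # generate cell/nucleus/pathogen mask stacks
    "cell_channel": 0,
    "nucleus_channel": 1,
    "pathogen_channel": 2,
    "timelapse": False,
    "test_mode": False,
    "plot": False,
}
# preprocess_generate_masks(settings)   # dispatches to generate_cellpose_masks only in 0.9.0
```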
@@ -387,7 +377,13 @@ def generate_cellpose_masks(src, settings, object_type):
                                                     timelapse_remove_transient=timelapse_remove_transient,
                                                     radius=radius,
                                                     n_jobs=n_jobs)
-
+
+                    if timelapse_mode == 'trackpy' or timelapse_mode == 'iou':
+                        if timelapse_mode == 'iou':
+                            track_by_iou = True
+                        else:
+                            track_by_iou = False
+
                     mask_stack = _trackpy_track_cells(src=src,
                                                       name=name,
                                                       batch_filenames=batch_filenames,
@@ -398,7 +394,8 @@ def generate_cellpose_masks(src, settings, object_type):
                                                       timelapse_remove_transient=timelapse_remove_transient,
                                                       plot=settings['plot'],
                                                       save=settings['save'],
-                                                      mode=timelapse_mode)
+                                                      mode=timelapse_mode,
+                                                      track_by_iou=track_by_iou)
                 else:
                     mask_stack = _masks_to_masks_stack(masks)
             else:
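The added branch simply derives a boolean: `track_by_iou` is True only when `timelapse_mode == 'iou'`, and the trackpy-based tracking path now handles both 'trackpy' and 'iou'. How `_trackpy_track_cells` uses the flag is not part of this diff; the sketch below only illustrates the IoU criterion the mode name refers to, for two labelled masks from consecutive frames (function and variable names here are hypothetical, not spacr API):

```python
import numpy as np

def label_iou(prev_mask: np.ndarray, next_mask: np.ndarray,
              prev_label: int, next_label: int) -> float:
    """Intersection-over-union between one object in frame t and one in frame t+1."""
    a = prev_mask == prev_label
    b = next_mask == next_label
    union = np.logical_or(a, b).sum()
    return float(np.logical_and(a, b).sum() / union) if union else 0.0

# The new settings branch reduces to:
# track_by_iou = (timelapse_mode == 'iou')
```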
@@ -820,97 +817,6 @@ def reducer_hyperparameter_search(settings={}, reduction_params=None, dbscan_par
 
     return
 
-def generate_mediar_masks(src, settings, object_type):
-    """
-    Generates masks using the MEDIARPredictor.
-
-    :param src: Source folder containing images or npz files.
-    :param settings: Dictionary of settings for generating masks.
-    :param object_type: Type of object to detect (e.g., 'cell', 'nucleus', etc.).
-    """
-    from .mediar import MEDIARPredictor
-    from .io import _create_database, _save_object_counts_to_database
-    from .plot import plot_masks
-    from .settings import set_default_settings_preprocess_generate_masks
-    from .utils import prepare_batch_for_segmentation
-
-    # Clear CUDA cache and check if CUDA is available
-    gc.collect()
-    if not torch.cuda.is_available():
-        print(f'Torch CUDA is not available, using CPU')
-
-    settings['src'] = src
-
-    # Preprocess settings
-    settings = set_default_settings_preprocess_generate_masks(settings)
-
-    if settings['verbose']:
-        settings_df = pd.DataFrame(list(settings.items()), columns=['setting_key', 'setting_value'])
-        settings_df['setting_value'] = settings_df['setting_value'].apply(str)
-        display(settings_df)
-
-    figuresize = 10
-    timelapse = settings['timelapse']
-    batch_size = settings['batch_size']
-
-    # Get object settings and initialize MEDIARPredictor
-    mediar_predictor = MEDIARPredictor(input_path=None, output_path=None, normalize=settings['normalize'], use_tta=False)
-
-    # Paths to input npz files
-    paths = [os.path.join(src, file) for file in os.listdir(src) if file.endswith('.npz')]
-
-    # Initialize a database for saving measurements
-    count_loc = os.path.join(os.path.dirname(src), 'measurements', 'measurements.db')
-    os.makedirs(os.path.dirname(src) + '/measurements', exist_ok=True)
-    _create_database(count_loc)
-
-    for file_index, path in enumerate(paths):
-        name = os.path.basename(path)
-        name, ext = os.path.splitext(name)
-        output_folder = os.path.join(os.path.dirname(path), f'{object_type}_mask_stack')
-        os.makedirs(output_folder, exist_ok=True)
-
-        with np.load(path) as data:
-            stack = data['data']
-            filenames = data['filenames']
-
-        for i, filename in enumerate(filenames):
-            output_path = os.path.join(output_folder, filename)
-            if os.path.exists(output_path):
-                print(f"File {filename} already exists. Skipping...")
-                continue
-
-        # Process each batch of images in the stack
-        for i in range(0, stack.shape[0], batch_size):
-            batch = stack[i: i + batch_size]
-            batch_filenames = filenames[i: i + batch_size]
-
-            # Prepare batch for MEDIARPredictor (optional)
-            batch = prepare_batch_for_segmentation(batch)
-
-            # Predict masks using MEDIARPredictor
-            predicted_masks = mediar_predictor.predict_batch(batch)
-
-            # Save predicted masks
-            for j, mask in enumerate(predicted_masks):
-                output_filename = os.path.join(output_folder, batch_filenames[j])
-                mask = mask.astype(np.uint16)
-                np.save(output_filename, mask)
-
-            # Optional: Plot the masks
-            if settings['plot']:
-                for idx, mask in enumerate(predicted_masks):
-                    plot_masks(batch[idx], mask, cmap='inferno', figuresize=figuresize)
-
-            # Save object counts to database
-            _save_object_counts_to_database(predicted_masks, object_type, batch_filenames, count_loc)
-
-        # Clear CUDA cache after each file
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    print("Mask generation completed.")
-
 def generate_screen_graphs(settings):
     """
     Generate screen graphs for different measurements in a given source directory.
spacr/gui.py
CHANGED
@@ -48,7 +48,6 @@ class MainApp(tk.Tk):
         }
 
         self.additional_gui_apps = {
-            "Convert": (lambda frame: initiate_root(self, 'convert'), "Convert images to Grayscale TIFs."),
             "Umap": (lambda frame: initiate_root(self, 'umap'), "Generate UMAP embeddings with datapoints represented as images."),
             "Train Cellpose": (lambda frame: initiate_root(self, 'train_cellpose'), "Train custom Cellpose models."),
             "ML Analyze": (lambda frame: initiate_root(self, 'ml_analyze'), "Machine learning analysis of data."),
spacr/gui_core.py
CHANGED
@@ -872,7 +872,7 @@ def check_src_folders_files(settings, settings_type, q):
             pictures_continue = _folder_has_images(path)
             folder_chan_continue = _has_folder(path, "1")
             folder_stack_continue = _has_folder(path, "stack")
-            folder_npz_continue = _has_folder(path, "
+            folder_npz_continue = _has_folder(path, "masks")
 
             if not pictures_continue:
                 if not any([folder_chan_continue, folder_stack_continue, folder_npz_continue]):
@@ -883,7 +883,7 @@ def check_src_folders_files(settings, settings_type, q):
                         q.put(f"Error: Missing stack folder in folder: {path}")
 
                     if not folder_npz_continue:
-                        q.put(f"Error: Missing 
+                        q.put(f"Error: Missing masks folder in folder: {path}")
             else:
                 q.put(f"Error: No images in folder: {path}")
 
spacr/gui_utils.py
CHANGED
@@ -376,25 +376,14 @@ def convert_settings_dict_for_gui(settings):
         'channel_dims': ('combo', chan_list, '[0,1,2,3]'),
         'dataset_mode': ('combo', ['annotation', 'metadata', 'recruitment'], 'metadata'),
         'cov_type': ('combo', ['HC0', 'HC1', 'HC2', 'HC3', None], None),
-        #'cell_mask_dim': ('combo', chans_v3, None),
-        #'cell_chann_dim': ('combo', chans_v3, None),
-        #'nucleus_mask_dim': ('combo', chans_v3, None),
-        #'nucleus_chann_dim': ('combo', chans_v3, None),
-        #'pathogen_mask_dim': ('combo', chans_v3, None),
-        #'pathogen_chann_dim': ('combo', chans_v3, None),
         'crop_mode': ('combo', ["['cell']", "['nucleus']", "['pathogen']", "['cell', 'nucleus']", "['cell', 'pathogen']", "['nucleus', 'pathogen']", "['cell', 'nucleus', 'pathogen']"], "['cell']"),
-
-        #'nucleus_channel': ('combo', chans_v3, None),
-        #'cell_channel': ('combo', chans_v3, None),
-        #'channel_of_interest': ('combo', chans_v3, None),
-        #'pathogen_channel': ('combo', chans_v3, None),
-        'timelapse_mode': ('combo', ['trackpy', 'btrack'], 'trackpy'),
+        'timelapse_mode': ('combo', ['trackpy', 'iou', 'btrack'], 'trackpy'),
         'train_mode': ('combo', ['erm', 'irm'], 'erm'),
         'clustering': ('combo', ['dbscan', 'kmean'], 'dbscan'),
         'reduction_method': ('combo', ['umap', 'tsne'], 'umap'),
         'model_name': ('combo', ['cyto', 'cyto_2', 'cyto_3', 'nuclei'], 'cyto'),
         'regression_type': ('combo', ['ols','gls','wls','rlm','glm','mixed','quantile','logit','probit','poisson','lasso','ridge'], 'ols'),
-        'timelapse_objects': ('combo', ['cell', 'nucleus', 'pathogen', '
+        'timelapse_objects': ('combo', ["['cell']", "['nucleus']", "['pathogen']", "['cell', 'nucleus']", "['cell', 'pathogen']", "['nucleus', 'pathogen']", "['cell', 'nucleus', 'pathogen']", None], None),
         'model_type': ('combo', torchvision_models, 'resnet50'),
         'optimizer_type': ('combo', ['adamw', 'adam'], 'adamw'),
         'schedule': ('combo', ['reduce_lr_on_plateau', 'step_lr'], 'reduce_lr_on_plateau'),
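Two GUI-facing changes here: the `timelapse_mode` combo gains the new 'iou' option, and `timelapse_objects` switches from single object names to string representations of Python lists, matching the convention already used for `crop_mode`. How the GUI parses those strings back into lists is not shown in this diff; a plausible round-trip (the `ast.literal_eval` step is an assumption, not spacr code):

```python
import ast

selected = "['cell', 'nucleus']"   # a timelapse_objects value in the new format
timelapse_objects = ast.literal_eval(selected) if selected else None
assert timelapse_objects == ['cell', 'nucleus']
```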
@@ -470,7 +459,7 @@ def function_gui_wrapper(function=None, settings={}, q=None, fig_queue=None, imp
 def run_function_gui(settings_type, settings, q, fig_queue, stop_requested):
 
     from .core import generate_image_umap, preprocess_generate_masks
-    from .cellpose import identify_masks_finetune, check_cellpose_models, compare_cellpose_masks
+    from .spacr_cellpose import identify_masks_finetune, check_cellpose_models, compare_cellpose_masks
     from .submodules import analyze_recruitment
     from .ml import generate_ml_scores, perform_regression
     from .submodules import train_cellpose, analyze_plaques
@@ -939,8 +928,10 @@ def convert_to_number(value):
 
     """
     Converts a string value to an integer if possible, otherwise converts to a float.
+
     Args:
         value (str): The string representation of the number.
+
     Returns:
         int or float: The converted number.
     """
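The `convert_to_number` change is documentation-only: blank lines are added so the Args and Returns sections render as separate blocks. For reference, a standalone sketch matching the documented contract (an illustration of the behaviour the docstring describes, not the package's own implementation):

```python
def convert_to_number(value: str):
    """Return int(value) when possible, otherwise float(value)."""
    try:
        return int(value)
    except ValueError:
        return float(value)

assert convert_to_number("42") == 42
assert convert_to_number("3.14") == 3.14
```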