spacr 0.5.0__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- spacr/__init__.py +0 -2
- spacr/__main__.py +3 -3
- spacr/core.py +13 -106
- spacr/gui_core.py +2 -77
- spacr/gui_utils.py +1 -13
- spacr/io.py +24 -25
- spacr/mediar.py +12 -8
- spacr/plot.py +50 -135
- spacr/settings.py +42 -30
- spacr/submodules.py +11 -1
- spacr/timelapse.py +7 -79
- spacr/utils.py +152 -61
- {spacr-0.5.0.dist-info → spacr-0.9.1.dist-info}/METADATA +62 -62
- spacr-0.9.1.dist-info/RECORD +109 -0
- {spacr-0.5.0.dist-info → spacr-0.9.1.dist-info}/WHEEL +1 -1
- spacr/resources/MEDIAR/.gitignore +0 -18
- spacr/resources/MEDIAR/LICENSE +0 -21
- spacr/resources/MEDIAR/README.md +0 -189
- spacr/resources/MEDIAR/SetupDict.py +0 -39
- spacr/resources/MEDIAR/__pycache__/SetupDict.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/evaluate.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/generate_mapping.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/main.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/config/baseline.json +0 -60
- spacr/resources/MEDIAR/config/mediar_example.json +0 -72
- spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -17
- spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -55
- spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -58
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -66
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -66
- spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -16
- spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -23
- spacr/resources/MEDIAR/core/BasePredictor.py +0 -120
- spacr/resources/MEDIAR/core/BaseTrainer.py +0 -240
- spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -59
- spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -113
- spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -2
- spacr/resources/MEDIAR/core/Baseline/__pycache__/Predictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/Trainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/utils.py +0 -80
- spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -105
- spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -234
- spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -172
- spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -3
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/EnsemblePredictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Predictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Trainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -429
- spacr/resources/MEDIAR/core/__init__.py +0 -2
- spacr/resources/MEDIAR/core/__pycache__/BasePredictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/BaseTrainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/utils.py +0 -40
- spacr/resources/MEDIAR/evaluate.py +0 -71
- spacr/resources/MEDIAR/generate_mapping.py +0 -121
- spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
- spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
- spacr/resources/MEDIAR/image/failure_cases.png +0 -0
- spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
- spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
- spacr/resources/MEDIAR/image/mediar_results.png +0 -0
- spacr/resources/MEDIAR/main.py +0 -125
- spacr/resources/MEDIAR/predict.py +0 -70
- spacr/resources/MEDIAR/requirements.txt +0 -14
- spacr/resources/MEDIAR/train_tools/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/measures.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/datasetter.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/transforms.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -88
- spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -161
- spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -77
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/CellAware.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/LoadImage.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/NormalizeImage.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -208
- spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -148
- spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -84
- spacr/resources/MEDIAR/train_tools/measures.py +0 -200
- spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -102
- spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/models/__pycache__/MEDIARFormer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/models/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/utils.py +0 -70
- spacr-0.5.0.dist-info/RECORD +0 -190
- {spacr-0.5.0.dist-info → spacr-0.9.1.dist-info}/LICENSE +0 -0
- {spacr-0.5.0.dist-info → spacr-0.9.1.dist-info}/entry_points.txt +0 -0
- {spacr-0.5.0.dist-info → spacr-0.9.1.dist-info}/top_level.txt +0 -0
spacr/__init__.py
CHANGED
@@ -21,7 +21,6 @@ from . import app_measure
 from . import app_classify
 from . import app_sequencing
 from . import app_umap
-from . import mediar
 from . import submodules
 from . import openai
 from . import ml
@@ -52,7 +51,6 @@ __all__ = [
     "app_classify",
     "app_sequencing",
     "app_umap",
-    "mediar",
     "submodules",
     "openai",
     "ml",
spacr/__main__.py
CHANGED
spacr/core.py
CHANGED
@@ -98,15 +98,12 @@ def preprocess_generate_masks(settings):
     files_to_process = 3
     files_processed = 0
     if settings['masks']:
-        mask_src = os.path.join(src, '
+        mask_src = os.path.join(src, 'masks')
         if settings['cell_channel'] != None:
             time_ls=[]
             if check_mask_folder(src, 'cell_mask_stack'):
                 start = time.time()
-
-                generate_cellpose_masks(mask_src, settings, 'cell')
-            elif settings['segmentation_mode'] == 'mediar':
-                generate_mediar_masks(mask_src, settings, 'cell')
+                generate_cellpose_masks(mask_src, settings, 'cell')
                 stop = time.time()
                 duration = (stop - start)
                 time_ls.append(duration)
@@ -117,10 +114,7 @@ def preprocess_generate_masks(settings):
             time_ls=[]
             if check_mask_folder(src, 'nucleus_mask_stack'):
                 start = time.time()
-
-                generate_cellpose_masks(mask_src, settings, 'nucleus')
-            elif settings['segmentation_mode'] == 'mediar':
-                generate_mediar_masks(mask_src, settings, 'nucleus')
+                generate_cellpose_masks(mask_src, settings, 'nucleus')
                 stop = time.time()
                 duration = (stop - start)
                 time_ls.append(duration)
@@ -131,10 +125,7 @@ def preprocess_generate_masks(settings):
             time_ls=[]
             if check_mask_folder(src, 'pathogen_mask_stack'):
                 start = time.time()
-
-                generate_cellpose_masks(mask_src, settings, 'pathogen')
-            elif settings['segmentation_mode'] == 'mediar':
-                generate_mediar_masks(mask_src, settings, 'pathogen')
+                generate_cellpose_masks(mask_src, settings, 'pathogen')
                 stop = time.time()
                 duration = (stop - start)
                 time_ls.append(duration)
@@ -386,7 +377,13 @@ def generate_cellpose_masks(src, settings, object_type):
                     timelapse_remove_transient=timelapse_remove_transient,
                     radius=radius,
                     n_jobs=n_jobs)
-
+
+                if timelapse_mode == 'trackpy' or timelapse_mode == 'iou':
+                    if timelapse_mode == 'iou':
+                        track_by_iou = True
+                    else:
+                        track_by_iou = False
+
                 mask_stack = _trackpy_track_cells(src=src,
                                                   name=name,
                                                   batch_filenames=batch_filenames,
@@ -397,7 +394,8 @@ def generate_cellpose_masks(src, settings, object_type):
                                                   timelapse_remove_transient=timelapse_remove_transient,
                                                   plot=settings['plot'],
                                                   save=settings['save'],
-                                                  mode=timelapse_mode
+                                                  mode=timelapse_mode,
+                                                  track_by_iou=track_by_iou)
             else:
                 mask_stack = _masks_to_masks_stack(masks)
         else:
@@ -819,97 +817,6 @@ def reducer_hyperparameter_search(settings={}, reduction_params=None, dbscan_par
 
     return
 
-def generate_mediar_masks(src, settings, object_type):
-    """
-    Generates masks using the MEDIARPredictor.
-
-    :param src: Source folder containing images or npz files.
-    :param settings: Dictionary of settings for generating masks.
-    :param object_type: Type of object to detect (e.g., 'cell', 'nucleus', etc.).
-    """
-    from .mediar import MEDIARPredictor
-    from .io import _create_database, _save_object_counts_to_database
-    from .plot import plot_masks
-    from .settings import set_default_settings_preprocess_generate_masks
-    from .utils import prepare_batch_for_segmentation
-
-    # Clear CUDA cache and check if CUDA is available
-    gc.collect()
-    if not torch.cuda.is_available():
-        print(f'Torch CUDA is not available, using CPU')
-
-    settings['src'] = src
-
-    # Preprocess settings
-    settings = set_default_settings_preprocess_generate_masks(settings)
-
-    if settings['verbose']:
-        settings_df = pd.DataFrame(list(settings.items()), columns=['setting_key', 'setting_value'])
-        settings_df['setting_value'] = settings_df['setting_value'].apply(str)
-        display(settings_df)
-
-    figuresize = 10
-    timelapse = settings['timelapse']
-    batch_size = settings['batch_size']
-
-    # Get object settings and initialize MEDIARPredictor
-    mediar_predictor = MEDIARPredictor(input_path=None, output_path=None, normalize=settings['normalize'], use_tta=False)
-
-    # Paths to input npz files
-    paths = [os.path.join(src, file) for file in os.listdir(src) if file.endswith('.npz')]
-
-    # Initialize a database for saving measurements
-    count_loc = os.path.join(os.path.dirname(src), 'measurements', 'measurements.db')
-    os.makedirs(os.path.dirname(src) + '/measurements', exist_ok=True)
-    _create_database(count_loc)
-
-    for file_index, path in enumerate(paths):
-        name = os.path.basename(path)
-        name, ext = os.path.splitext(name)
-        output_folder = os.path.join(os.path.dirname(path), f'{object_type}_mask_stack')
-        os.makedirs(output_folder, exist_ok=True)
-
-        with np.load(path) as data:
-            stack = data['data']
-            filenames = data['filenames']
-
-        for i, filename in enumerate(filenames):
-            output_path = os.path.join(output_folder, filename)
-            if os.path.exists(output_path):
-                print(f"File {filename} already exists. Skipping...")
-                continue
-
-        # Process each batch of images in the stack
-        for i in range(0, stack.shape[0], batch_size):
-            batch = stack[i: i + batch_size]
-            batch_filenames = filenames[i: i + batch_size]
-
-            # Prepare batch for MEDIARPredictor (optional)
-            batch = prepare_batch_for_segmentation(batch)
-
-            # Predict masks using MEDIARPredictor
-            predicted_masks = mediar_predictor.predict_batch(batch)
-
-            # Save predicted masks
-            for j, mask in enumerate(predicted_masks):
-                output_filename = os.path.join(output_folder, batch_filenames[j])
-                mask = mask.astype(np.uint16)
-                np.save(output_filename, mask)
-
-            # Optional: Plot the masks
-            if settings['plot']:
-                for idx, mask in enumerate(predicted_masks):
-                    plot_masks(batch[idx], mask, cmap='inferno', figuresize=figuresize)
-
-            # Save object counts to database
-            _save_object_counts_to_database(predicted_masks, object_type, batch_filenames, count_loc)
-
-        # Clear CUDA cache after each file
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    print("Mask generation completed.")
-
 def generate_screen_graphs(settings):
     """
     Generate screen graphs for different measurements in a given source directory.
spacr/gui_core.py
CHANGED
@@ -872,7 +872,7 @@ def check_src_folders_files(settings, settings_type, q):
         pictures_continue = _folder_has_images(path)
         folder_chan_continue = _has_folder(path, "1")
         folder_stack_continue = _has_folder(path, "stack")
-        folder_npz_continue = _has_folder(path, "
+        folder_npz_continue = _has_folder(path, "masks")
 
         if not pictures_continue:
             if not any([folder_chan_continue, folder_stack_continue, folder_npz_continue]):
@@ -883,7 +883,7 @@ def check_src_folders_files(settings, settings_type, q):
                 q.put(f"Error: Missing stack folder in folder: {path}")
 
             if not folder_npz_continue:
-                q.put(f"Error: Missing 
+                q.put(f"Error: Missing masks folder in folder: {path}")
         else:
             q.put(f"Error: No images in folder: {path}")
 
@@ -1083,81 +1083,6 @@ def process_console_queue():
     # **Continue processing if no error was detected**
     after_id = console_output.after(uppdate_frequency, process_console_queue)
     parent_frame.after_tasks.append(after_id)
-
-def process_console_queue_v2():
-    global q, console_output, parent_frame, progress_bar, process_console_queue
-
-    # Initialize function attribute if it doesn't exist
-    if not hasattr(process_console_queue, "completed_tasks"):
-        process_console_queue.completed_tasks = []
-    if not hasattr(process_console_queue, "current_maximum"):
-        process_console_queue.current_maximum = None
-
-    ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
-
-    while not q.empty():
-        message = q.get_nowait()
-        clean_message = ansi_escape_pattern.sub('', message)
-
-        # **Abort Execution if an Error Message is Detected**
-        if clean_message.startswith("Error:"):
-            console_output.insert(tk.END, clean_message + "\n", "error")
-            console_output.see(tk.END)
-            print("Run aborted due to error:", clean_message)  # Debug message
-            return  # **Exit immediately to stop further execution**
-
-        # Check if the message contains progress information
-        if clean_message.startswith("Progress:"):
-            try:
-                # Extract the progress information
-                match = re.search(r'Progress: (\d+)/(\d+), operation_type: ([\w\s]*),(.*)', clean_message)
-
-                if match:
-                    current_progress = int(match.group(1))
-                    total_progress = int(match.group(2))
-                    operation_type = match.group(3).strip()
-                    additional_info = match.group(4).strip()  # Capture everything after operation_type
-
-                    # Check if the maximum value has changed
-                    if process_console_queue.current_maximum != total_progress:
-                        process_console_queue.current_maximum = total_progress
-                        process_console_queue.completed_tasks = []
-
-                    # Add the task to the completed set
-                    process_console_queue.completed_tasks.append(current_progress)
-
-                    # Calculate the unique progress count
-                    unique_progress_count = len(np.unique(process_console_queue.completed_tasks))
-
-                    # Update the progress bar
-                    if progress_bar:
-                        progress_bar['maximum'] = total_progress
-                        progress_bar['value'] = unique_progress_count
-
-                    # Store operation type and additional info
-                    if operation_type:
-                        progress_bar.operation_type = operation_type
-                        progress_bar.additional_info = additional_info
-
-                    # Update the progress label
-                    if progress_bar.progress_label:
-                        progress_bar.update_label()
-
-                    # Clear completed tasks when progress is complete
-                    if unique_progress_count >= total_progress:
-                        process_console_queue.completed_tasks.clear()
-
-            except Exception as e:
-                print(f"Error parsing progress message: {e}")
-
-        else:
-            # Insert non-progress messages into the console
-            console_output.insert(tk.END, clean_message + "\n")
-            console_output.see(tk.END)
-
-    # **Continue processing if no error was detected**
-    after_id = console_output.after(uppdate_frequency, process_console_queue)
-    parent_frame.after_tasks.append(after_id)
 
 def main_thread_update_function(root, q, fig_queue, canvas_widget):
     global uppdate_frequency
spacr/gui_utils.py
CHANGED
@@ -376,26 +376,14 @@ def convert_settings_dict_for_gui(settings):
         'channel_dims': ('combo', chan_list, '[0,1,2,3]'),
         'dataset_mode': ('combo', ['annotation', 'metadata', 'recruitment'], 'metadata'),
         'cov_type': ('combo', ['HC0', 'HC1', 'HC2', 'HC3', None], None),
-        #'cell_mask_dim': ('combo', chans_v3, None),
-        #'cell_chann_dim': ('combo', chans_v3, None),
-        #'nucleus_mask_dim': ('combo', chans_v3, None),
-        #'nucleus_chann_dim': ('combo', chans_v3, None),
-        #'pathogen_mask_dim': ('combo', chans_v3, None),
-        #'pathogen_chann_dim': ('combo', chans_v3, None),
         'crop_mode': ('combo', ["['cell']", "['nucleus']", "['pathogen']", "['cell', 'nucleus']", "['cell', 'pathogen']", "['nucleus', 'pathogen']", "['cell', 'nucleus', 'pathogen']"], "['cell']"),
-
-        #'nucleus_channel': ('combo', chans_v3, None),
-        #'cell_channel': ('combo', chans_v3, None),
-        #'channel_of_interest': ('combo', chans_v3, None),
-        #'pathogen_channel': ('combo', chans_v3, None),
-        'timelapse_mode': ('combo', ['trackpy', 'btrack'], 'trackpy'),
+        'timelapse_mode': ('combo', ['trackpy', 'iou', 'btrack'], 'trackpy'),
         'train_mode': ('combo', ['erm', 'irm'], 'erm'),
         'clustering': ('combo', ['dbscan', 'kmean'], 'dbscan'),
         'reduction_method': ('combo', ['umap', 'tsne'], 'umap'),
         'model_name': ('combo', ['cyto', 'cyto_2', 'cyto_3', 'nuclei'], 'cyto'),
         'regression_type': ('combo', ['ols','gls','wls','rlm','glm','mixed','quantile','logit','probit','poisson','lasso','ridge'], 'ols'),
         'timelapse_objects': ('combo', ["['cell']", "['nucleus']", "['pathogen']", "['cell', 'nucleus']", "['cell', 'pathogen']", "['nucleus', 'pathogen']", "['cell', 'nucleus', 'pathogen']", None], None),
-        #'timelapse_objects': ('combo', '[cell]', '[nucleus]', '[pathogen]', '[cytoplasm]', None, None),
         'model_type': ('combo', torchvision_models, 'resnet50'),
         'optimizer_type': ('combo', ['adamw', 'adam'], 'adamw'),
         'schedule': ('combo', ['reduce_lr_on_plateau', 'step_lr'], 'reduce_lr_on_plateau'),
spacr/io.py
CHANGED
@@ -1175,7 +1175,7 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
 
     paths = []
     time_ls = []
-    output_fldr = os.path.join(os.path.dirname(src), '
+    output_fldr = os.path.join(os.path.dirname(src), 'masks')
     os.makedirs(output_fldr, exist_ok=True)
 
     if settings['timelapse']:
@@ -1333,7 +1333,7 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
         None
     """
     paths = [os.path.join(src, file) for file in os.listdir(src) if file.endswith('.npz')]
-    output_fldr = os.path.join(os.path.dirname(src), '
+    output_fldr = os.path.join(os.path.dirname(src), 'masks')
     os.makedirs(output_fldr, exist_ok=True)
     time_ls = []
 
@@ -1418,7 +1418,7 @@ def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
         save_dtype (numpy.dtype, optional): The data type to save the normalized stack. Defaults to np.float32.
     """
     paths = [os.path.join(src, file) for file in os.listdir(src) if file.endswith('.npz')]
-    output_fldr = os.path.join(os.path.dirname(src), '
+    output_fldr = os.path.join(os.path.dirname(src), 'masks')
     os.makedirs(output_fldr, exist_ok=True)
 
     for file_index, path in enumerate(paths):
@@ -1567,16 +1567,16 @@ def preprocess_img_data(settings):
     Returns:
         None
     """
-
     src = settings['src']
-    delete_empty_subdirectories(src)
-    files = os.listdir(src)
 
+    if len(os.listdir(src)) < 100:
+        delete_empty_subdirectories(src)
+
+    files = os.listdir(src)
     valid_ext = ['tif', 'tiff', 'png', 'jpg', 'jpeg', 'bmp', 'nd2', 'czi', 'lif']
     extensions = [file.split('.')[-1].lower() for file in files]
     # Filter only valid extensions
     valid_extensions = [ext for ext in extensions if ext in valid_ext]
-
     # Determine most common valid extension
     img_format = None
     if valid_extensions:
@@ -1595,10 +1595,10 @@ def preprocess_img_data(settings):
         print('Found existing stack folder.')
         if os.path.exists(os.path.join(src,'channel_stack')):
             print('Found existing channel_stack folder.')
-        if os.path.exists(os.path.join(src,'
-            print('Found existing 
+        if os.path.exists(os.path.join(src,'masks')):
+            print('Found existing masks folder. Skipping preprocessing')
             return settings, src
-
+
     mask_channels = [settings['nucleus_channel'], settings['cell_channel'], settings['pathogen_channel']]
 
     settings = set_default_settings_preprocess_img_data(settings)
@@ -1606,16 +1606,16 @@ def preprocess_img_data(settings):
     regex = _get_regex(settings['metadata_type'], img_format, settings['custom_regex'])
 
     if settings['test_mode']:
-
         print(f"Running spacr in test mode")
         settings['plot'] = True
-
-
-
-
-
-
-
+        if os.path.exists(os.path.join(src,'test')):
+            try:
+                os.rmdir(os.path.join(src, 'test'))
+                print(f"Deleted test directory: {os.path.join(src, 'test')}")
+            except OSError as e:
+                print(f"Error deleting test directory: {e}")
+                print(f"Delete manually before running test mode")
+                pass
 
         src = _run_test_mode(settings['src'], regex, settings['timelapse'], settings['test_images'], settings['random_test'])
         settings['src'] = src
@@ -1991,12 +1991,12 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
     """
     folder_paths = [os.path.join(src+'/stack')]
 
-    if cell_chann_dim is not None or os.path.exists(os.path.join(src, '
-        folder_paths = folder_paths + [os.path.join(src, '
-    if nucleus_chann_dim is not None or os.path.exists(os.path.join(src, '
-        folder_paths = folder_paths + [os.path.join(src, '
-    if pathogen_chann_dim is not None or os.path.exists(os.path.join(src, '
-        folder_paths = folder_paths + [os.path.join(src, '
+    if cell_chann_dim is not None or os.path.exists(os.path.join(src, 'masks', 'cell_mask_stack')):
+        folder_paths = folder_paths + [os.path.join(src, 'masks','cell_mask_stack')]
+    if nucleus_chann_dim is not None or os.path.exists(os.path.join(src, 'masks', 'nucleus_mask_stack')):
+        folder_paths = folder_paths + [os.path.join(src, 'masks','nucleus_mask_stack')]
+    if pathogen_chann_dim is not None or os.path.exists(os.path.join(src, 'masks', 'pathogen_mask_stack')):
+        folder_paths = folder_paths + [os.path.join(src, 'masks','pathogen_mask_stack')]
 
     output_folder = src+'/merged'
     reference_folder = folder_paths[0]
@@ -2870,7 +2870,6 @@ def generate_training_dataset(settings):
 
     return class_paths_ls
 
-    from .io import _read_and_merge_data, _read_db
     from .utils import get_paths_from_db, annotate_conditions, save_settings
     from .settings import set_generate_training_dataset_defaults
 
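
The recurring change in this file renames the normalized-array output folder (its 0.5.0 name is truncated in this diff view) to `masks`, which `_load_and_concatenate_arrays` then probes for per-object mask stacks. A sketch of the implied layout, with `src` as an illustrative path:

    import os

    src = "/data/plate1"                        # illustrative source folder
    stack_dir = os.path.join(src, "stack")
    masks_dir = os.path.join(src, "masks")      # renamed output folder in 0.9.1
    for obj in ("cell", "nucleus", "pathogen"):
        print(os.path.join(masks_dir, f"{obj}_mask_stack"))
    merged_dir = os.path.join(src, "merged")    # output of _load_and_concatenate_arrays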
spacr/mediar.py
CHANGED
@@ -15,14 +15,18 @@ if not os.path.exists(init_file):
     # Add MEDIAR to sys.path
     sys.path.insert(0, mediar_path)
 
-try:
-
-    from core.MEDIAR import Predictor, EnsemblePredictor
-    from train_tools.models import MEDIARFormer
-
-
-
-
+#try:
+    # Now import the dependencies from MEDIAR
+    # from core.MEDIAR import Predictor, EnsemblePredictor
+    # from train_tools.models import MEDIARFormer
+
+    # from train_tools.models import MEDIARFormer
+Predictor, EnsemblePredictor, MEDIARFormer = None, None, None
+
+#finally:
+    # # Remove the temporary __init__.py file after the import
+    # if os.path.exists(init_file):
+    #     os.remove(init_file) # Remove the __init__.py file
 
 def display_imgs_in_list(lists_of_imgs, cmaps=None):
     """