spacr 0.2.21__py3-none-any.whl → 0.2.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/app_annotate.py +3 -4
- spacr/core.py +58 -229
- spacr/gui_core.py +83 -36
- spacr/gui_elements.py +414 -63
- spacr/gui_utils.py +58 -32
- spacr/io.py +121 -37
- spacr/measure.py +6 -8
- spacr/resources/icons/measure.png +0 -0
- spacr/resources/icons/ml_analyze.png +0 -0
- spacr/resources/icons/recruitment.png +0 -0
- spacr/resources/icons/spacr_logo_rotation.gif +0 -0
- spacr/resources/icons/train_cellpose.png +0 -0
- spacr/settings.py +1 -4
- spacr/utils.py +55 -0
- {spacr-0.2.21.dist-info → spacr-0.2.31.dist-info}/METADATA +1 -1
- {spacr-0.2.21.dist-info → spacr-0.2.31.dist-info}/RECORD +20 -18
- {spacr-0.2.21.dist-info → spacr-0.2.31.dist-info}/LICENSE +0 -0
- {spacr-0.2.21.dist-info → spacr-0.2.31.dist-info}/WHEEL +0 -0
- {spacr-0.2.21.dist-info → spacr-0.2.31.dist-info}/entry_points.txt +0 -0
- {spacr-0.2.21.dist-info → spacr-0.2.31.dist-info}/top_level.txt +0 -0
spacr/gui_utils.py
CHANGED
@@ -3,20 +3,13 @@ import tkinter as tk
 from tkinter import ttk
 
 from . gui_core import initiate_root
-from .gui_elements import spacrLabel, spacrCheckbutton, AnnotateApp
+from .gui_elements import spacrLabel, spacrCheckbutton, AnnotateApp, spacrEntry, spacrCheck, spacrCombo, set_default_font
 
 try:
     ctypes.windll.shcore.SetProcessDpiAwareness(True)
 except AttributeError:
     pass
 
-def set_default_font(root, font_name="Helvetica", size=12):
-    default_font = (font_name, size)
-    root.option_add("*Font", default_font)
-    root.option_add("*TButton.Font", default_font)
-    root.option_add("*TLabel.Font", default_font)
-    root.option_add("*TEntry.Font", default_font)
-
 def proceed_with_app_v1(root, app_name, app_func):
     from .gui import gui_app
 
@@ -98,6 +91,7 @@ def parse_list(value):
     except (ValueError, SyntaxError) as e:
         raise ValueError(f"Invalid format for list: {value}. Error: {e}")
 
+# Usage example in your create_input_field function
 def create_input_field(frame, label_text, row, var_type='entry', options=None, default_value=None):
     label_column = 0
     widget_column = 1
@@ -107,22 +101,22 @@ def create_input_field(frame, label_text, row, var_type='entry', options=None, d
     frame.grid_columnconfigure(widget_column, weight=1)  # Allow the widget column to expand
 
     # Right-align the label text and the label itself
-    label =
+    label = ttk.Label(frame, text=label_text, background="black", foreground="white", anchor='e', justify='right')
     label.grid(column=label_column, row=row, sticky=tk.E, padx=(5, 2), pady=5)  # Align label to the right
 
     if var_type == 'entry':
         var = tk.StringVar(value=default_value)  # Set default value
-        entry =
+        entry = spacrEntry(frame, textvariable=var, outline=False)
         entry.grid(column=widget_column, row=row, sticky=tk.W, padx=(2, 5), pady=5)  # Align widget to the left
         return (label, entry, var)  # Return both the label and the entry, and the variable
     elif var_type == 'check':
         var = tk.BooleanVar(value=default_value)  # Set default value (True/False)
-        check =
+        check = spacrCheck(frame, text="", variable=var)
         check.grid(column=widget_column, row=row, sticky=tk.W, padx=(2, 5), pady=5)  # Align widget to the left
         return (label, check, var)  # Return both the label and the checkbutton, and the variable
     elif var_type == 'combo':
         var = tk.StringVar(value=default_value)  # Set default value
-        combo =
+        combo = spacrCombo(frame, textvariable=var, values=options)  # Apply TCombobox style
         combo.grid(column=widget_column, row=row, sticky=tk.W, padx=(2, 5), pady=5)  # Align widget to the left
         if default_value:
             combo.set(default_value)
@@ -158,32 +152,32 @@ def cancel_after_tasks(frame):
         frame.after_cancel(task)
     frame.after_tasks.clear()
 
-def main_thread_update_function(root, q, fig_queue, canvas_widget
+def main_thread_update_function(root, q, fig_queue, canvas_widget):
     try:
-        ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
+        #ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
         while not q.empty():
             message = q.get_nowait()
-            clean_message = ansi_escape_pattern.sub('', message)
-            if clean_message.startswith("Progress"):
-
-            if clean_message.startswith("\rProgress"):
-
-            elif clean_message.startswith("Successfully"):
-
-            elif clean_message.startswith("Processing"):
-
-            elif clean_message.startswith("scale"):
-
-            elif clean_message.startswith("plot_cropped_arrays"):
-
-            elif clean_message == "" or clean_message == "\r" or clean_message.strip() == "":
-
-            else:
-
+            #clean_message = ansi_escape_pattern.sub('', message)
+            #if clean_message.startswith("Progress"):
+            # progress_label.config(text=clean_message)
+            #if clean_message.startswith("\rProgress"):
+            # progress_label.config(text=clean_message)
+            #elif clean_message.startswith("Successfully"):
+            # progress_label.config(text=clean_message)
+            #elif clean_message.startswith("Processing"):
+            # progress_label.config(text=clean_message)
+            #elif clean_message.startswith("scale"):
+            # pass
+            #elif clean_message.startswith("plot_cropped_arrays"):
+            # pass
+            #elif clean_message == "" or clean_message == "\r" or clean_message.strip() == "":
+            # pass
+            #else:
+            # print(clean_message)
     except Exception as e:
         print(f"Error updating GUI canvas: {e}")
     finally:
-        root.after(100, lambda: main_thread_update_function(root, q, fig_queue, canvas_widget
+        root.after(100, lambda: main_thread_update_function(root, q, fig_queue, canvas_widget))
 
 def annotate(settings):
     from .settings import set_annotate_default_settings
@@ -324,3 +318,35 @@ def annotate_with_image_refs(settings, root, shutdown_callback):
     # Call load_images after setting up the root window
     app.load_images()
 
+def annotate_with_image_refs(settings, root, shutdown_callback):
+    from .settings import set_annotate_default_settings
+
+    settings = set_annotate_default_settings(settings)
+    src = settings['src']
+
+    db = os.path.join(src, 'measurements/measurements.db')
+    conn = sqlite3.connect(db)
+    c = conn.cursor()
+    c.execute('PRAGMA table_info(png_list)')
+    cols = c.fetchall()
+    if settings['annotation_column'] not in [col[1] for col in cols]:
+        c.execute(f"ALTER TABLE png_list ADD COLUMN {settings['annotation_column']} integer")
+    conn.commit()
+    conn.close()
+
+    screen_width = root.winfo_screenwidth()
+    screen_height = root.winfo_screenheight()
+    root.geometry(f"{screen_width}x{screen_height}")
+
+    app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'])
+
+    # Set the canvas background to black
+    root.configure(bg='black')
+
+    # Store the shutdown function and next app details in the root
+    root.current_app_exit_func = lambda: [app.shutdown(), shutdown_callback()]
+
+    # Call load_images after setting up the root window
+    app.load_images()
+
+
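Note on the gui_utils.py changes above: the set_default_font helper was removed here and is now imported from spacr.gui_elements instead (see the new import line). A minimal usage sketch, assuming the relocated function keeps the signature and body shown in the removed lines; the standalone definition below is only an illustrative stand-in, not the gui_elements implementation:

import tkinter as tk

def set_default_font(root, font_name="Helvetica", size=12):
    # Same body as the lines removed from gui_utils.py above
    default_font = (font_name, size)
    root.option_add("*Font", default_font)
    root.option_add("*TButton.Font", default_font)
    root.option_add("*TLabel.Font", default_font)
    root.option_add("*TEntry.Font", default_font)

root = tk.Tk()
set_default_font(root, font_name="Helvetica", size=12)  # apply app-wide font defaults before building widgets
root.destroy()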
spacr/io.py
CHANGED
@@ -583,7 +583,7 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
         None
     """
 
-    from .utils import _extract_filename_metadata
+    from .utils import _extract_filename_metadata, print_progress
 
     regular_expression = re.compile(regex)
     images_by_key = defaultdict(list)
@@ -591,10 +591,15 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
     if not os.path.exists(stack_path) or (os.path.isdir(stack_path) and len(os.listdir(stack_path)) == 0):
         all_filenames = [filename for filename in os.listdir(src) if filename.endswith(img_format)]
         print(f'All_files:{len(all_filenames)} in {src}')
+        time_ls = []
+        processed = 0
         for i in range(0, len(all_filenames), batch_size):
+            start = time.time()
             batch_filenames = all_filenames[i:i+batch_size]
+            processed += len(batch_filenames)
             for filename in batch_filenames:
                 images_by_key = _extract_filename_metadata(batch_filenames, src, images_by_key, regular_expression, metadata_type, pick_slice, skip_mode)
+
             if pick_slice:
                 for key in images_by_key:
                     plate, well, field, channel, mode = key
@@ -609,6 +614,14 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                         print(f'WARNING: A file with the same name already exists at location {output_filename}')
                     else:
                         mip_image.save(output_path)
+
+                stop = time.time()
+                duration = stop - start
+                time_ls.append(duration)
+                files_processed = processed
+                files_to_process = len(all_filenames)
+                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
+
             else:
                 for key, images in images_by_key.items():
                     mip = np.max(np.stack(images), axis=0)
@@ -623,6 +636,12 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                         print(f'WARNING: A file with the same name already exists at location {output_filename}')
                     else:
                         mip_image.save(output_path)
+                stop = time.time()
+                duration = stop - start
+                time_ls.append(duration)
+                files_processed = processed
+                files_to_process = len(all_filenames)
+                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
 
             images_by_key.clear()
 
@@ -826,6 +845,7 @@ def _merge_channels(src, plot=False):
     """
 
     from .plot import plot_arrays
+    from .utils import print_progress
 
     stack_dir = os.path.join(src, 'stack')
     allowed_names = ['01', '02', '03', '04', '00', '1', '2', '3', '4', '0']
@@ -835,8 +855,7 @@ def _merge_channels(src, plot=False):
     chan_dirs.sort()
 
     print(f'List of folders in src: {chan_dirs}. Single channel folders.')
-
-
+
     # Assuming chan_dirs[0] is not empty and exists, adjust according to your logic
     first_dir_path = os.path.join(src, chan_dirs[0])
     dir_files = os.listdir(first_dir_path)
@@ -847,14 +866,18 @@ def _merge_channels(src, plot=False):
     print(f'Generated folder with merged arrays: {stack_dir}')
 
     if _is_dir_empty(stack_dir):
-
+        time_ls = []
+        files_to_process = len(dir_files)
+        for i, file_name in enumerate(dir_files):
+            start_time = time.time()
             full_file_path = os.path.join(first_dir_path, file_name)
             if os.path.isfile(full_file_path):
                 _merge_file([os.path.join(src, d) for d in chan_dirs], stack_dir, file_name)
-
-
-
-
+            stop_time = time.time()
+            duration = stop_time - start_time
+            time_ls.append(duration)
+            files_processed = i + 1
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type='Merging channels into npy stacks')
 
     if plot:
         plot_arrays(os.path.join(src, 'stack'))
@@ -911,6 +934,7 @@ def _mip_all(src, include_first_chan=True):
 
 #@log_function_call
 def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_size=100):
+    from .utils import print_progress
     """
     Concatenates channel data from multiple files and saves the concatenated data as numpy arrays.
 
@@ -926,6 +950,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
     """
     channels = [item for item in channels if item is not None]
    paths = []
+    time_ls = []
     index = 0
     channel_stack_loc = os.path.join(os.path.dirname(src), 'channel_stack')
     os.makedirs(channel_stack_loc, exist_ok=True)
@@ -944,8 +969,13 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
                 array = np.take(array, channels, axis=2)
                 stack_region.append(array)
                 filenames_region.append(os.path.basename(path))
-
-
+
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = i+1
+            files_to_process = time_stack_path_lists
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type="Concatinating")
             stack = np.stack(stack_region)
             save_loc = os.path.join(channel_stack_loc, f'{name}.npz')
             np.savez(save_loc, data=stack, filenames=filenames_region)
@@ -964,23 +994,24 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
         nr_files = len(paths)
         batch_index = 0 # Added this to name the output files
         stack_ls = []
-        filenames_batch = []
+        filenames_batch = []
         for i, path in enumerate(paths):
+            start = time.time()
             array = np.load(path)
             array = np.take(array, channels, axis=2)
             stack_ls.append(array)
             filenames_batch.append(os.path.basename(path)) # store the filename
-
-
-
-
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = i+1
+            files_to_process = nr_files
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type="Concatinating")
             if (i+1) % batch_size == 0 or i+1 == nr_files:
                 unique_shapes = {arr.shape[:-1] for arr in stack_ls}
                 if len(unique_shapes) > 1:
                     max_dims = np.max(np.array(list(unique_shapes)), axis=0)
-                    #clear_output(wait=True)
                     print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}')
-                    #print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
                     padded_stack_ls = []
                     for arr in stack_ls:
                         pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
@@ -1002,6 +1033,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
 
 def _normalize_img_batch(stack, channels, save_dtype, settings):
 
+    from .utils import print_progress
     """
     Normalize the stack of images.
 
@@ -1018,7 +1050,9 @@ def _normalize_img_batch(stack, channels, save_dtype, settings):
     normalized_stack = np.zeros_like(stack, dtype=np.float32)
 
     #for channel in range(stack.shape[-1]):
-
+    time_ls = []
+    for i, channel in enumerate(channels):
+        start = time.time()
         if channel == settings['nucleus_channel']:
             background = settings['nucleus_background']
             signal_threshold = settings['nucleus_Signal_to_noise']*settings['nucleus_background']
@@ -1065,10 +1099,18 @@ def _normalize_img_batch(stack, channels, save_dtype, settings):
             arr_2d_normalized = exposure.rescale_intensity(arr_2d, in_range=(global_lower, global_upper), out_range=(0, 1))
             normalized_stack[array_index, :, :, channel] = arr_2d_normalized
 
+        stop = time.time()
+        duration = stop - start
+        time_ls.append(duration)
+        files_processed = i+1
+        files_to_process = len(channels)
+        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type=f"Normalizing: Channel: {channel}")
+
     return normalized_stack.astype(save_dtype)
 
 def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={}):
-
+    from .utils import print_progress
+
     """
     Concatenates and normalizes channel data from multiple files and saves the normalized data.
 
@@ -1088,10 +1130,11 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
     Returns:
         str: The directory path where the concatenated and normalized channel data is saved.
     """
-
+
     channels = [item for item in channels if item is not None]
 
     paths = []
+    time_ls = []
     output_fldr = os.path.join(os.path.dirname(src), 'norm_channel_stack')
     os.makedirs(output_fldr, exist_ok=True)
 
@@ -1099,6 +1142,7 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
     try:
         time_stack_path_lists = _generate_time_lists(os.listdir(src))
         for i, time_stack_list in enumerate(time_stack_path_lists):
+            start = time.time()
             stack_region = []
             filenames_region = []
             for idx, file in enumerate(time_stack_list):
@@ -1110,7 +1154,12 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
                 #array = np.take(array, channels, axis=2)
                 stack_region.append(array)
                 filenames_region.append(os.path.basename(path))
-
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = i+1
+            files_to_process = len(time_stack_path_lists)
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type="Concatinating")
             stack = np.stack(stack_region)
 
             normalized_stack = _normalize_img_batch(stack=stack,
@@ -1138,13 +1187,19 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
         batch_index = 0
         stack_ls = []
         filenames_batch = []
-
+        time_ls = []
         for i, path in enumerate(paths):
+            start = time.time()
            array = np.load(path)
             #array = np.take(array, channels, axis=2)
             stack_ls.append(array)
             filenames_batch.append(os.path.basename(path))
-
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = i+1
+            files_to_process = nr_files
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type="Concatinating")
 
             if (i + 1) % settings['batch_size'] == 0 or i + 1 == nr_files:
                 unique_shapes = {arr.shape[:-1] for arr in stack_ls}
@@ -1219,6 +1274,7 @@ def _get_lists_for_normalization(settings):
     return backgrounds, signal_to_noise, signal_thresholds, remove_background
 
 def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False, False, False], lower_percentile=2, save_dtype=np.float32, signal_to_noise=[5, 5, 5], signal_thresholds=[1000, 1000, 1000]):
+    from .utils import print_progress
     """
     Normalize the stack of images.
 
@@ -1266,10 +1322,11 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
                 global_upper = np.percentile(non_zero_single_channel, upper_p)
                 if global_upper >= signal_threshold:
                     break
-
+
             # Normalize the pixels in each image to the global percentiles and then dtype.
             arr_2d_normalized = np.zeros_like(single_channel, dtype=single_channel.dtype)
             signal_to_noise_ratio_ls = []
+            time_ls = []
             for array_index in range(single_channel.shape[0]):
                 start = time.time()
                 arr_2d = single_channel[array_index, :, :]
@@ -1291,8 +1348,15 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
                     duration = (stop - start) * single_channel.shape[0]
                     time_ls.append(duration)
                     average_time = np.mean(time_ls) if len(time_ls) > 0 else 0
-                    print(f'
+                    print(f'channels:{chan_index}/{stack.shape[-1] - 1}, arrays:{array_index + 1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec')
 
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = file_index + 1
+            files_to_process = len(paths)
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
+
             normalized_stack[:, :, :, channel] = arr_2d_normalized
 
         save_loc = os.path.join(output_fldr, f'{name}_norm_stack.npz')
@@ -1303,6 +1367,7 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
     return print(f'Saved stacks: {output_fldr}')
 
 def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
+    from .utils import print_progress
     """
     Normalize the timelapse data by rescaling the intensity values based on percentiles.
 
@@ -1326,8 +1391,9 @@ def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
 
         for chan_index in range(stack.shape[-1]):
             single_channel = stack[:, :, :, chan_index]
-
+            time_ls = []
             for array_index in range(single_channel.shape[0]):
+                start = time.time()
                 arr_2d = single_channel[array_index]
                 # Calculate the 1% and 98% percentiles for this specific image
                 q_low = np.percentile(arr_2d[arr_2d != 0], lower_percentile)
@@ -1337,7 +1403,14 @@ def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
                 arr_2d_rescaled = exposure.rescale_intensity(arr_2d, in_range=(q_low, q_high), out_range='dtype')
                 normalized_stack[array_index, :, :, chan_index] = arr_2d_rescaled
 
-                print(f'
+                print(f'channels:{chan_index+1}/{stack.shape[-1]}, arrays:{array_index+1}/{single_channel.shape[0]}', end='\r')
+
+                stop = time.time()
+                duration = stop - start
+                time_ls.append(duration)
+                files_processed = file_index+1
+                files_to_process = len(paths)
+                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
 
         save_loc = os.path.join(output_fldr, f'{name}_norm_timelapse.npz')
         np.savez(save_loc, data=normalized_stack, filenames=filenames)
@@ -1602,6 +1675,7 @@ def _get_avg_object_size(masks):
         return 0 # Return 0 if no objects are found
 
 def _save_figure(fig, src, text, dpi=300, i=1, all_folders=1):
+    from .utils import print_progress
     """
     Save a figure to a specified location.
 
@@ -1611,6 +1685,7 @@ def _save_figure(fig, src, text, dpi=300, i=1, all_folders=1):
         text (str): The text to be included in the figure name.
         dpi (int, optional): The resolution of the saved figure. Defaults to 300.
     """
+
     save_folder = os.path.dirname(src)
     obj_type = os.path.basename(src)
     name = os.path.basename(save_folder)
@@ -1619,10 +1694,11 @@ def _save_figure(fig, src, text, dpi=300, i=1, all_folders=1):
     fig_name = f'{obj_type}_{name}_{text}.pdf'
     save_location = os.path.join(save_folder, fig_name)
     fig.savefig(save_location, bbox_inches='tight', dpi=dpi)
-
-
-
-
+
+    files_processed = i
+    files_to_process = all_folders
+    print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type="Saving Figures")
+    print(f'Saved single cell figure: {os.path.basename(save_location)}')
     plt.close(fig)
     del fig
     gc.collect()
@@ -1842,6 +1918,9 @@ def _create_database(db_path):
     conn.close()
 
 def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_dim, pathogen_chann_dim):
+
+    from .utils import print_progress
+
     """
     Load and concatenate arrays from multiple folders.
 
@@ -1870,9 +1949,10 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
 
     count=0
     all_imgs = len(os.listdir(reference_folder))
-
+    time_ls = []
     # Iterate through each file in the reference folder
     for filename in os.listdir(reference_folder):
+        start = time.time()
         stack_ls = []
         if filename.endswith('.npy'):
             count += 1
@@ -1929,9 +2009,13 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
             output_path = os.path.join(output_folder, filename)
             np.save(output_path, stack)
 
-
-
-
+        stop = time.time()
+        duration = stop - start
+        time_ls.append(duration)
+        files_processed = count
+        files_to_process = all_imgs
+        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Merging Arrays")
+
     return
 
 def _read_db(db_loc, tables):
@@ -2211,7 +2295,7 @@ def _save_model(model, model_type, results_df, dst, epoch, epochs, intermedeate_
 
     def save_model_at_threshold(threshold, epoch, suffix=""):
         percentile = str(threshold * 100)
-        print(f'\rfound: {percentile}% accurate model'
+        print(f'\rfound: {percentile}% accurate model')#, end='\r', flush=True)
         torch.save(model, f'{dst}/{model_type}_epoch_{str(epoch)}{suffix}_acc_{percentile}_channels_{channels_str}.pth')
 
     if epoch % 100 == 0 or epoch == epochs:
@@ -2545,5 +2629,5 @@ def generate_cellpose_train_test(src, test_split=0.1):
         new_mask_path = os.path.join(dst_mask, filename)
         shutil.copy(img_path, new_img_path)
         shutil.copy(mask_path, new_mask_path)
-        print(f'Copied {idx+1}/{len(ls)} images to {_type} set'
+        print(f'Copied {idx+1}/{len(ls)} images to {_type} set')#, end='\r', flush=True)
 
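Note on the io.py changes above: nearly every hunk adds per-item timing (start/stop timestamps collected in a time_ls list) plus a call to print_progress imported from spacr.utils; the new implementation lives in the utils.py additions (+55 lines) that this diff does not show. The sketch below is only a stand-in that matches the call signature used at the new call sites, so they can be read in context; it is not the actual spacr.utils.print_progress:

import numpy as np

def print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type=''):
    # Average per-item duration from the collected timing samples, if any were passed
    avg_time = float(np.mean(time_ls)) if time_ls else 0.0
    msg = f'{operation_type}: {files_processed}/{files_to_process}'
    if batch_size is not None:
        msg += f' (batch size {batch_size})'
    if avg_time:
        # Rough ETA assuming remaining items take the average time, spread over n_jobs workers
        remaining_min = (files_to_process - files_processed) * avg_time / max(n_jobs, 1) / 60
        msg += f', {avg_time:.3f}s/item, ~{remaining_min:.2f} min left'
    print(msg, end='\r', flush=True)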
spacr/measure.py
CHANGED
@@ -935,7 +935,7 @@ def measure_crop(settings):
     from .io import _save_settings_to_db
     from .timelapse import _timelapse_masks_to_gif, _scmovie
     from .plot import _save_scimg_plot
-    from .utils import _list_endpoint_subdirectories, _generate_representative_images, measure_test_mode
+    from .utils import _list_endpoint_subdirectories, _generate_representative_images, measure_test_mode, print_progress
     from .settings import get_measure_crop_settings
 
     settings = get_measure_crop_settings(settings)
@@ -996,24 +996,20 @@ def measure_crop(settings):
         return
 
     _save_settings_to_db(settings)
-
     files = [f for f in os.listdir(settings['src']) if f.endswith('.npy')]
     n_jobs = settings['n_jobs'] or mp.cpu_count()-4
     print(f'using {n_jobs} cpu cores')
-
     with mp.Manager() as manager:
         time_ls = manager.list()
         with mp.Pool(n_jobs) as pool:
            result = pool.starmap_async(_measure_crop_core, [(index, time_ls, file, settings) for index, file in enumerate(files)])
 
             # Track progress in the main process
-            while not result.ready():
-                time.sleep(1)
+            while not result.ready():
+                time.sleep(1)
                 files_processed = len(time_ls)
                 files_to_process = len(files)
-
-                time_left = (((files_to_process-files_processed)*average_time)/n_jobs)/60
-                print(f'Progress: {files_processed}/{files_to_process} Time/img {average_time:.3f}sec, Time Remaining {time_left:.3f} min.', end='\r', flush=True)
+                print_progress(files_processed, files_to_process, n_jobs, time_ls=None)
             result.get()
 
     if settings['representative_images']:
@@ -1114,3 +1110,5 @@ def get_object_counts(src):
     # Close the database connection
     conn.close()
     return grouped_df
+
+
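Note on the measure_crop change above: the hand-rolled ETA print was replaced by print_progress, while progress itself is still tracked by polling a multiprocessing.Manager list that each worker appends to. A minimal, self-contained sketch of that polling pattern (illustrative only; _work is a placeholder standing in for spacr's _measure_crop_core):

import multiprocessing as mp
import time

def _work(index, time_ls, item):
    start = time.time()
    time.sleep(0.01)                      # placeholder for the real per-file work
    time_ls.append(time.time() - start)   # the shared list doubles as a completed-item counter

if __name__ == '__main__':
    items = list(range(100))
    n_jobs = 4
    with mp.Manager() as manager:
        time_ls = manager.list()
        with mp.Pool(n_jobs) as pool:
            result = pool.starmap_async(_work, [(i, time_ls, item) for i, item in enumerate(items)])
            while not result.ready():     # main process polls and reports progress
                time.sleep(1)
                print(f'{len(time_ls)}/{len(items)} files processed', end='\r', flush=True)
            result.get()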
Binary files changed (no text diff shown): spacr/resources/icons/measure.png, ml_analyze.png, recruitment.png, spacr_logo_rotation.gif, train_cellpose.png
spacr/settings.py
CHANGED
@@ -1143,10 +1143,7 @@ def set_annotate_default_settings(settings):
     settings.setdefault('src', 'path')
     settings.setdefault('image_type', 'cell_png')
     settings.setdefault('channels', 'r,g,b')
-    settings.setdefault('
-    settings.setdefault('img_size', [200, 200])
-    settings.setdefault('rows', 10)
-    settings.setdefault('columns', 18)
+    settings.setdefault('img_size', 200)
     settings.setdefault('annotation_column', 'test')
     settings.setdefault('normalize', 'False')
     settings.setdefault('percentiles', [2, 98])
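Note on the settings.py change above: set_annotate_default_settings fills defaults with dict.setdefault, so caller-supplied values always win and only missing keys receive the new defaults (img_size now defaults to the scalar 200 rather than a [200, 200] list, and the 'rows'/'columns' defaults are gone). A small illustration of that behaviour, using hypothetical caller values:

settings = {'src': '/data/plate1', 'img_size': 150}   # values supplied by the caller
settings.setdefault('img_size', 200)                  # no effect, the caller's 150 is kept
settings.setdefault('channels', 'r,g,b')              # missing key, filled with the default
print(settings)  # {'src': '/data/plate1', 'img_size': 150, 'channels': 'r,g,b'}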