spacr 0.4.15__py3-none-any.whl → 0.4.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/core.py +52 -9
- spacr/deep_spacr.py +2 -3
- spacr/gui_core.py +247 -41
- spacr/gui_elements.py +133 -2
- spacr/gui_utils.py +17 -15
- spacr/io.py +540 -55
- spacr/ml.py +141 -258
- spacr/plot.py +76 -34
- spacr/sequencing.py +73 -38
- spacr/settings.py +136 -128
- spacr/submodules.py +619 -213
- spacr/timelapse.py +25 -25
- spacr/toxo.py +23 -23
- spacr/utils.py +162 -89
- {spacr-0.4.15.dist-info → spacr-0.4.60.dist-info}/METADATA +2 -1
- {spacr-0.4.15.dist-info → spacr-0.4.60.dist-info}/RECORD +20 -20
- {spacr-0.4.15.dist-info → spacr-0.4.60.dist-info}/LICENSE +0 -0
- {spacr-0.4.15.dist-info → spacr-0.4.60.dist-info}/WHEEL +0 -0
- {spacr-0.4.15.dist-info → spacr-0.4.60.dist-info}/entry_points.txt +0 -0
- {spacr-0.4.15.dist-info → spacr-0.4.60.dist-info}/top_level.txt +0 -0
spacr/core.py
CHANGED
@@ -9,12 +9,11 @@ warnings.filterwarnings("ignore", message="3D stack used, but stitch_threshold=0
 
 def preprocess_generate_masks(settings):
 
-    from .io import preprocess_img_data, _load_and_concatenate_arrays
+    from .io import preprocess_img_data, _load_and_concatenate_arrays, convert_to_yokogawa, convert_separate_files_to_yokogawa
     from .plot import plot_image_mask_overlay, plot_arrays
-    from .utils import _pivot_counts_table, check_mask_folder, adjust_cell_masks, print_progress, save_settings, delete_intermedeate_files, format_path_for_system, normalize_src_path
+    from .utils import _pivot_counts_table, check_mask_folder, adjust_cell_masks, print_progress, save_settings, delete_intermedeate_files, format_path_for_system, normalize_src_path, generate_image_path_map, copy_images_to_consolidated
     from .settings import set_default_settings_preprocess_generate_masks
-
-
+
     if 'src' in settings:
         if not isinstance(settings['src'], (str, list)):
             ValueError(f'src must be a string or a list of strings')
@@ -25,19 +24,51 @@ def preprocess_generate_masks(settings):
 
     settings['src'] = normalize_src_path(settings['src'])
 
+    if settings['consolidate']:
+        image_map = generate_image_path_map(settings['src'])
+        copy_images_to_consolidated(image_map, settings['src'])
+        settings['src'] = os.path.join(settings['src'], 'consolidated')
+
     if isinstance(settings['src'], str):
         settings['src'] = [settings['src']]
 
     if isinstance(settings['src'], list):
         source_folders = settings['src']
         for source_folder in source_folders:
+
             print(f'Processing folder: {source_folder}')
-
+
+            source_folder = format_path_for_system(source_folder)
             settings['src'] = source_folder
             src = source_folder
             settings = set_default_settings_preprocess_generate_masks(settings)
+
+            if settings['metadata_type'] == 'auto':
+                if settings['custom_regex'] != None:
+                    try:
+                        print(f"using regex: {settings['custom_regex']}")
+                        convert_separate_files_to_yokogawa(folder=source_folder, regex=settings['custom_regex'])
+                    except:
+                        try:
+                            convert_to_yokogawa(folder=source_folder)
+                        except Exception as e:
+                            print(f"Error: Tried to convert image files and image file name metadata with regex {settings['custom_regex']} then without regex but failed both.")
+                            print(f'Error: {e}')
+                            return
+                else:
+                    try:
+                        convert_to_yokogawa(folder=source_folder)
+                    except Exception as e:
+                        print(f"Error: Tried to convert image files and image file name metadata without regex but failed.")
+                        print(f'Error: {e}')
+                        return
+
+            if settings['cell_channel'] is None and settings['nucleus_channel'] is None and settings['pathogen_channel'] is None:
+                print(f'Error: At least one of cell_channel, nucleus_channel or pathogen_channel must be defined')
+                return
+
             save_settings(settings, name='gen_mask_settings')
-
+
             if not settings['pathogen_channel'] is None:
                 custom_model_ls = ['toxo_pv_lumen','toxo_cyto']
                 if settings['pathogen_model'] not in custom_model_ls:
@@ -57,7 +88,7 @@ def preprocess_generate_masks(settings):
         settings_df = pd.DataFrame(list(settings.items()), columns=['setting_key', 'setting_value'])
         settings_df['setting_value'] = settings_df['setting_value'].apply(str)
         display(settings_df)
-
+
         if settings['test_mode']:
             print(f'Starting Test mode ...')
 
@@ -167,6 +198,7 @@ def preprocess_generate_masks(settings):
         gc.collect()
 
         if settings['delete_intermediate']:
+            print(f"deleting intermediate files")
             delete_intermedeate_files(settings)
 
     print("Successfully completed run")
@@ -288,6 +320,17 @@ def generate_cellpose_masks(src, settings, object_type):
                 continue
 
             batch = prepare_batch_for_segmentation(batch)
+
+
+            #if settings['denoise']:
+            #    if object_type == 'cell':
+            #        model_type = "denoise_cyto3"
+            #    elif object_type == 'nucleus':
+            #        model_type = "denoise_nucleus"
+            #    else:
+            #        raise ValueError(f"No denoise model for object_type: {object_type}")
+            #    dn = denoise.DenoiseModel(model_type=model_type, gpu=device)
+            #    batch = dn.eval(imgs=batch, channels=chans, diameter=object_settings['diameter'])
 
             if timelapse:
                 movie_path = os.path.join(os.path.dirname(src), 'movies')
@@ -489,7 +532,7 @@ def generate_image_umap(settings={}):
         all_df = pd.concat([all_df, df], axis=0)
         #image_paths.extend(image_paths_tmp)
 
-    all_df['cond'] = all_df['
+    all_df['cond'] = all_df['columnID'].apply(map_condition, neg=settings['neg'], pos=settings['pos'], mix=settings['mix'])
 
     if settings['exclude_conditions']:
         if isinstance(settings['exclude_conditions'], str):
@@ -673,7 +716,7 @@ def reducer_hyperparameter_search(settings={}, reduction_params=None, dbscan_par
         df = _read_and_join_tables(db_path, table_names=tables)
         all_df = pd.concat([all_df, df], axis=0)
 
-    all_df['cond'] = all_df['
+    all_df['cond'] = all_df['columnID'].apply(map_condition, neg=settings['neg'], pos=settings['pos'], mix=settings['mix'])
 
     if settings['exclude_conditions']:
         if isinstance(settings['exclude_conditions'], str):
spacr/deep_spacr.py
CHANGED
@@ -1144,7 +1144,7 @@ def model_fusion(model_paths,save_path,device='cpu',model_name='maxvit_t',pretra
 
 def annotate_filter_vision(settings):
 
-    from .utils import annotate_conditions
+    from .utils import annotate_conditions, correct_metadata
 
     def filter_csv_by_png(csv_file):
         """
@@ -1188,8 +1188,7 @@ def annotate_filter_vision(settings):
 
         df = pd.read_csv(src)
 
-
-        df['column_name'] = df['column']
+        df = correct_metadata(df)
 
         df = annotate_conditions(df,
                                  cells=settings['cells'],
spacr/gui_core.py
CHANGED
@@ -176,36 +176,6 @@ def display_figure(fig):
         elif event.num == 5:  # Scroll down
             print("zoom out")
 
-    def zoom_v1(event):
-        # Fixed zoom factors (adjust these if you want faster or slower zoom)
-        zoom_in_factor = 0.9   # When zooming in, ranges shrink by 10%
-        zoom_out_factor = 1.1  # When zooming out, ranges increase by 10%
-
-        # Determine the zoom direction based on the scroll event
-        if event.num == 4 or (hasattr(event, 'delta') and event.delta > 0):  # Scroll up = zoom in
-            factor = zoom_in_factor
-        elif event.num == 5 or (hasattr(event, 'delta') and event.delta < 0):  # Scroll down = zoom out
-            factor = zoom_out_factor
-        else:
-            return  # No recognized scroll direction
-
-        for ax in canvas.figure.get_axes():
-            xlim = ax.get_xlim()
-            ylim = ax.get_ylim()
-
-            x_center = (xlim[1] + xlim[0]) / 2
-            y_center = (ylim[1] + ylim[0]) / 2
-
-            x_range = (xlim[1] - xlim[0]) * factor
-            y_range = (ylim[1] - ylim[0]) * factor
-
-            # Set the new limits
-            ax.set_xlim([x_center - x_range / 2, x_center + x_range / 2])
-            ax.set_ylim([y_center - y_range / 2, y_center + y_range / 2])
-
-        # Redraw the figure efficiently
-        canvas.draw_idle()
-
     def zoom(event):
         # Fixed zoom factors (adjust these if you want faster or slower zoom)
         zoom_in_factor = 0.9  # When zooming in, ranges shrink by 10%
@@ -832,7 +802,7 @@ def initiate_abort():
     global thread_control, q, parent_frame
     if thread_control.get("run_thread") is not None:
         try:
-            q.put("Aborting processes...")
+            #q.put("Aborting processes...")
             thread_control.get("run_thread").terminate()
             thread_control["run_thread"] = None
             q.put("Processes aborted.")
@@ -840,22 +810,164 @@ def initiate_abort():
            q.put(f"Error aborting process: {e}")
 
     thread_control = {"run_thread": None, "stop_requested": False}
+
+def check_src_folders_files(settings, settings_type, q):
+    """
+    Checks if 'src' is a key in the settings dictionary and if it exists as a valid path.
+    If 'src' is a list, iterates through the list and checks each path.
+    If any path is missing, prompts the user to edit or remove invalid paths.
+    """
+
+    request_stop = False
+
+    def _folder_has_images(folder_path, image_extensions = {".png", ".jpg", ".jpeg", ".bmp", ".gif", ".tiff", ".tif", ".webp", ".npy", ".npz", "nd2", "czi", "lif"}):
+        """Check if a folder contains any image files."""
+        return any(file.lower().endswith(tuple(image_extensions)) for file in os.listdir(folder_path))
+
+    def _has_folder(parent_folder, sub_folder="measure"):
+        """Check if a specific sub-folder exists inside the given folder."""
+        return os.path.isdir(os.path.join(parent_folder, sub_folder))
+
+    from .utils import normalize_src_path, generate_image_path_map
+
+    settings['src'] = normalize_src_path(settings['src'])
+
+    src_value = settings.get("src")
+
+    # **Skip if 'src' is missing**
+    if src_value is None:
+        return request_stop
+
+    # Convert single string src to a list for uniform handling
+    if isinstance(src_value, str):
+        src_list = [src_value]
+    elif isinstance(src_value, list):
+        src_list = src_value
+    else:
+        request_stop = True
+        return request_stop  # Ensure early exit
+
+    # Identify missing paths
+    missing_paths = {i: path for i, path in enumerate(src_list) if not os.path.exists(path)}
+
+    if missing_paths:
+        q.put(f'Error: The following paths are missing: {missing_paths}')
+        request_stop = True
+        return request_stop  # Ensure early exit
+
+    conditions = [True]  # Initialize conditions list
+
+    for path in src_list:  # Fixed: Use src_list instead of src_value
+        if settings_type == 'mask':
+            if settings['consolidate']:
+                image_map = generate_image_path_map(path)
+                if len(image_map) > 0:
+                    request_stop = False
+                    return request_stop
+                else:
+                    q.put(f"Error: Missing subfolders with images for: {path}")
+                    request_stop = True
+                    return request_stop
+            else:
+                pictures_continue = _folder_has_images(path)
+                folder_chan_continue = _has_folder(path, "1")
+                folder_stack_continue = _has_folder(path, "stack")
+                folder_npz_continue = _has_folder(path, "norm_channel_stack")
+
+                if not pictures_continue:
+                    if not any([folder_chan_continue, folder_stack_continue, folder_npz_continue]):
+                        if not folder_chan_continue:
+                            q.put(f"Error: Missing channel folder in folder: {path}")
+
+                        if not folder_stack_continue:
+                            q.put(f"Error: Missing stack folder in folder: {path}")
+
+                        if not folder_npz_continue:
+                            q.put(f"Error: Missing norm_channel_stack folder in folder: {path}")
+                    else:
+                        q.put(f"Error: No images in folder: {path}")
+
+                #q.put(f"path:{path}")
+                #q.put(f"pictures_continue:{pictures_continue}, folder_chan_continue:{folder_chan_continue}, folder_stack_continue:{folder_stack_continue}, folder_npz_continue:{folder_npz_continue}")
+
+                conditions = [pictures_continue, folder_chan_continue, folder_stack_continue, folder_npz_continue]
+
+        if settings_type == 'measure':
+            if not os.path.basename(path) == 'merged':
+                path = os.path.join(path, "merged")
+            npy_continue = _folder_has_images(path, image_extensions={".npy"})
+            conditions = [npy_continue]
+
+        #if settings_type == 'recruitment':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'umap':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'analyze_plaques':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'map_barcodes':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'regression':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'classify':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'analyze_plaques':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        if not any(conditions):
+            q.put(f"Error: The following path(s) is missing images or folders: {path}")
+            request_stop = True
+
+    return request_stop
 
 def start_process(q=None, fig_queue=None, settings_type='mask'):
     global thread_control, vars_dict, parent_frame
     from .settings import check_settings, expected_types
     from .gui_utils import run_function_gui, set_cpu_affinity, initialize_cuda, display_gif_in_plot_frame, print_widget_structure
-
+
     if q is None:
         q = Queue()
     if fig_queue is None:
         fig_queue = Queue()
     try:
-        settings = check_settings(vars_dict, expected_types, q)
+        settings, errors = check_settings(vars_dict, expected_types, q)
+
+        if len(errors) > 0:
+            return
+
+        if check_src_folders_files(settings, settings_type, q):
+            return
+
     except ValueError as e:
         q.put(f"Error: {e}")
         return
-
+
     if isinstance(thread_control, dict) and thread_control.get("run_thread") is not None:
         initiate_abort()
 
@@ -880,10 +992,11 @@ def start_process(q=None, fig_queue=None, settings_type='mask'):
 
         # Store the process in thread_control for future reference
         thread_control["run_thread"] = process
+
     else:
         q.put(f"Error: Unknown settings type '{settings_type}'")
         return
-
+
 def process_console_queue():
     global q, console_output, parent_frame, progress_bar, process_console_queue
 
@@ -894,13 +1007,105 @@ def process_console_queue():
         process_console_queue.current_maximum = None
 
     ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
+
+    spacing = 5
+
+    # **Configure styles for different message types**
+    console_output.tag_configure("error", foreground="red", spacing3 = spacing)
+    console_output.tag_configure("warning", foreground="orange", spacing3 = spacing)
+    console_output.tag_configure("normal", foreground="white", spacing3 = spacing)
 
     while not q.empty():
         message = q.get_nowait()
        clean_message = ansi_escape_pattern.sub('', message)
-
-        #
+
+        # **Detect Error Messages (Red)**
+        if clean_message.startswith("Error:"):
+            console_output.insert(tk.END, clean_message + "\n", "error")
+            console_output.see(tk.END)
+            #print("Run aborted due to error:", clean_message)  # Debug message
+            #return  # **Exit immediately to stop further execution**
+
+        # **Detect Warning Messages (Orange)**
+        elif clean_message.startswith("Warning:"):
+            console_output.insert(tk.END, clean_message + "\n", "warning")
 
+        # **Process Progress Messages Normally**
+        elif clean_message.startswith("Progress:"):
+            try:
+                # Extract the progress information
+                match = re.search(r'Progress: (\d+)/(\d+), operation_type: ([\w\s]*),(.*)', clean_message)
+
+                if match:
+                    current_progress = int(match.group(1))
+                    total_progress = int(match.group(2))
+                    operation_type = match.group(3).strip()
+                    additional_info = match.group(4).strip()  # Capture everything after operation_type
+
+                    # Check if the maximum value has changed
+                    if process_console_queue.current_maximum != total_progress:
+                        process_console_queue.current_maximum = total_progress
+                        process_console_queue.completed_tasks = []
+
+                    # Add the task to the completed set
+                    process_console_queue.completed_tasks.append(current_progress)
+
+                    # Calculate the unique progress count
+                    unique_progress_count = len(np.unique(process_console_queue.completed_tasks))
+
+                    # Update the progress bar
+                    if progress_bar:
+                        progress_bar['maximum'] = total_progress
+                        progress_bar['value'] = unique_progress_count
+
+                    # Store operation type and additional info
+                    if operation_type:
+                        progress_bar.operation_type = operation_type
+                        progress_bar.additional_info = additional_info
+
+                    # Update the progress label
+                    if progress_bar.progress_label:
+                        progress_bar.update_label()
+
+                    # Clear completed tasks when progress is complete
+                    if unique_progress_count >= total_progress:
+                        process_console_queue.completed_tasks.clear()
+
+            except Exception as e:
+                print(f"Error parsing progress message: {e}")
+
+        # **Insert Normal Messages with Extra Line Spacing**
+        else:
+            console_output.insert(tk.END, clean_message + "\n", "normal")
+
+        console_output.see(tk.END)
+
+    # **Continue processing if no error was detected**
+    after_id = console_output.after(uppdate_frequency, process_console_queue)
+    parent_frame.after_tasks.append(after_id)
+
+def process_console_queue_v2():
+    global q, console_output, parent_frame, progress_bar, process_console_queue
+
+    # Initialize function attribute if it doesn't exist
+    if not hasattr(process_console_queue, "completed_tasks"):
+        process_console_queue.completed_tasks = []
+    if not hasattr(process_console_queue, "current_maximum"):
+        process_console_queue.current_maximum = None
+
+    ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
+
+    while not q.empty():
+        message = q.get_nowait()
+        clean_message = ansi_escape_pattern.sub('', message)
+
+        # **Abort Execution if an Error Message is Detected**
+        if clean_message.startswith("Error:"):
+            console_output.insert(tk.END, clean_message + "\n", "error")
+            console_output.see(tk.END)
+            print("Run aborted due to error:", clean_message)  # Debug message
+            return  # **Exit immediately to stop further execution**
+
         # Check if the message contains progress information
         if clean_message.startswith("Progress:"):
             try:
@@ -928,8 +1133,7 @@ def process_console_queue():
                     if progress_bar:
                         progress_bar['maximum'] = total_progress
                         progress_bar['value'] = unique_progress_count
-
-
+
                     # Store operation type and additional info
                     if operation_type:
                         progress_bar.operation_type = operation_type
@@ -945,11 +1149,13 @@ def process_console_queue():
 
             except Exception as e:
                 print(f"Error parsing progress message: {e}")
+
         else:
-            #
+            # Insert non-progress messages into the console
             console_output.insert(tk.END, clean_message + "\n")
             console_output.see(tk.END)
-
+
+    # **Continue processing if no error was detected**
     after_id = console_output.after(uppdate_frequency, process_console_queue)
     parent_frame.after_tasks.append(after_id)
 
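The console changes above route each queued message to a tkinter Text tag chosen by its prefix ("Error:", "Warning:", "Progress:", or anything else). A self-contained sketch of that pattern, using placeholder widget names rather than the spaCR globals:

import tkinter as tk

root = tk.Tk()
console = tk.Text(root, bg="black")
console.pack(fill="both", expand=True)

# One tag per message class, as in process_console_queue.
console.tag_configure("error", foreground="red", spacing3=5)
console.tag_configure("warning", foreground="orange", spacing3=5)
console.tag_configure("normal", foreground="white", spacing3=5)

def insert_message(message):
    # Pick the tag from the message prefix, then append and scroll.
    if message.startswith("Error:"):
        tag = "error"
    elif message.startswith("Warning:"):
        tag = "warning"
    else:
        tag = "normal"
    console.insert(tk.END, message + "\n", tag)
    console.see(tk.END)

insert_message("Warning: Missing stack folder in folder: /data/plate1")
root.mainloop()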
spacr/gui_elements.py
CHANGED
@@ -726,8 +726,8 @@ class spacrProgressBar(ttk.Progressbar):
 
     def set_label_position(self):
         if self.label and self.progress_label:
-            row_info = self.grid_info().get('
-            col_info = self.grid_info().get('
+            row_info = self.grid_info().get('rowID', 0)
+            col_info = self.grid_info().get('columnID', 0)
             col_span = self.grid_info().get('columnspan', 1)
             self.progress_label.grid(row=row_info + 1, column=col_info, columnspan=col_span, pady=5, padx=5, sticky='ew')
 
@@ -2285,6 +2285,9 @@ class AnnotateApp:
 
         self.train_button = Button(self.button_frame,text="orig.",command=self.swich_back_annotation_column,bg=self.bg_color,fg=self.fg_color,highlightbackground=self.fg_color,highlightcolor=self.fg_color,highlightthickness=1)
         self.train_button.pack(side="right", padx=5)
+
+        self.settings_button = Button(self.button_frame, text="Settings", command=self.open_settings_window, bg=self.bg_color, fg=self.fg_color, highlightbackground=self.fg_color,highlightcolor=self.fg_color,highlightthickness=1)
+        self.settings_button.pack(side="right", padx=5)
 
         # Calculate grid rows and columns based on the root window size and image size
         self.calculate_grid_dimensions()
@@ -2308,6 +2311,134 @@ class AnnotateApp:
         for col in range(self.grid_cols):
             self.grid_frame.grid_columnconfigure(col, weight=1)
 
+    def open_settings_window(self):
+        from .gui_utils import generate_annotate_fields, convert_to_number
+
+        # Create settings window
+        settings_window = tk.Toplevel(self.root)
+        settings_window.title("Modify Annotation Settings")
+
+        style_out = set_dark_style(ttk.Style())
+        settings_window.configure(bg=style_out['bg_color'])
+
+        settings_frame = tk.Frame(settings_window, bg=style_out['bg_color'])
+        settings_frame.pack(fill=tk.BOTH, expand=True)
+
+        # Generate fields with current settings pre-filled
+        vars_dict = generate_annotate_fields(settings_frame)
+
+        # Pre-fill the current settings into vars_dict
+        current_settings = {
+            'image_type': self.image_type or '',
+            'channels': ','.join(self.channels) if self.channels else '',
+            'img_size': f"{self.image_size[0]},{self.image_size[1]}",
+            'annotation_column': self.annotation_column or '',
+            'normalize': str(self.normalize),
+            'percentiles': ','.join(map(str, self.percentiles)),
+            'measurement': ','.join(self.measurement) if self.measurement else '',
+            'threshold': str(self.threshold) if self.threshold is not None else '',
+            'normalize_channels': ','.join(self.normalize_channels) if self.normalize_channels else ''
+        }
+
+        for key, data in vars_dict.items():
+            if key in current_settings:
+                data['entry'].delete(0, tk.END)
+                data['entry'].insert(0, current_settings[key])
+
+        def apply_new_settings():
+            settings = {key: data['entry'].get() for key, data in vars_dict.items()}
+
+            # Process settings exactly as your original initiation function does
+            settings['channels'] = settings['channels'].split(',') if settings['channels'] else None
+            settings['img_size'] = list(map(int, settings['img_size'].split(',')))
+            settings['percentiles'] = list(map(convert_to_number, settings['percentiles'].split(','))) if settings['percentiles'] else [1, 99]
+            settings['normalize'] = settings['normalize'].lower() == 'true'
+            settings['normalize_channels'] = settings['normalize_channels'].split(',') if settings['normalize_channels'] else None
+
+            try:
+                settings['measurement'] = settings['measurement'].split(',') if settings['measurement'] else None
+                settings['threshold'] = None if settings['threshold'].lower() == 'none' else int(settings['threshold'])
+            except:
+                settings['measurement'] = None
+                settings['threshold'] = None
+
+            # Convert empty strings to None
+            for key, value in settings.items():
+                if isinstance(value, list):
+                    settings[key] = [v if v != '' else None for v in value]
+                elif value == '':
+                    settings[key] = None
+
+            # Apply these settings dynamically using update_settings method
+            self.update_settings(**{
+                'image_type': settings.get('image_type'),
+                'channels': settings.get('channels'),
+                'image_size': settings.get('img_size'),
+                'annotation_column': settings.get('annotation_column'),
+                'normalize': settings.get('normalize'),
+                'percentiles': settings.get('percentiles'),
+                'measurement': settings.get('measurement'),
+                'threshold': settings.get('threshold'),
+                'normalize_channels': settings.get('normalize_channels')
+            })
+
+            settings_window.destroy()
+
+        apply_button = spacrButton(settings_window, text="Apply Settings", command=apply_new_settings,show_text=False)
+        apply_button.pack(pady=10)
+
+    def update_settings(self, **kwargs):
+        allowed_attributes = {
+            'image_type', 'channels', 'image_size', 'annotation_column',
+            'normalize', 'percentiles', 'measurement', 'threshold', 'normalize_channels'
+        }
+
+        updated = False
+
+        for attr, value in kwargs.items():
+            if attr in allowed_attributes and value is not None:
+                setattr(self, attr, value)
+                updated = True
+
+        if 'image_size' in kwargs:
+            if isinstance(self.image_size, list):
+                self.image_size = (int(self.image_size[0]), int(self.image_size[0]))
+            elif isinstance(self.image_size, int):
+                self.image_size = (self.image_size, self.image_size)
+            else:
+                raise ValueError("Invalid image size")
+
+            self.calculate_grid_dimensions()
+            self.recreate_image_grid()
+
+        if updated:
+            current_index = self.index  # Retain current index
+            self.prefilter_paths_annotations()
+
+            # Ensure the retained index is still valid (not out of bounds)
+            max_index = len(self.filtered_paths_annotations) - 1
+            self.index = min(current_index, max_index := max(0, max(0, max(len(self.filtered_paths_annotations) - self.grid_rows * self.grid_cols, 0))))
+            self.load_images()
+
+    def recreate_image_grid(self):
+        # Remove current labels
+        for label in self.labels:
+            label.destroy()
+        self.labels.clear()
+
+        # Recreate the labels grid with updated dimensions
+        for i in range(self.grid_rows * self.grid_cols):
+            label = Label(self.grid_frame, bg=self.root.cget('bg'))
+            label.grid(row=i // self.grid_cols, column=i % self.grid_cols, padx=2, pady=2, sticky="nsew")
+            self.labels.append(label)
+
+        # Reconfigure grid weights
+        for row in range(self.grid_rows):
+            self.grid_frame.grid_rowconfigure(row, weight=1)
+        for col in range(self.grid_cols):
+            self.grid_frame.grid_columnconfigure(col, weight=1)
+
+
     def swich_back_annotation_column(self):
         self.annotation_column = self.orig_annotation_columns
         self.prefilter_paths_annotations()