spacr 0.4.15__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- spacr/__init__.py +2 -2
- spacr/core.py +52 -10
- spacr/deep_spacr.py +2 -3
- spacr/gui.py +0 -1
- spacr/gui_core.py +247 -41
- spacr/gui_elements.py +133 -2
- spacr/gui_utils.py +22 -17
- spacr/io.py +624 -149
- spacr/ml.py +141 -258
- spacr/plot.py +76 -34
- spacr/resources/MEDIAR/__pycache__/SetupDict.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/evaluate.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/generate_mapping.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/main.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/Predictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/Trainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/EnsemblePredictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Predictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Trainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/BasePredictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/BaseTrainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/measures.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/datasetter.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/transforms.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/CellAware.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/LoadImage.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/NormalizeImage.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/models/__pycache__/MEDIARFormer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/models/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/sequencing.py +73 -38
- spacr/settings.py +161 -135
- spacr/submodules.py +618 -215
- spacr/timelapse.py +197 -29
- spacr/toxo.py +23 -23
- spacr/utils.py +186 -128
- {spacr-0.4.15.dist-info → spacr-0.5.0.dist-info}/METADATA +5 -2
- {spacr-0.4.15.dist-info → spacr-0.5.0.dist-info}/RECORD +53 -24
- spacr/stats.py +0 -221
- /spacr/{cellpose.py → spacr_cellpose.py} +0 -0
- {spacr-0.4.15.dist-info → spacr-0.5.0.dist-info}/LICENSE +0 -0
- {spacr-0.4.15.dist-info → spacr-0.5.0.dist-info}/WHEEL +0 -0
- {spacr-0.4.15.dist-info → spacr-0.5.0.dist-info}/entry_points.txt +0 -0
- {spacr-0.4.15.dist-info → spacr-0.5.0.dist-info}/top_level.txt +0 -0
spacr/__init__.py
CHANGED
@@ -26,7 +26,7 @@ from . import submodules
 from . import openai
 from . import ml
 from . import toxo
-from . import cellpose
+from . import spacr_cellpose
 from . import sp_stats
 from . import logger
 
@@ -57,7 +57,7 @@ __all__ = [
     "openai",
     "ml",
     "toxo",
-    "cellpose",
+    "spacr_cellpose",
     "sp_stats",
     "logger"
 ]
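Note: the `cellpose` submodule was renamed to `spacr_cellpose` in 0.5.0 (see the file rename entry in the summary above), presumably to avoid shadowing the upstream `cellpose` package on import. Downstream code importing the old name needs updating; a minimal compatibility sketch (the alias name is arbitrary):

    # Works against both versions of spacr.
    try:
        from spacr import spacr_cellpose as spacr_cp  # spacr >= 0.5.0
    except ImportError:
        from spacr import cellpose as spacr_cp        # spacr <= 0.4.15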
spacr/core.py
CHANGED
@@ -9,12 +9,11 @@ warnings.filterwarnings("ignore", message="3D stack used, but stitch_threshold=0
 
 def preprocess_generate_masks(settings):
 
-    from .io import preprocess_img_data, _load_and_concatenate_arrays
+    from .io import preprocess_img_data, _load_and_concatenate_arrays, convert_to_yokogawa, convert_separate_files_to_yokogawa
     from .plot import plot_image_mask_overlay, plot_arrays
-    from .utils import _pivot_counts_table, check_mask_folder, adjust_cell_masks, print_progress, save_settings, delete_intermedeate_files, format_path_for_system, normalize_src_path
+    from .utils import _pivot_counts_table, check_mask_folder, adjust_cell_masks, print_progress, save_settings, delete_intermedeate_files, format_path_for_system, normalize_src_path, generate_image_path_map, copy_images_to_consolidated
     from .settings import set_default_settings_preprocess_generate_masks
-
-
+
     if 'src' in settings:
         if not isinstance(settings['src'], (str, list)):
             ValueError(f'src must be a string or a list of strings')
@@ -25,19 +24,51 @@ def preprocess_generate_masks(settings):
 
     settings['src'] = normalize_src_path(settings['src'])
 
+    if settings['consolidate']:
+        image_map = generate_image_path_map(settings['src'])
+        copy_images_to_consolidated(image_map, settings['src'])
+        settings['src'] = os.path.join(settings['src'], 'consolidated')
+
     if isinstance(settings['src'], str):
         settings['src'] = [settings['src']]
 
     if isinstance(settings['src'], list):
         source_folders = settings['src']
         for source_folder in source_folders:
+
             print(f'Processing folder: {source_folder}')
-
+
+            source_folder = format_path_for_system(source_folder)
             settings['src'] = source_folder
             src = source_folder
             settings = set_default_settings_preprocess_generate_masks(settings)
+
+            if settings['metadata_type'] == 'auto':
+                if settings['custom_regex'] != None:
+                    try:
+                        print(f"using regex: {settings['custom_regex']}")
+                        convert_separate_files_to_yokogawa(folder=source_folder, regex=settings['custom_regex'])
+                    except:
+                        try:
+                            convert_to_yokogawa(folder=source_folder)
+                        except Exception as e:
+                            print(f"Error: Tried to convert image files and image file name metadata with regex {settings['custom_regex']} then without regex but failed both.")
+                            print(f'Error: {e}')
+                            return
+                else:
+                    try:
+                        convert_to_yokogawa(folder=source_folder)
+                    except Exception as e:
+                        print(f"Error: Tried to convert image files and image file name metadata without regex but failed.")
+                        print(f'Error: {e}')
+                        return
+
+            if settings['cell_channel'] is None and settings['nucleus_channel'] is None and settings['pathogen_channel'] is None:
+                print(f'Error: At least one of cell_channel, nucleus_channel or pathogen_channel must be defined')
+                return
+
             save_settings(settings, name='gen_mask_settings')
-
+
             if not settings['pathogen_channel'] is None:
                 custom_model_ls = ['toxo_pv_lumen','toxo_cyto']
                 if settings['pathogen_model'] not in custom_model_ls:
@@ -57,7 +88,7 @@ def preprocess_generate_masks(settings):
             settings_df = pd.DataFrame(list(settings.items()), columns=['setting_key', 'setting_value'])
             settings_df['setting_value'] = settings_df['setting_value'].apply(str)
             display(settings_df)
-
+
             if settings['test_mode']:
                 print(f'Starting Test mode ...')
 
@@ -136,7 +167,6 @@ def preprocess_generate_masks(settings):
 
             if settings['plot']:
                 if not settings['timelapse']:
-
                     if settings['test_mode'] == True:
                         settings['examples_to_plot'] = len(os.path.join(src,'merged'))
 
@@ -167,6 +197,7 @@ def preprocess_generate_masks(settings):
             gc.collect()
 
             if settings['delete_intermediate']:
+                print(f"deleting intermediate files")
                 delete_intermedeate_files(settings)
 
             print("Successfully completed run")
@@ -288,6 +319,17 @@ def generate_cellpose_masks(src, settings, object_type):
                 continue
 
             batch = prepare_batch_for_segmentation(batch)
+
+
+            #if settings['denoise']:
+            #    if object_type == 'cell':
+            #        model_type = "denoise_cyto3"
+            #    elif object_type == 'nucleus':
+            #        model_type = "denoise_nucleus"
+            #    else:
+            #        raise ValueError(f"No denoise model for object_type: {object_type}")
+            #    dn = denoise.DenoiseModel(model_type=model_type, gpu=device)
+            #    batch = dn.eval(imgs=batch, channels=chans, diameter=object_settings['diameter'])
 
             if timelapse:
                 movie_path = os.path.join(os.path.dirname(src), 'movies')
@@ -489,7 +531,7 @@ def generate_image_umap(settings={}):
         all_df = pd.concat([all_df, df], axis=0)
         #image_paths.extend(image_paths_tmp)
 
-    all_df['cond'] = all_df['
+    all_df['cond'] = all_df['columnID'].apply(map_condition, neg=settings['neg'], pos=settings['pos'], mix=settings['mix'])
 
     if settings['exclude_conditions']:
         if isinstance(settings['exclude_conditions'], str):
@@ -673,7 +715,7 @@ def reducer_hyperparameter_search(settings={}, reduction_params=None, dbscan_par
         df = _read_and_join_tables(db_path, table_names=tables)
         all_df = pd.concat([all_df, df], axis=0)
 
-    all_df['cond'] = all_df['
+    all_df['cond'] = all_df['columnID'].apply(map_condition, neg=settings['neg'], pos=settings['pos'], mix=settings['mix'])
 
    if settings['exclude_conditions']:
        if isinstance(settings['exclude_conditions'], str):
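Taken together, 0.5.0 changes the front of the `preprocess_generate_masks` pipeline: an optional consolidation step copies images from nested subfolders into `src/consolidated`; when `metadata_type` is `'auto'`, file names are converted to Yokogawa-style metadata (`convert_separate_files_to_yokogawa` when a `custom_regex` is given, falling back to `convert_to_yokogawa`); and the run now aborts early unless at least one channel is defined. A minimal sketch of a call exercising the new path — only the keys the new code reads are shown, the rest are filled by `set_default_settings_preprocess_generate_masks`, and the path is hypothetical:

    from spacr.core import preprocess_generate_masks

    settings = {
        'src': '/data/plate1',    # hypothetical source folder
        'consolidate': True,      # new: gather images into src/consolidated first
        'metadata_type': 'auto',  # new: triggers filename conversion to Yokogawa style
        'custom_regex': None,     # or a regex for convert_separate_files_to_yokogawa
        'cell_channel': 0,        # 0.5.0 requires at least one channel to be set
        'nucleus_channel': None,
        'pathogen_channel': None,
    }
    preprocess_generate_masks(settings)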
spacr/deep_spacr.py
CHANGED
@@ -1144,7 +1144,7 @@ def model_fusion(model_paths,save_path,device='cpu',model_name='maxvit_t',pretra
 
 def annotate_filter_vision(settings):
 
-    from .utils import annotate_conditions
+    from .utils import annotate_conditions, correct_metadata
 
     def filter_csv_by_png(csv_file):
         """
@@ -1188,8 +1188,7 @@ def annotate_filter_vision(settings):
 
     df = pd.read_csv(src)
 
-
-    df['column_name'] = df['column']
+    df = correct_metadata(df)
 
     df = annotate_conditions(df,
                              cells=settings['cells'],
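The hand-rolled column aliasing in `annotate_filter_vision` is gone; metadata normalization now goes through the new `spacr.utils.correct_metadata` helper. A rough before/after sketch (the full set of columns `correct_metadata` standardizes is not visible in this diff):

    import pandas as pd

    df = pd.DataFrame({'column': ['c01', 'c02']})

    # 0.4.15: one-off aliasing inside annotate_filter_vision
    df['column_name'] = df['column']

    # 0.5.0: centralized cleanup (behavior defined in spacr.utils.correct_metadata)
    # from spacr.utils import correct_metadata
    # df = correct_metadata(df)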
spacr/gui.py
CHANGED
@@ -48,7 +48,6 @@ class MainApp(tk.Tk):
         }
 
         self.additional_gui_apps = {
-            "Convert": (lambda frame: initiate_root(self, 'convert'), "Convert images to Grayscale TIFs."),
             "Umap": (lambda frame: initiate_root(self, 'umap'), "Generate UMAP embeddings with datapoints represented as images."),
             "Train Cellpose": (lambda frame: initiate_root(self, 'train_cellpose'), "Train custom Cellpose models."),
             "ML Analyze": (lambda frame: initiate_root(self, 'ml_analyze'), "Machine learning analysis of data."),
spacr/gui_core.py
CHANGED
@@ -176,36 +176,6 @@ def display_figure(fig):
         elif event.num == 5:  # Scroll down
             print("zoom out")
 
-    def zoom_v1(event):
-        # Fixed zoom factors (adjust these if you want faster or slower zoom)
-        zoom_in_factor = 0.9   # When zooming in, ranges shrink by 10%
-        zoom_out_factor = 1.1  # When zooming out, ranges increase by 10%
-
-        # Determine the zoom direction based on the scroll event
-        if event.num == 4 or (hasattr(event, 'delta') and event.delta > 0):  # Scroll up = zoom in
-            factor = zoom_in_factor
-        elif event.num == 5 or (hasattr(event, 'delta') and event.delta < 0):  # Scroll down = zoom out
-            factor = zoom_out_factor
-        else:
-            return  # No recognized scroll direction
-
-        for ax in canvas.figure.get_axes():
-            xlim = ax.get_xlim()
-            ylim = ax.get_ylim()
-
-            x_center = (xlim[1] + xlim[0]) / 2
-            y_center = (ylim[1] + ylim[0]) / 2
-
-            x_range = (xlim[1] - xlim[0]) * factor
-            y_range = (ylim[1] - ylim[0]) * factor
-
-            # Set the new limits
-            ax.set_xlim([x_center - x_range / 2, x_center + x_range / 2])
-            ax.set_ylim([y_center - y_range / 2, y_center + y_range / 2])
-
-            # Redraw the figure efficiently
-            canvas.draw_idle()
-
     def zoom(event):
         # Fixed zoom factors (adjust these if you want faster or slower zoom)
         zoom_in_factor = 0.9  # When zooming in, ranges shrink by 10%
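The deleted `zoom_v1` and the surviving `zoom` share the same centered-scaling arithmetic: each axis range is multiplied by 0.9 (zoom in) or 1.1 (zoom out) while the midpoint stays fixed. A self-contained sketch of that math, independent of Tk and matplotlib:

    def scaled_limits(lim, factor):
        # Scale a (lo, hi) axis range by `factor`, keeping the midpoint fixed.
        center = (lim[0] + lim[1]) / 2
        half_range = (lim[1] - lim[0]) * factor / 2
        return (center - half_range, center + half_range)

    # Zooming in on x-limits (0, 10): the range shrinks by 10%, still centered on 5.
    print(scaled_limits((0.0, 10.0), 0.9))  # -> (0.5, 9.5)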
@@ -832,7 +802,7 @@ def initiate_abort():
     global thread_control, q, parent_frame
     if thread_control.get("run_thread") is not None:
         try:
-            q.put("Aborting processes...")
+            #q.put("Aborting processes...")
             thread_control.get("run_thread").terminate()
             thread_control["run_thread"] = None
             q.put("Processes aborted.")
@@ -840,22 +810,164 @@ def initiate_abort():
             q.put(f"Error aborting process: {e}")
 
     thread_control = {"run_thread": None, "stop_requested": False}
+
+def check_src_folders_files(settings, settings_type, q):
+    """
+    Checks if 'src' is a key in the settings dictionary and if it exists as a valid path.
+    If 'src' is a list, iterates through the list and checks each path.
+    If any path is missing, prompts the user to edit or remove invalid paths.
+    """
+
+    request_stop = False
+
+    def _folder_has_images(folder_path, image_extensions = {".png", ".jpg", ".jpeg", ".bmp", ".gif", ".tiff", ".tif", ".webp", ".npy", ".npz", "nd2", "czi", "lif"}):
+        """Check if a folder contains any image files."""
+        return any(file.lower().endswith(tuple(image_extensions)) for file in os.listdir(folder_path))
+
+    def _has_folder(parent_folder, sub_folder="measure"):
+        """Check if a specific sub-folder exists inside the given folder."""
+        return os.path.isdir(os.path.join(parent_folder, sub_folder))
+
+    from .utils import normalize_src_path, generate_image_path_map
+
+    settings['src'] = normalize_src_path(settings['src'])
+
+    src_value = settings.get("src")
+
+    # **Skip if 'src' is missing**
+    if src_value is None:
+        return request_stop
+
+    # Convert single string src to a list for uniform handling
+    if isinstance(src_value, str):
+        src_list = [src_value]
+    elif isinstance(src_value, list):
+        src_list = src_value
+    else:
+        request_stop = True
+        return request_stop  # Ensure early exit
+
+    # Identify missing paths
+    missing_paths = {i: path for i, path in enumerate(src_list) if not os.path.exists(path)}
+
+    if missing_paths:
+        q.put(f'Error: The following paths are missing: {missing_paths}')
+        request_stop = True
+        return request_stop  # Ensure early exit
+
+    conditions = [True]  # Initialize conditions list
+
+    for path in src_list:  # Fixed: Use src_list instead of src_value
+        if settings_type == 'mask':
+            if settings['consolidate']:
+                image_map = generate_image_path_map(path)
+                if len(image_map) > 0:
+                    request_stop = False
+                    return request_stop
+                else:
+                    q.put(f"Error: Missing subfolders with images for: {path}")
+                    request_stop = True
+                    return request_stop
+            else:
+                pictures_continue = _folder_has_images(path)
+                folder_chan_continue = _has_folder(path, "1")
+                folder_stack_continue = _has_folder(path, "stack")
+                folder_npz_continue = _has_folder(path, "norm_channel_stack")
+
+                if not pictures_continue:
+                    if not any([folder_chan_continue, folder_stack_continue, folder_npz_continue]):
+                        if not folder_chan_continue:
+                            q.put(f"Error: Missing channel folder in folder: {path}")
+
+                        if not folder_stack_continue:
+                            q.put(f"Error: Missing stack folder in folder: {path}")
+
+                        if not folder_npz_continue:
+                            q.put(f"Error: Missing norm_channel_stack folder in folder: {path}")
+                    else:
+                        q.put(f"Error: No images in folder: {path}")
+
+                #q.put(f"path:{path}")
+                #q.put(f"pictures_continue:{pictures_continue}, folder_chan_continue:{folder_chan_continue}, folder_stack_continue:{folder_stack_continue}, folder_npz_continue:{folder_npz_continue}")
+
+                conditions = [pictures_continue, folder_chan_continue, folder_stack_continue, folder_npz_continue]
+
+        if settings_type == 'measure':
+            if not os.path.basename(path) == 'merged':
+                path = os.path.join(path, "merged")
+            npy_continue = _folder_has_images(path, image_extensions={".npy"})
+            conditions = [npy_continue]
+
+        #if settings_type == 'recruitment':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'umap':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'analyze_plaques':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'map_barcodes':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'regression':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'classify':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        #if settings_type == 'analyze_plaques':
+        #    if not os.path.basename(path) == 'measurements':
+        #        path = os.path.join(path, "measurements")
+        #    db_continue = _folder_has_images(path, image_extensions={".db"})
+        #    conditions = [db_continue]
+
+        if not any(conditions):
+            q.put(f"Error: The following path(s) is missing images or folders: {path}")
+            request_stop = True
+
+    return request_stop
 
 def start_process(q=None, fig_queue=None, settings_type='mask'):
     global thread_control, vars_dict, parent_frame
     from .settings import check_settings, expected_types
     from .gui_utils import run_function_gui, set_cpu_affinity, initialize_cuda, display_gif_in_plot_frame, print_widget_structure
-
+
     if q is None:
         q = Queue()
     if fig_queue is None:
         fig_queue = Queue()
     try:
-        settings = check_settings(vars_dict, expected_types, q)
+        settings, errors = check_settings(vars_dict, expected_types, q)
+
+        if len(errors) > 0:
+            return
+
+        if check_src_folders_files(settings, settings_type, q):
+            return
+
     except ValueError as e:
         q.put(f"Error: {e}")
         return
-
+
     if isinstance(thread_control, dict) and thread_control.get("run_thread") is not None:
         initiate_abort()
 
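`start_process` now validates before launching work: `check_settings` returns a `(settings, errors)` pair in 0.5.0, and the new `check_src_folders_files` pre-flights the `src` folder layout, pushing `Error:`-prefixed strings onto the GUI queue instead of raising. A standalone sketch of driving the validator (assumes spacr is installed; the path and settings dict are hypothetical):

    from queue import Queue
    from spacr.gui_core import check_src_folders_files

    q = Queue()
    settings = {'src': '/data/missing_plate', 'consolidate': False}  # hypothetical

    # Returns True ("request stop") when paths are missing or expected
    # subfolders such as 'stack' or 'norm_channel_stack' are absent.
    if check_src_folders_files(settings, settings_type='mask', q=q):
        while not q.empty():
            print(q.get())  # e.g. "Error: The following paths are missing: {...}"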
@@ -880,10 +992,11 @@ def start_process(q=None, fig_queue=None, settings_type='mask'):
 
         # Store the process in thread_control for future reference
         thread_control["run_thread"] = process
+
     else:
         q.put(f"Error: Unknown settings type '{settings_type}'")
         return
-
+
 def process_console_queue():
     global q, console_output, parent_frame, progress_bar, process_console_queue
 
@@ -894,13 +1007,105 @@ def process_console_queue():
     process_console_queue.current_maximum = None
 
     ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
+
+    spacing = 5
+
+    # **Configure styles for different message types**
+    console_output.tag_configure("error", foreground="red", spacing3 = spacing)
+    console_output.tag_configure("warning", foreground="orange", spacing3 = spacing)
+    console_output.tag_configure("normal", foreground="white", spacing3 = spacing)
 
     while not q.empty():
         message = q.get_nowait()
         clean_message = ansi_escape_pattern.sub('', message)
-
-        #
+
+        # **Detect Error Messages (Red)**
+        if clean_message.startswith("Error:"):
+            console_output.insert(tk.END, clean_message + "\n", "error")
+            console_output.see(tk.END)
+            #print("Run aborted due to error:", clean_message)  # Debug message
+            #return  # **Exit immediately to stop further execution**
+
+        # **Detect Warning Messages (Orange)**
+        elif clean_message.startswith("Warning:"):
+            console_output.insert(tk.END, clean_message + "\n", "warning")
 
+        # **Process Progress Messages Normally**
+        elif clean_message.startswith("Progress:"):
+            try:
+                # Extract the progress information
+                match = re.search(r'Progress: (\d+)/(\d+), operation_type: ([\w\s]*),(.*)', clean_message)
+
+                if match:
+                    current_progress = int(match.group(1))
+                    total_progress = int(match.group(2))
+                    operation_type = match.group(3).strip()
+                    additional_info = match.group(4).strip()  # Capture everything after operation_type
+
+                    # Check if the maximum value has changed
+                    if process_console_queue.current_maximum != total_progress:
+                        process_console_queue.current_maximum = total_progress
+                        process_console_queue.completed_tasks = []
+
+                    # Add the task to the completed set
+                    process_console_queue.completed_tasks.append(current_progress)
+
+                    # Calculate the unique progress count
+                    unique_progress_count = len(np.unique(process_console_queue.completed_tasks))
+
+                    # Update the progress bar
+                    if progress_bar:
+                        progress_bar['maximum'] = total_progress
+                        progress_bar['value'] = unique_progress_count
+
+                    # Store operation type and additional info
+                    if operation_type:
+                        progress_bar.operation_type = operation_type
+                        progress_bar.additional_info = additional_info
+
+                    # Update the progress label
+                    if progress_bar.progress_label:
+                        progress_bar.update_label()
+
+                    # Clear completed tasks when progress is complete
+                    if unique_progress_count >= total_progress:
+                        process_console_queue.completed_tasks.clear()
+
+            except Exception as e:
+                print(f"Error parsing progress message: {e}")
+
+        # **Insert Normal Messages with Extra Line Spacing**
+        else:
+            console_output.insert(tk.END, clean_message + "\n", "normal")
+
+        console_output.see(tk.END)
+
+    # **Continue processing if no error was detected**
+    after_id = console_output.after(uppdate_frequency, process_console_queue)
+    parent_frame.after_tasks.append(after_id)
+
+def process_console_queue_v2():
+    global q, console_output, parent_frame, progress_bar, process_console_queue
+
+    # Initialize function attribute if it doesn't exist
+    if not hasattr(process_console_queue, "completed_tasks"):
+        process_console_queue.completed_tasks = []
+    if not hasattr(process_console_queue, "current_maximum"):
+        process_console_queue.current_maximum = None
+
+    ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
+
+    while not q.empty():
+        message = q.get_nowait()
+        clean_message = ansi_escape_pattern.sub('', message)
+
+        # **Abort Execution if an Error Message is Detected**
+        if clean_message.startswith("Error:"):
+            console_output.insert(tk.END, clean_message + "\n", "error")
+            console_output.see(tk.END)
+            print("Run aborted due to error:", clean_message)  # Debug message
+            return  # **Exit immediately to stop further execution**
+
         # Check if the message contains progress information
         if clean_message.startswith("Progress:"):
             try:
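The rewritten console loop above dispatches on message prefixes (`Error:`, `Warning:`, `Progress:`) and parses progress updates with the regex shown in the diff. A quick standalone check of that pattern against a message shaped the way the regex expects (the message text itself is illustrative):

    import re

    pattern = r'Progress: (\d+)/(\d+), operation_type: ([\w\s]*),(.*)'
    msg = "Progress: 3/10, operation_type: generate masks, time/batch: 2.1s"

    m = re.search(pattern, msg)
    if m:
        current, total = int(m.group(1)), int(m.group(2))
        op = m.group(3).strip()     # "generate masks"
        extra = m.group(4).strip()  # "time/batch: 2.1s"
        print(f"{current}/{total} [{op}] {extra}")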
@@ -928,8 +1133,7 @@ def process_console_queue():
                     if progress_bar:
                         progress_bar['maximum'] = total_progress
                         progress_bar['value'] = unique_progress_count
-
-
+
                     # Store operation type and additional info
                     if operation_type:
                         progress_bar.operation_type = operation_type
@@ -945,11 +1149,13 @@ def process_console_queue():
 
             except Exception as e:
                 print(f"Error parsing progress message: {e}")
+
         else:
-            #
+            # Insert non-progress messages into the console
             console_output.insert(tk.END, clean_message + "\n")
             console_output.see(tk.END)
-
+
+    # **Continue processing if no error was detected**
     after_id = console_output.after(uppdate_frequency, process_console_queue)
     parent_frame.after_tasks.append(after_id)
 