spacr 0.1.64__py3-none-any.whl → 0.1.75__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their public registries.
spacr/core.py CHANGED
@@ -971,7 +971,7 @@ def generate_dataset(src, file_metadata=None, experiment='TSG101_screen', sample
  shutil.rmtree(temp_dir)
  print(f"\nSaved {total_images} images to {tar_name}")

- def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=224, batch_size=64, normalize=True, preload='images', num_workers=10, threshold=0.5, verbose=False):
+ def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=224, batch_size=64, normalize=True, preload='images', n_job=10, threshold=0.5, verbose=False):

  from .io import TarImageDataset
  from .utils import process_vision_results
@@ -994,7 +994,7 @@ def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=22
  model = torch.load(model_path)

  dataset = TarImageDataset(tar_path, transform=transform)
- data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True)
+ data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, n_job=n_job, pin_memory=True)

  model_name = os.path.splitext(os.path.basename(model_path))[0]
  dataset_name = os.path.splitext(os.path.basename(tar_path))[0]
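Note: this release renames the worker-count argument from num_workers to n_job throughout spacr's own function signatures. torch.utils.data.DataLoader itself (as of current PyTorch releases) only accepts num_workers, so the value has to be forwarded under that keyword when the loader is built. A minimal standalone sketch of that forwarding, not spacr code:

    # Minimal sketch (not spacr code): DataLoader exposes the worker count as
    # `num_workers`, so a caller-facing `n_job` argument must be forwarded
    # under DataLoader's own keyword.
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.zeros(8, 3, 224, 224))  # stand-in dataset
    n_job = 10                                             # caller-facing name

    loader = DataLoader(dataset,
                        batch_size=4,
                        shuffle=True,
                        num_workers=n_job,   # DataLoader's keyword
                        pin_memory=True)
    # DataLoader(dataset, n_job=n_job) would raise TypeError: unexpected keyword argument.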
@@ -1034,7 +1034,7 @@ def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=22
  torch.cuda.memory.empty_cache()
  return df

- def apply_model(src, model_path, image_size=224, batch_size=64, normalize=True, num_workers=10):
+ def apply_model(src, model_path, image_size=224, batch_size=64, normalize=True, n_job=10):

  from .io import NoClassDataset

@@ -1055,7 +1055,7 @@ def apply_model(src, model_path, image_size=224, batch_size=64, normalize=True,

  print(f'Loading dataset in {src} with {len(src)} images')
  dataset = NoClassDataset(data_dir=src, transform=transform, shuffle=True, load_to_memory=False)
- data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
+ data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, n_job=n_job)
  print(f'Loaded {len(src)} images')

  result_loc = os.path.splitext(model_path)[0]+datetime.date.today().strftime('%y%m%d')+'_'+os.path.splitext(model_path)[1]+'_test_result.csv'
@@ -1302,7 +1302,7 @@ def generate_training_dataset(src, mode='annotation', annotation_column='test',

  return

- def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_size=32, classes=['nc','pc'], num_workers=None, validation_split=0.0, max_show=2, pin_memory=False, normalize=False, channels=[1, 2, 3], augment=False, verbose=False):
+ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_size=32, classes=['nc','pc'], n_job=None, validation_split=0.0, max_show=2, pin_memory=False, normalize=False, channels=[1, 2, 3], augment=False, verbose=False):

  """
  Generate data loaders for training and validation/test datasets.
@@ -1314,7 +1314,7 @@ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_
  - image_size (int): The size of the input images.
  - batch_size (int): The batch size for the data loaders.
  - classes (list): The list of classes to consider.
- - num_workers (int): The number of worker threads for data loading.
+ - n_job (int): The number of worker threads for data loading.
  - validation_split (float): The fraction of data to use for validation when train_mode is 'erm'.
  - max_show (int): The maximum number of images to show when verbose is True.
  - pin_memory (bool): Whether to pin memory for faster data transfer.
@@ -1404,10 +1404,10 @@ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_
  #val_dataset = augment_dataset(val_dataset, is_grayscale=(len(channels) == 1))
  print(f'Data after augmentation: Train: {len(train_dataset)}')#, Validataion:{len(val_dataset)}')

- train_loaders = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
- val_loaders = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loaders = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
+ val_loaders = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
  else:
- train_loaders = DataLoader(data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loaders = DataLoader(data, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)

  elif train_mode == 'irm':
  data = MyDataset(data_dir, classes, transform=transform, shuffle=shuffle, pin_memory=pin_memory)
@@ -1436,13 +1436,13 @@ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_
  #val_dataset = augment_dataset(val_dataset, is_grayscale=(len(channels) == 1))
  print(f'Data after augmentation: Train: {len(train_dataset)}')#, Validataion:{len(val_dataset)}')

- train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
- val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
+ val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)

  train_loaders.append(train_loader)
  val_loaders.append(val_loader)
  else:
- train_loader = DataLoader(plate_data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loader = DataLoader(plate_data, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
  train_loaders.append(train_loader)
  val_loaders.append(None)

@@ -2078,11 +2078,11 @@ def identify_masks(src, object_type, model_name, batch_size, channels, diameter,
  else:
  radius = 100

- workers = os.cpu_count()-2
- if workers < 1:
- workers = 1
+ n_job = os.cpu_count()-2
+ if n_job < 1:
+ n_job = 1

- mask_stack = _btrack_track_cells(src, name, batch_filenames, object_type, plot, save, masks_3D=masks, mode=timelapse_mode, timelapse_remove_transient=timelapse_remove_transient, radius=radius, workers=workers)
+ mask_stack = _btrack_track_cells(src, name, batch_filenames, object_type, plot, save, masks_3D=masks, mode=timelapse_mode, timelapse_remove_transient=timelapse_remove_transient, radius=radius, n_job=n_job)
  if timelapse_mode == 'trackpy':
  mask_stack = _trackpy_track_cells(src, name, batch_filenames, object_type, masks, timelapse_displacement, timelapse_memory, timelapse_remove_transient, plot, save, timelapse_mode)

@@ -2303,9 +2303,9 @@ def generate_cellpose_masks(src, settings, object_type):
  else:
  radius = 100

- workers = os.cpu_count()-2
- if workers < 1:
- workers = 1
+ n_job = os.cpu_count()-2
+ if n_job < 1:
+ n_job = 1

  mask_stack = _btrack_track_cells(src=src,
  name=name,
@@ -2317,7 +2317,7 @@ def generate_cellpose_masks(src, settings, object_type):
  mode=timelapse_mode,
  timelapse_remove_transient=timelapse_remove_transient,
  radius=radius,
- workers=workers)
+ n_job=n_job)
  if timelapse_mode == 'trackpy':
  mask_stack = _trackpy_track_cells(src=src,
  name=name,
@@ -2551,7 +2551,7 @@ def compare_cellpose_masks(src, verbose=False, processes=None, save=True):
  common_files.intersection_update(os.listdir(d))
  common_files = list(common_files)

- # Create a pool of workers
+ # Create a pool of n_job
  with Pool(processes=processes) as pool:
  args = [(src, filename, dirs, conditions) for filename in common_files]
  results = pool.map(compare_mask, args)
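Note: the worker-count clamp renamed in the hunks above (n_job = os.cpu_count()-2, floored at 1) can be written more compactly. A small sketch of an equivalent form; the `or 1` guard for os.cpu_count() returning None is an addition, not part of spacr's code:

    import os

    # Equivalent clamp: at least 1 worker, otherwise two fewer than the CPU count.
    # os.cpu_count() may return None on some platforms, hence the `or 1` guard.
    n_job = max(1, (os.cpu_count() or 1) - 2)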
spacr/deep_spacr.py CHANGED
@@ -230,7 +230,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  image_size=settings['image_size'],
  batch_size=settings['batch_size'],
  classes=settings['classes'],
- num_workers=settings['num_workers'],
+ n_job=settings['n_job'],
  validation_split=settings['val_split'],
  pin_memory=settings['pin_memory'],
  normalize=settings['normalize'],
@@ -255,7 +255,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  optimizer_type = settings['optimizer_type'],
  use_checkpoint = settings['use_checkpoint'],
  dropout_rate = settings['dropout_rate'],
- num_workers = settings['num_workers'],
+ n_job = settings['n_job'],
  val_loaders = val,
  test_loaders = None,
  intermedeate_save = settings['intermedeate_save'],
@@ -276,7 +276,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  image_size=settings['image_size'],
  batch_size=settings['batch_size'],
  classes=settings['classes'],
- num_workers=settings['num_workers'],
+ n_job=settings['n_job'],
  validation_split=0.0,
  pin_memory=settings['pin_memory'],
  normalize=settings['normalize'],
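Note: train_test_model now reads settings['n_job'] instead of settings['num_workers'], so settings dictionaries (or imported settings CSVs) written for earlier versions need the key renamed, otherwise the lookup raises KeyError. A hypothetical fragment using only keys visible in this diff; the values are illustrative, not spacr defaults:

    # Hypothetical settings fragment; values are illustrative only.
    settings = {
        'image_size': 224,
        'batch_size': 32,
        'classes': ['nc', 'pc'],
        'n_job': 10,          # was 'num_workers' before 0.1.75
        'val_split': 0.1,
        'pin_memory': True,
        'normalize': True,
    }
    settings['n_job']  # the lookup train_test_model performs after this change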
@@ -315,7 +315,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  torch.cuda.memory.empty_cache()
  gc.collect()

- def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='erm', epochs=100, learning_rate=0.0001, weight_decay=0.05, amsgrad=False, optimizer_type='adamw', use_checkpoint=False, dropout_rate=0, num_workers=20, val_loaders=None, test_loaders=None, init_weights='imagenet', intermedeate_save=None, chan_dict=None, schedule = None, loss_type='binary_cross_entropy_with_logits', gradient_accumulation=False, gradient_accumulation_steps=4, channels=['r','g','b']):
+ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='erm', epochs=100, learning_rate=0.0001, weight_decay=0.05, amsgrad=False, optimizer_type='adamw', use_checkpoint=False, dropout_rate=0, n_job=20, val_loaders=None, test_loaders=None, init_weights='imagenet', intermedeate_save=None, chan_dict=None, schedule = None, loss_type='binary_cross_entropy_with_logits', gradient_accumulation=False, gradient_accumulation_steps=4, channels=['r','g','b']):
  """
  Trains a model using the specified parameters.

@@ -332,7 +332,7 @@ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='
  optimizer_type (str, optional): The type of optimizer to use. Defaults to 'adamw'.
  use_checkpoint (bool, optional): Whether to use checkpointing during training. Defaults to False.
  dropout_rate (float, optional): The dropout rate for the model. Defaults to 0.
- num_workers (int, optional): The number of workers for data loading. Defaults to 20.
+ n_job (int, optional): The number of n_job for data loading. Defaults to 20.
  val_loaders (list, optional): A list of validation data loaders. Defaults to None.
  test_loaders (list, optional): A list of test data loaders. Defaults to None.
  init_weights (str, optional): The initialization weights for the model. Defaults to 'imagenet'.
@@ -357,7 +357,7 @@ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='

  use_cuda = torch.cuda.is_available()
  device = torch.device("cuda" if use_cuda else "cpu")
- kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
+ kwargs = {'n_job': n_job, 'pin_memory': True} if use_cuda else {}

  for idx, (images, labels, filenames) in enumerate(train_loaders):
  batch, chans, height, width = images.shape
spacr/gui.py CHANGED
@@ -5,8 +5,6 @@ import os, requests
  from multiprocessing import set_start_method
  from .gui_elements import spacrButton, create_menu_bar, set_dark_style
  from .gui_core import initiate_root
- from .app_annotate import initiate_annotation_app_root
- from .app_make_masks import initiate_mask_app_root

  class MainApp(tk.Tk):
  def __init__(self, default_app=None):
@@ -22,8 +20,8 @@ class MainApp(tk.Tk):
  self.gui_apps = {
  "Mask": (lambda frame: initiate_root(frame, 'mask'), "Generate cellpose masks for cells, nuclei and pathogen images."),
  "Measure": (lambda frame: initiate_root(frame, 'measure'), "Measure single object intensity and morphological feature. Crop and save single object image"),
- "Annotate": (initiate_annotation_app_root, "Annotation single object images on a grid. Annotations are saved to database."),
- "Make Masks": (initiate_mask_app_root, "Adjust pre-existing Cellpose models to your specific dataset for improved performance"),
+ "Annotate": (lambda frame: initiate_root(frame, 'annotate'), "Annotation single object images on a grid. Annotations are saved to database."),
+ "Make Masks": (lambda frame: initiate_root(frame, 'make_masks'),"Adjust pre-existing Cellpose models to your specific dataset for improved performance"),
  "Classify": (lambda frame: initiate_root(frame, 'classify'), "Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images."),
  "Sequencing": (lambda frame: initiate_root(frame, 'sequencing'), "Analyze sequensing data."),
  "Umap": (lambda frame: initiate_root(frame, 'umap'), "Generate UMAP embedings with datapoints represented as images.")
@@ -32,21 +30,8 @@ class MainApp(tk.Tk):
  self.selected_app = tk.StringVar()
  self.create_widgets()

-
- if default_app == "Mask":
+ if default_app in self.gui_apps:
  self.load_app(default_app, self.gui_apps[default_app][0])
- elif default_app == "Measure":
- self.load_app(default_app, self.gui_apps[default_app][1])
- elif default_app == "Annotate":
- self.load_app(default_app, self.gui_apps[default_app][2])
- elif default_app == "Make Masks":
- self.load_app(default_app, self.gui_apps[default_app][3])
- elif default_app == "Classify":
- self.load_app(default_app, self.gui_apps[default_app][4])
- elif default_app == "Sequencing":
- self.load_app(default_app, self.gui_apps[default_app][5])
- elif default_app == "Umap":
- self.load_app(default_app, self.gui_apps[default_app][6])

  def create_widgets(self):
  # Create the menu bar
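Note: the removed branches indexed self.gui_apps[default_app] at positions 1 through 6, but each registry value is a (constructor, description) pair, so every app except "Mask" would have received the wrong element; the new membership test always takes element 0. A standalone sketch of that lookup pattern, with placeholder callables rather than spacr's real app constructors:

    # Standalone sketch of the dispatch above; the lambdas are placeholders.
    gui_apps = {
        "Mask": (lambda frame: print("init mask in", frame), "Generate cellpose masks..."),
        "Measure": (lambda frame: print("init measure in", frame), "Measure single objects..."),
    }

    def load_app(name, app_func, frame="frame-placeholder"):
        app_func(frame)  # the callable is always element 0 of the registry pair

    default_app = "Measure"
    if default_app in gui_apps:
        app_func, app_desc = gui_apps[default_app]
        load_app(default_app, app_func)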
@@ -88,7 +73,6 @@ class MainApp(tk.Tk):

  # Create custom button with text
  button = spacrButton(buttons_frame, text=app_name, command=lambda app_name=app_name, app_func=app_func: self.load_app(app_name, app_func), font=('Helvetica', 12))
- #button = ttk.Button(buttons_frame, text=app_name, command=lambda app_name=app_name, app_func=app_func: self.load_app(app_name, app_func), style='Custom.TButton')
  button.grid(row=i, column=0, pady=10, padx=10, sticky="w")

  description_label = tk.Label(buttons_frame, text=app_desc, bg="black", fg="white", wraplength=800, justify="left", font=('Helvetica', 12))
@@ -161,4 +145,4 @@ def gui_app():

  if __name__ == "__main__":
  set_start_method('spawn', force=True)
- gui_app()
+ gui_app()
spacr/gui_core.py CHANGED
@@ -1,11 +1,11 @@
- import os, traceback, ctypes, matplotlib, requests, csv, re
+ import os, traceback, ctypes, matplotlib, requests, csv
  matplotlib.use('Agg')
  import tkinter as tk
  from tkinter import ttk
- import tkinter.font as tkFont
  from tkinter import filedialog
- from tkinter import font as tkFont
- from multiprocessing import Process, Value, Queue, Manager, set_start_method
+ from multiprocessing import Process, Value, Queue
+ from multiprocessing.sharedctypes import Synchronized
+ from multiprocessing import set_start_method
  from tkinter import ttk, scrolledtext
  from matplotlib.figure import Figure
  from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
@@ -14,7 +14,7 @@ import requests
  from huggingface_hub import list_repo_files

  from .settings import set_default_train_test_model, get_measure_crop_settings, set_default_settings_preprocess_generate_masks, get_analyze_reads_default_settings, set_default_umap_image_settings
- from .gui_elements import create_menu_bar, spacrButton, spacrLabel, spacrFrame, spacrCheckbutton, spacrDropdownMenu ,set_dark_style, set_default_font
+ from .gui_elements import create_menu_bar, spacrButton, spacrLabel, spacrFrame, spacrDropdownMenu ,set_dark_style, set_default_font
  from . gui_run import run_mask_gui, run_measure_gui, run_classify_gui, run_sequencing_gui, run_umap_gui

  try:
@@ -37,21 +37,18 @@ thread_control = {"run_thread": None, "stop_requested": False}

  def initiate_abort():
  global thread_control
- if thread_control.get("stop_requested") is not None:
- if isinstance(thread_control["stop_requested"], Value):
- thread_control["stop_requested"].value = 1
-
+ if isinstance(thread_control.get("stop_requested"), Synchronized):
+ thread_control["stop_requested"].value = 1
  if thread_control.get("run_thread") is not None:
- thread_control["run_thread"].join(timeout=5)
- if thread_control["run_thread"].is_alive():
- thread_control["run_thread"].terminate()
+ thread_control["run_thread"].terminate()
+ thread_control["run_thread"].join()
  thread_control["run_thread"] = None

- def start_process(q, fig_queue, settings_type='mask'):
+ def start_process_v1(q, fig_queue, settings_type='mask'):
  global thread_control, vars_dict
- from .settings import check_settings
+ from .settings import check_settings, expected_types

- settings = check_settings(vars_dict)
+ settings = check_settings(vars_dict, expected_types, q)
  if thread_control.get("run_thread") is not None:
  initiate_abort()
  stop_requested = Value('i', 0) # multiprocessing shared value for inter-process communication
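Note: the isinstance target in initiate_abort changes because multiprocessing.Value is a factory function, not a class, so it cannot be the second argument to isinstance(); the object it returns is an instance of multiprocessing.sharedctypes.Synchronized, which is what the new check (and the added import above) uses. A standalone sketch of the distinction:

    # Standalone sketch: Value is a factory, the wrapper it returns is Synchronized.
    from multiprocessing import Value
    from multiprocessing.sharedctypes import Synchronized

    stop_requested = Value('i', 0)
    print(isinstance(stop_requested, Synchronized))  # True
    # isinstance(stop_requested, Value)  # TypeError: isinstance() arg 2 must be a type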
@@ -68,9 +65,47 @@ def start_process(q, fig_queue, settings_type='mask'):
  thread_control["run_thread"] = Process(target=run_umap_gui, args=(settings, q, fig_queue, stop_requested))
  thread_control["run_thread"].start()

- def import_settings(settings_type='mask'):
+ def start_process(q=None, fig_queue=None, settings_type='mask'):
+ global thread_control, vars_dict
+ from .settings import check_settings, expected_types

- global vars_dict, scrollable_frame
+ if q is None:
+ q = Queue()
+ if fig_queue is None:
+ fig_queue = Queue()
+
+ try:
+ settings = check_settings(vars_dict, expected_types, q)
+ except ValueError as e:
+ q.put(f"Error: {e}")
+ return
+
+ if thread_control.get("run_thread") is not None:
+ initiate_abort()
+
+ stop_requested = Value('i', 0) # multiprocessing shared value for inter-process communication
+ thread_control["stop_requested"] = stop_requested
+
+ process_args = (settings, q, fig_queue, stop_requested)
+
+ if settings_type == 'mask':
+ thread_control["run_thread"] = Process(target=run_mask_gui, args=process_args)
+ elif settings_type == 'measure':
+ thread_control["run_thread"] = Process(target=run_measure_gui, args=process_args)
+ elif settings_type == 'classify':
+ thread_control["run_thread"] = Process(target=run_classify_gui, args=process_args)
+ elif settings_type == 'sequencing':
+ thread_control["run_thread"] = Process(target=run_sequencing_gui, args=process_args)
+ elif settings_type == 'umap':
+ thread_control["run_thread"] = Process(target=run_umap_gui, args=process_args)
+ else:
+ q.put(f"Error: Unknown settings type '{settings_type}'")
+ return
+
+ thread_control["run_thread"].start()
+
+ def import_settings(settings_type='mask'):
+ global vars_dict, scrollable_frame, button_scrollable_frame
  from .settings import generate_fields

  def read_settings_from_csv(csv_file_path):
@@ -98,6 +133,7 @@ def import_settings(settings_type='mask'):
  if not csv_file_path: # If no file is selected, return early
  return

+ #vars_dict = hide_all_settings(vars_dict, categories=None)
  csv_settings = read_settings_from_csv(csv_file_path)
  if settings_type == 'mask':
  settings = set_default_settings_preprocess_generate_masks(src='path', settings={})
@@ -115,6 +151,7 @@ def import_settings(settings_type='mask'):
  variables = convert_settings_dict_for_gui(settings)
  new_settings = update_settings_from_csv(variables, csv_settings)
  vars_dict = generate_fields(new_settings, scrollable_frame)
+ vars_dict = hide_all_settings(vars_dict, categories=None)

  def convert_settings_dict_for_gui(settings):
  variables = {}
@@ -314,9 +351,8 @@ def download_dataset(repo_id, subfolder, local_dir=None, retries=5, delay=5):

  raise Exception("Failed to download files after multiple attempts.")

-
  def setup_button_section(horizontal_container, settings_type='mask', settings_row=5, run=True, abort=True, download=True, import_btn=True):
- global button_frame, run_button, abort_button, download_dataset_button, import_button, q, fig_queue, vars_dict
+ global button_frame, button_scrollable_frame, run_button, abort_button, download_dataset_button, import_button, q, fig_queue, vars_dict
  button_frame = tk.Frame(horizontal_container, bg='black')
  horizontal_container.add(button_frame, stretch="always", sticky="nsew")
  horizontal_container.add(button_frame, stretch="always", sticky="nsew")
@@ -357,60 +393,29 @@ def setup_button_section(horizontal_container, settings_type='mask', settings_ro
  toggle_settings(button_scrollable_frame)
  return button_scrollable_frame

- def toggle_settings_v1(button_scrollable_frame):
- global vars_dict
- from .settings import categories
-
- if vars_dict is None:
- raise ValueError("vars_dict is not initialized.")
+ def hide_all_settings(vars_dict, categories):
+ """
+ Function to initially hide all settings in the GUI.

- def toggle_category(settings, var):
- for setting in settings:
- if setting in vars_dict:
- label, widget, _ = vars_dict[setting]
- if var.get() == 0:
- label.grid_remove()
- widget.grid_remove()
- else:
- label.grid()
- widget.grid()
+ Parameters:
+ - categories: dict, The categories of settings with their corresponding settings.
+ - vars_dict: dict, The dictionary containing the settings and their corresponding widgets.
+ """

- row = 1
- col = 3
- category_idx = 0
+ if categories is None:
+ from .settings import categories

  for category, settings in categories.items():
  if any(setting in vars_dict for setting in settings):
- category_var = tk.IntVar(value=0)
- vars_dict[category] = (None, None, category_var)
- toggle = spacrCheckbutton(
- button_scrollable_frame.scrollable_frame,
- text=category,
- variable=category_var,
- command=lambda cat=settings, var=category_var: toggle_category(cat, var)
- )
- # Directly set the style
- style = ttk.Style()
- font_style = tkFont.Font(family="Helvetica", size=12, weight="bold")
- style.configure('Spacr.TCheckbutton', font=font_style, background='black', foreground='#ffffff', indicatoron=False, relief='flat')
- style.map('Spacr.TCheckbutton', background=[('selected', 'black'), ('active', 'black')], foreground=[('selected', '#ffffff'), ('active', '#ffffff')])
- toggle.configure(style='Spacr.TCheckbutton')
- toggle.grid(row=row, column=col, sticky="w", pady=2, padx=2)
- #row += 1
- col += 1
- category_idx += 1
-
- if category_idx % 4 == 0:
- row += 1
- col = 2
-
- for settings in categories.values():
- for setting in settings:
- if setting in vars_dict:
- label, widget, _ = vars_dict[setting]
- label.grid_remove()
- widget.grid_remove()
-
+ vars_dict[category] = (None, None, tk.IntVar(value=0))
+
+ # Initially hide all settings
+ for setting in settings:
+ if setting in vars_dict:
+ label, widget, _ = vars_dict[setting]
+ label.grid_remove()
+ widget.grid_remove()
+ return vars_dict

  def toggle_settings(button_scrollable_frame):
  global vars_dict
@@ -449,16 +454,7 @@ def toggle_settings(button_scrollable_frame):
  non_empty_categories = [category for category, settings in categories.items() if any(setting in vars_dict for setting in settings)]
  category_dropdown = spacrDropdownMenu(button_scrollable_frame.scrollable_frame, category_var, non_empty_categories, command=on_category_select)
  category_dropdown.grid(row=1, column=3, sticky="ew", pady=2, padx=2)
-
- for category, settings in categories.items():
- if any(setting in vars_dict for setting in settings):
- vars_dict[category] = (None, None, tk.IntVar(value=0))
- # Initially hide all settings
- for setting in settings:
- if setting in vars_dict:
- label, widget, _ = vars_dict[setting]
- label.grid_remove()
- widget.grid_remove()
+ vars_dict = hide_all_settings(vars_dict, categories)

  def process_fig_queue():
  global canvas, fig_queue, canvas_widget, parent_frame
@@ -531,66 +527,55 @@ def setup_frame(parent_frame):
  return parent_frame, vertical_container, horizontal_container

  def initiate_root(parent, settings_type='mask'):
-
- def main_thread_update_function(root, q, fig_queue, canvas_widget, progress_label):
- try:
- ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
- while not q.empty():
- message = q.get_nowait()
- clean_message = ansi_escape_pattern.sub('', message)
- if clean_message.startswith("Progress"):
- progress_label.config(text=clean_message)
- if clean_message.startswith("\rProgress"):
- progress_label.config(text=clean_message)
- elif clean_message.startswith("Successfully"):
- progress_label.config(text=clean_message)
- elif clean_message.startswith("Processing"):
- progress_label.config(text=clean_message)
- elif clean_message.startswith("scale"):
- pass
- elif clean_message.startswith("plot_cropped_arrays"):
- pass
- elif clean_message == "" or clean_message == "\r" or clean_message.strip() == "":
- pass
- else:
- print(clean_message)
- except Exception as e:
- print(f"Error updating GUI canvas: {e}")
- finally:
- root.after(100, lambda: main_thread_update_function(root, q, fig_queue, canvas_widget, progress_label))
-
  global q, fig_queue, parent_frame, scrollable_frame, button_frame, vars_dict, canvas, canvas_widget, progress_label, button_scrollable_frame
+ from .gui_utils import main_thread_update_function
+ from .gui import gui_app
+ set_start_method('spawn', force=True)
+
  print("Initializing root with settings_type:", settings_type)
  parent_frame = parent

  if not hasattr(parent_frame, 'after_tasks'):
  parent_frame.after_tasks = []

+ # Clear previous content instead of destroying the root
  for widget in parent_frame.winfo_children():
- if widget.winfo_exists():
- try:
- widget.destroy()
- except tk.TclError as e:
- print(f"Error destroying widget: {e}")
+ try:
+ widget.destroy()
+ except tk.TclError as e:
+ print(f"Error destroying widget: {e}")

  q = Queue()
  fig_queue = Queue()
  parent_frame, vertical_container, horizontal_container = setup_frame(parent_frame)
- scrollable_frame, vars_dict = setup_settings_panel(horizontal_container, settings_type) # Adjust height and width as needed
- button_scrollable_frame = setup_button_section(horizontal_container, settings_type)
- canvas, canvas_widget = setup_plot_section(vertical_container)
- console_output = setup_console(vertical_container)

- if settings_type in ['mask', 'measure', 'classify', 'sequencing']:
- progress_output = setup_progress_frame(vertical_container)
+ if settings_type == 'annotate':
+ from .app_annotate import initiate_annotation_app
+ initiate_annotation_app(horizontal_container)
+ elif settings_type == 'make_masks':
+ from .app_make_masks import initiate_make_mask_app
+ initiate_make_mask_app(horizontal_container)
+ elif settings_type is None:
+ #parent.quit()
+ parent_frame.destroy()
+ gui_app()
  else:
- progress_output = None
+ scrollable_frame, vars_dict = setup_settings_panel(horizontal_container, settings_type)
+ button_scrollable_frame = setup_button_section(horizontal_container, settings_type)
+ canvas, canvas_widget = setup_plot_section(vertical_container)
+ console_output = setup_console(vertical_container)
+
+ if settings_type in ['mask', 'measure', 'classify', 'sequencing']:
+ progress_output = setup_progress_frame(vertical_container)
+ else:
+ progress_output = None
+
+ set_globals(q, console_output, parent_frame, vars_dict, canvas, canvas_widget, scrollable_frame, progress_label, fig_queue)
+ process_console_queue()
+ process_fig_queue()
+ after_id = parent_frame.after(100, lambda: main_thread_update_function(parent_frame, q, fig_queue, canvas_widget, progress_label))
+ parent_frame.after_tasks.append(after_id)

- set_globals(q, console_output, parent_frame, vars_dict, canvas, canvas_widget, scrollable_frame, progress_label, fig_queue)
- process_console_queue()
- process_fig_queue()
- after_id = parent_frame.after(100, lambda: main_thread_update_function(parent_frame, q, fig_queue, canvas_widget, progress_label))
- parent_frame.after_tasks.append(after_id)
  print("Root initialization complete")
  return parent_frame, vars_dict
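Note: initiate_root now imports main_thread_update_function from .gui_utils instead of defining it inline, and schedules it with parent_frame.after(100, ...). The underlying pattern is the usual Tk approach of draining a multiprocessing queue from the main loop, since Tk widgets may only be touched from the main thread. A generic standalone sketch of that pattern, not spacr's implementation:

    # Generic sketch of the queue-polling pattern; names here are placeholders.
    import queue
    import tkinter as tk
    from multiprocessing import Queue

    def poll_queue(root, q, label, interval_ms=100):
        try:
            while True:
                label.config(text=q.get_nowait())  # drain pending messages
        except queue.Empty:
            pass
        finally:
            root.after(interval_ms, poll_queue, root, q, label, interval_ms)

    if __name__ == "__main__":
        root = tk.Tk()
        q = Queue()
        label = tk.Label(root, text="waiting...")
        label.pack()
        q.put("hello from a worker process")
        poll_queue(root, q, label)
        root.mainloop()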