spacr 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
spacr/__init__.py CHANGED
@@ -41,7 +41,6 @@ __all__ = [
  "app_annotate",
  "gui_utils",
  "app_make_masks",
- "app_make_masks_v2",
  "app_mask",
  "app_measure",
  "app_classify",
spacr/app_annotate.py CHANGED
@@ -485,12 +485,15 @@ def annotate_with_image_refs(settings, root, shutdown_callback):
  exit_button = tk.Button(root, text="Exit", command=lambda: [app.shutdown(), shutdown_callback()], background='black', foreground='white')
  exit_button.grid(row=app.grid_rows, column=app.grid_cols - 3)

- app.load_images()
+ #app.load_images()

  # Store the shutdown function and next app details in the root
  root.current_app_exit_func = lambda: [app.shutdown(), shutdown_callback()]
  root.next_app_func = proceed_with_app
- root.next_app_args = ("Main App", gui_app) # Specify the main app function
+ root.next_app_args = ("Main App", gui_app)
+
+ # Call load_images after setting up the root window
+ app.load_images()

  def load_next_app(root):
  # Get the next app function and arguments
spacr/core.py CHANGED
@@ -971,7 +971,7 @@ def generate_dataset(src, file_metadata=None, experiment='TSG101_screen', sample
  shutil.rmtree(temp_dir)
  print(f"\nSaved {total_images} images to {tar_name}")

- def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=224, batch_size=64, normalize=True, preload='images', num_workers=10, threshold=0.5, verbose=False):
+ def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=224, batch_size=64, normalize=True, preload='images', n_job=10, threshold=0.5, verbose=False):

  from .io import TarImageDataset
  from .utils import process_vision_results
@@ -994,7 +994,7 @@ def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=22
  model = torch.load(model_path)

  dataset = TarImageDataset(tar_path, transform=transform)
- data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True)
+ data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, n_job=n_job, pin_memory=True)

  model_name = os.path.splitext(os.path.basename(model_path))[0]
  dataset_name = os.path.splitext(os.path.basename(tar_path))[0]
@@ -1034,7 +1034,7 @@ def apply_model_to_tar(tar_path, model_path, file_type='cell_png', image_size=22
  torch.cuda.memory.empty_cache()
  return df

- def apply_model(src, model_path, image_size=224, batch_size=64, normalize=True, num_workers=10):
+ def apply_model(src, model_path, image_size=224, batch_size=64, normalize=True, n_job=10):

  from .io import NoClassDataset

@@ -1055,7 +1055,7 @@ def apply_model(src, model_path, image_size=224, batch_size=64, normalize=True,

  print(f'Loading dataset in {src} with {len(src)} images')
  dataset = NoClassDataset(data_dir=src, transform=transform, shuffle=True, load_to_memory=False)
- data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
+ data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, n_job=n_job)
  print(f'Loaded {len(src)} images')

  result_loc = os.path.splitext(model_path)[0]+datetime.date.today().strftime('%y%m%d')+'_'+os.path.splitext(model_path)[1]+'_test_result.csv'
@@ -1302,7 +1302,7 @@ def generate_training_dataset(src, mode='annotation', annotation_column='test',

  return

- def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_size=32, classes=['nc','pc'], num_workers=None, validation_split=0.0, max_show=2, pin_memory=False, normalize=False, channels=[1, 2, 3], augment=False, verbose=False):
+ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_size=32, classes=['nc','pc'], n_job=None, validation_split=0.0, max_show=2, pin_memory=False, normalize=False, channels=[1, 2, 3], augment=False, verbose=False):

  """
  Generate data loaders for training and validation/test datasets.
@@ -1314,7 +1314,7 @@ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_
  - image_size (int): The size of the input images.
  - batch_size (int): The batch size for the data loaders.
  - classes (list): The list of classes to consider.
- - num_workers (int): The number of worker threads for data loading.
+ - n_job (int): The number of worker threads for data loading.
  - validation_split (float): The fraction of data to use for validation when train_mode is 'erm'.
  - max_show (int): The maximum number of images to show when verbose is True.
  - pin_memory (bool): Whether to pin memory for faster data transfer.
@@ -1404,10 +1404,10 @@ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_
  #val_dataset = augment_dataset(val_dataset, is_grayscale=(len(channels) == 1))
  print(f'Data after augmentation: Train: {len(train_dataset)}')#, Validataion:{len(val_dataset)}')

- train_loaders = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
- val_loaders = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loaders = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
+ val_loaders = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
  else:
- train_loaders = DataLoader(data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loaders = DataLoader(data, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)

  elif train_mode == 'irm':
  data = MyDataset(data_dir, classes, transform=transform, shuffle=shuffle, pin_memory=pin_memory)
@@ -1436,13 +1436,13 @@ def generate_loaders(src, train_mode='erm', mode='train', image_size=224, batch_
  #val_dataset = augment_dataset(val_dataset, is_grayscale=(len(channels) == 1))
  print(f'Data after augmentation: Train: {len(train_dataset)}')#, Validataion:{len(val_dataset)}')

- train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
- val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
+ val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)

  train_loaders.append(train_loader)
  val_loaders.append(val_loader)
  else:
- train_loader = DataLoader(plate_data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers if num_workers is not None else 0, pin_memory=pin_memory)
+ train_loader = DataLoader(plate_data, batch_size=batch_size, shuffle=shuffle, n_job=n_job if n_job is not None else 0, pin_memory=pin_memory)
  train_loaders.append(train_loader)
  val_loaders.append(None)

@@ -1668,7 +1668,7 @@ def preprocess_generate_masks(src, settings={}):

  from .io import preprocess_img_data, _load_and_concatenate_arrays
  from .plot import plot_merged, plot_arrays
- from .utils import _pivot_counts_table, check_mask_folder, adjust_cell_masks, _merge_cells_based_on_parasite_overlap, process_masks
+ from .utils import _pivot_counts_table, check_mask_folder, adjust_cell_masks
  from .settings import set_default_settings_preprocess_generate_masks, set_default_plot_merge_settings

  settings = set_default_settings_preprocess_generate_masks(src, settings)
@@ -2078,11 +2078,11 @@ def identify_masks(src, object_type, model_name, batch_size, channels, diameter,
  else:
  radius = 100

- workers = os.cpu_count()-2
- if workers < 1:
- workers = 1
+ n_job = os.cpu_count()-2
+ if n_job < 1:
+ n_job = 1

- mask_stack = _btrack_track_cells(src, name, batch_filenames, object_type, plot, save, masks_3D=masks, mode=timelapse_mode, timelapse_remove_transient=timelapse_remove_transient, radius=radius, workers=workers)
+ mask_stack = _btrack_track_cells(src, name, batch_filenames, object_type, plot, save, masks_3D=masks, mode=timelapse_mode, timelapse_remove_transient=timelapse_remove_transient, radius=radius, n_job=n_job)
  if timelapse_mode == 'trackpy':
  mask_stack = _trackpy_track_cells(src, name, batch_filenames, object_type, masks, timelapse_displacement, timelapse_memory, timelapse_remove_transient, plot, save, timelapse_mode)

@@ -2303,9 +2303,9 @@ def generate_cellpose_masks(src, settings, object_type):
  else:
  radius = 100

- workers = os.cpu_count()-2
- if workers < 1:
- workers = 1
+ n_job = os.cpu_count()-2
+ if n_job < 1:
+ n_job = 1

  mask_stack = _btrack_track_cells(src=src,
  name=name,
@@ -2317,7 +2317,7 @@ def generate_cellpose_masks(src, settings, object_type):
  mode=timelapse_mode,
  timelapse_remove_transient=timelapse_remove_transient,
  radius=radius,
- workers=workers)
+ n_job=n_job)
  if timelapse_mode == 'trackpy':
  mask_stack = _trackpy_track_cells(src=src,
  name=name,
@@ -2551,7 +2551,7 @@ def compare_cellpose_masks(src, verbose=False, processes=None, save=True):
  common_files.intersection_update(os.listdir(d))
  common_files = list(common_files)

- # Create a pool of workers
+ # Create a pool of n_job
  with Pool(processes=processes) as pool:
  args = [(src, filename, dirs, conditions) for filename in common_files]
  results = pool.map(compare_mask, args)
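
For context, a minimal, hypothetical call of apply_model_to_tar updated for 0.1.7. Paths are placeholders and the import location is assumed; only the worker-count keyword changes, the rest of the signature is as shown in this diff:

    from spacr.core import apply_model_to_tar

    df = apply_model_to_tar(
        tar_path='cells.tar',    # placeholder tar of single-object PNGs
        model_path='model.pth',  # placeholder trained model file
        batch_size=64,
        n_job=10,                # 0.1.6 called this argument num_workers
    )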
spacr/deep_spacr.py CHANGED
@@ -230,7 +230,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  image_size=settings['image_size'],
  batch_size=settings['batch_size'],
  classes=settings['classes'],
- num_workers=settings['num_workers'],
+ n_job=settings['n_job'],
  validation_split=settings['val_split'],
  pin_memory=settings['pin_memory'],
  normalize=settings['normalize'],
@@ -255,7 +255,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  optimizer_type = settings['optimizer_type'],
  use_checkpoint = settings['use_checkpoint'],
  dropout_rate = settings['dropout_rate'],
- num_workers = settings['num_workers'],
+ n_job = settings['n_job'],
  val_loaders = val,
  test_loaders = None,
  intermedeate_save = settings['intermedeate_save'],
@@ -276,7 +276,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  image_size=settings['image_size'],
  batch_size=settings['batch_size'],
  classes=settings['classes'],
- num_workers=settings['num_workers'],
+ n_job=settings['n_job'],
  validation_split=0.0,
  pin_memory=settings['pin_memory'],
  normalize=settings['normalize'],
@@ -315,7 +315,7 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):
  torch.cuda.memory.empty_cache()
  gc.collect()

- def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='erm', epochs=100, learning_rate=0.0001, weight_decay=0.05, amsgrad=False, optimizer_type='adamw', use_checkpoint=False, dropout_rate=0, num_workers=20, val_loaders=None, test_loaders=None, init_weights='imagenet', intermedeate_save=None, chan_dict=None, schedule = None, loss_type='binary_cross_entropy_with_logits', gradient_accumulation=False, gradient_accumulation_steps=4, channels=['r','g','b']):
+ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='erm', epochs=100, learning_rate=0.0001, weight_decay=0.05, amsgrad=False, optimizer_type='adamw', use_checkpoint=False, dropout_rate=0, n_job=20, val_loaders=None, test_loaders=None, init_weights='imagenet', intermedeate_save=None, chan_dict=None, schedule = None, loss_type='binary_cross_entropy_with_logits', gradient_accumulation=False, gradient_accumulation_steps=4, channels=['r','g','b']):
  """
  Trains a model using the specified parameters.

@@ -332,7 +332,7 @@ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='
  optimizer_type (str, optional): The type of optimizer to use. Defaults to 'adamw'.
  use_checkpoint (bool, optional): Whether to use checkpointing during training. Defaults to False.
  dropout_rate (float, optional): The dropout rate for the model. Defaults to 0.
- num_workers (int, optional): The number of workers for data loading. Defaults to 20.
+ n_job (int, optional): The number of n_job for data loading. Defaults to 20.
  val_loaders (list, optional): A list of validation data loaders. Defaults to None.
  test_loaders (list, optional): A list of test data loaders. Defaults to None.
  init_weights (str, optional): The initialization weights for the model. Defaults to 'imagenet'.
@@ -357,7 +357,7 @@ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='

  use_cuda = torch.cuda.is_available()
  device = torch.device("cuda" if use_cuda else "cpu")
- kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
+ kwargs = {'n_job': n_job, 'pin_memory': True} if use_cuda else {}

  for idx, (images, labels, filenames) in enumerate(train_loaders):
  batch, chans, height, width = images.shape
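
The same rename flows through to the settings dict consumed by train_test_model. A hypothetical settings fragment for 0.1.7 (keys taken from the calls shown in this diff, values illustrative) now carries 'n_job' where 0.1.6 expected 'num_workers'; an old dict without that key would fail on the settings['n_job'] lookup:

    settings = {
        'image_size': 224,
        'batch_size': 32,
        'classes': ['nc', 'pc'],
        'n_job': 10,          # was 'num_workers' in 0.1.6
        'val_split': 0.1,
        'pin_memory': True,
        'normalize': True,
    }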
spacr/gui_core.py CHANGED
@@ -5,7 +5,9 @@ from tkinter import ttk
  import tkinter.font as tkFont
  from tkinter import filedialog
  from tkinter import font as tkFont
- from multiprocessing import Process, Value, Queue, Manager, set_start_method
+ from multiprocessing import Process, Value, Queue
+ from multiprocessing.sharedctypes import Synchronized
+ from multiprocessing import set_start_method
  from tkinter import ttk, scrolledtext
  from matplotlib.figure import Figure
  from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
@@ -37,21 +39,18 @@ thread_control = {"run_thread": None, "stop_requested": False}

  def initiate_abort():
  global thread_control
- if thread_control.get("stop_requested") is not None:
- if isinstance(thread_control["stop_requested"], Value):
- thread_control["stop_requested"].value = 1
-
+ if isinstance(thread_control.get("stop_requested"), Synchronized):
+ thread_control["stop_requested"].value = 1
  if thread_control.get("run_thread") is not None:
- thread_control["run_thread"].join(timeout=5)
- if thread_control["run_thread"].is_alive():
- thread_control["run_thread"].terminate()
+ thread_control["run_thread"].terminate()
+ thread_control["run_thread"].join()
  thread_control["run_thread"] = None

- def start_process(q, fig_queue, settings_type='mask'):
+ def start_process_v1(q, fig_queue, settings_type='mask'):
  global thread_control, vars_dict
- from .settings import check_settings
+ from .settings import check_settings, expected_types

- settings = check_settings(vars_dict)
+ settings = check_settings(vars_dict, expected_types, q)
  if thread_control.get("run_thread") is not None:
  initiate_abort()
  stop_requested = Value('i', 0) # multiprocessing shared value for inter-process communication
@@ -68,9 +67,47 @@ def start_process(q, fig_queue, settings_type='mask'):
  thread_control["run_thread"] = Process(target=run_umap_gui, args=(settings, q, fig_queue, stop_requested))
  thread_control["run_thread"].start()

- def import_settings(settings_type='mask'):
+ def start_process(q=None, fig_queue=None, settings_type='mask'):
+ global thread_control, vars_dict
+ from .settings import check_settings, expected_types

+ if q is None:
+ q = Queue()
+ if fig_queue is None:
+ fig_queue = Queue()
+
+ try:
+ settings = check_settings(vars_dict, expected_types, q)
+ except ValueError as e:
+ q.put(f"Error: {e}")
+ return
+
+ if thread_control.get("run_thread") is not None:
+ initiate_abort()
+
+ stop_requested = Value('i', 0) # multiprocessing shared value for inter-process communication
+ thread_control["stop_requested"] = stop_requested
+
+ process_args = (settings, q, fig_queue, stop_requested)
+
+ if settings_type == 'mask':
+ thread_control["run_thread"] = Process(target=run_mask_gui, args=process_args)
+ elif settings_type == 'measure':
+ thread_control["run_thread"] = Process(target=run_measure_gui, args=process_args)
+ elif settings_type == 'classify':
+ thread_control["run_thread"] = Process(target=run_classify_gui, args=process_args)
+ elif settings_type == 'sequencing':
+ thread_control["run_thread"] = Process(target=run_sequencing_gui, args=process_args)
+ elif settings_type == 'umap':
+ thread_control["run_thread"] = Process(target=run_umap_gui, args=process_args)
+ else:
+ q.put(f"Error: Unknown settings type '{settings_type}'")
+ return
+
+ thread_control["run_thread"].start()
+
+ def import_settings(settings_type='mask'):
+ global vars_dict, scrollable_frame, button_scrollable_frame
  from .settings import generate_fields

  def read_settings_from_csv(csv_file_path):
@@ -98,6 +135,7 @@ def import_settings(settings_type='mask'):
  if not csv_file_path: # If no file is selected, return early
  return

+ #vars_dict = hide_all_settings(vars_dict, categories=None)
  csv_settings = read_settings_from_csv(csv_file_path)
  if settings_type == 'mask':
  settings = set_default_settings_preprocess_generate_masks(src='path', settings={})
@@ -115,6 +153,7 @@ def import_settings(settings_type='mask'):
  variables = convert_settings_dict_for_gui(settings)
  new_settings = update_settings_from_csv(variables, csv_settings)
  vars_dict = generate_fields(new_settings, scrollable_frame)
+ vars_dict = hide_all_settings(vars_dict, categories=None)

  def convert_settings_dict_for_gui(settings):
  variables = {}
@@ -314,9 +353,8 @@ def download_dataset(repo_id, subfolder, local_dir=None, retries=5, delay=5):

  raise Exception("Failed to download files after multiple attempts.")

-
  def setup_button_section(horizontal_container, settings_type='mask', settings_row=5, run=True, abort=True, download=True, import_btn=True):
- global button_frame, run_button, abort_button, download_dataset_button, import_button, q, fig_queue, vars_dict
+ global button_frame, button_scrollable_frame, run_button, abort_button, download_dataset_button, import_button, q, fig_queue, vars_dict

  button_frame = tk.Frame(horizontal_container, bg='black')
  horizontal_container.add(button_frame, stretch="always", sticky="nsew")
@@ -357,60 +395,29 @@ def setup_button_section(horizontal_container, settings_type='mask', settings_ro
  toggle_settings(button_scrollable_frame)
  return button_scrollable_frame

- def toggle_settings_v1(button_scrollable_frame):
- global vars_dict
- from .settings import categories
-
- if vars_dict is None:
- raise ValueError("vars_dict is not initialized.")
+ def hide_all_settings(vars_dict, categories):
+ """
+ Function to initially hide all settings in the GUI.

- def toggle_category(settings, var):
- for setting in settings:
- if setting in vars_dict:
- label, widget, _ = vars_dict[setting]
- if var.get() == 0:
- label.grid_remove()
- widget.grid_remove()
- else:
- label.grid()
- widget.grid()
+ Parameters:
+ - categories: dict, The categories of settings with their corresponding settings.
+ - vars_dict: dict, The dictionary containing the settings and their corresponding widgets.
+ """

- row = 1
- col = 3
- category_idx = 0
+ if categories is None:
+ from .settings import categories

  for category, settings in categories.items():
  if any(setting in vars_dict for setting in settings):
- category_var = tk.IntVar(value=0)
- vars_dict[category] = (None, None, category_var)
- toggle = spacrCheckbutton(
- button_scrollable_frame.scrollable_frame,
- text=category,
- variable=category_var,
- command=lambda cat=settings, var=category_var: toggle_category(cat, var)
- )
- # Directly set the style
- style = ttk.Style()
- font_style = tkFont.Font(family="Helvetica", size=12, weight="bold")
- style.configure('Spacr.TCheckbutton', font=font_style, background='black', foreground='#ffffff', indicatoron=False, relief='flat')
- style.map('Spacr.TCheckbutton', background=[('selected', 'black'), ('active', 'black')], foreground=[('selected', '#ffffff'), ('active', '#ffffff')])
- toggle.configure(style='Spacr.TCheckbutton')
- toggle.grid(row=row, column=col, sticky="w", pady=2, padx=2)
- #row += 1
- col += 1
- category_idx += 1
-
- if category_idx % 4 == 0:
- row += 1
- col = 2
-
- for settings in categories.values():
- for setting in settings:
- if setting in vars_dict:
- label, widget, _ = vars_dict[setting]
- label.grid_remove()
- widget.grid_remove()
-
+ vars_dict[category] = (None, None, tk.IntVar(value=0))
+
+ # Initially hide all settings
+ for setting in settings:
+ if setting in vars_dict:
+ label, widget, _ = vars_dict[setting]
+ label.grid_remove()
+ widget.grid_remove()
+ return vars_dict

  def toggle_settings(button_scrollable_frame):
  global vars_dict
@@ -449,16 +456,7 @@ def toggle_settings(button_scrollable_frame):
  non_empty_categories = [category for category, settings in categories.items() if any(setting in vars_dict for setting in settings)]
  category_dropdown = spacrDropdownMenu(button_scrollable_frame.scrollable_frame, category_var, non_empty_categories, command=on_category_select)
  category_dropdown.grid(row=1, column=3, sticky="ew", pady=2, padx=2)
-
- for category, settings in categories.items():
- if any(setting in vars_dict for setting in settings):
- vars_dict[category] = (None, None, tk.IntVar(value=0))
- # Initially hide all settings
- for setting in settings:
- if setting in vars_dict:
- label, widget, _ = vars_dict[setting]
- label.grid_remove()
- widget.grid_remove()
+ vars_dict = hide_all_settings(vars_dict, categories)

  def process_fig_queue():
  global canvas, fig_queue, canvas_widget, parent_frame
@@ -532,6 +530,8 @@ def setup_frame(parent_frame):

  def initiate_root(parent, settings_type='mask'):

+ set_start_method('spawn', force=True)
+
  def main_thread_update_function(root, q, fig_queue, canvas_widget, progress_label):
  try:
  ansi_escape_pattern = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
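
For orientation, a standalone sketch (not spacr code) of the abort pattern the reworked initiate_abort()/start_process() rely on: a shared multiprocessing Value is handed to the worker process so the GUI can request a stop, and the process is terminated and joined if it does not exit on its own:

    from multiprocessing import Process, Value
    import time

    def worker(stop_requested):
        # placeholder loop standing in for the long-running spacr task
        while not stop_requested.value:
            time.sleep(0.1)

    if __name__ == '__main__':
        stop_requested = Value('i', 0)      # shared int flag, as in start_process()
        p = Process(target=worker, args=(stop_requested,))
        p.start()
        stop_requested.value = 1            # cooperative stop request
        p.join(timeout=5)
        if p.is_alive():
            p.terminate()                   # hard stop, as initiate_abort() now does
            p.join()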
spacr/gui_elements.py CHANGED
@@ -101,9 +101,11 @@ class spacrButton(tk.Frame):
  super().__init__(parent, *args, **kwargs)
  self.text = text
  self.command = command
- screen_height = self.winfo_screenheight()
- button_height = screen_height // 50
- button_width = button_height * 3
+ #screen_height = self.winfo_screenheight()
+ button_height = 50 #screen_height // 50
+ button_width = 140 #button_height * 3
+
+ #print(button_height, button_width)

  # Increase the canvas size to accommodate the button and the rim
  self.canvas = tk.Canvas(self, width=button_width + 4, height=button_height + 4, highlightthickness=0, bg="black")
@@ -294,8 +296,8 @@ def create_menu_bar(root):
  def set_dark_style(style):
  font_style = tkFont.Font(family="Helvetica", size=24)
  style.configure('TEntry', padding='5 5 5 5', borderwidth=1, relief='solid', fieldbackground='black', foreground='#ffffff', font=font_style)
- style.configure('TCombobox', fieldbackground='black', background='black', foreground='#ffffff', font=font_style)
- style.map('TCombobox', fieldbackground=[('readonly', 'black')], foreground=[('readonly', '#ffffff')])
+ style.configure('TCombobox', fieldbackground='black', background='black', foreground='#ffffff', selectbackground='black', selectforeground='#ffffff', font=font_style)
+ style.map('TCombobox', fieldbackground=[('readonly', 'black')], foreground=[('readonly', '#ffffff')], selectbackground=[('readonly', 'black')], selectforeground=[('readonly', '#ffffff')])
  style.configure('Custom.TButton', background='black', foreground='white', bordercolor='white', focusthickness=3, focuscolor='white', font=('Helvetica', 12))
  style.map('Custom.TButton', background=[('active', 'teal'), ('!active', 'black')], foreground=[('active', 'white'), ('!active', 'white')], bordercolor=[('active', 'white'), ('!active', 'white')])
  style.configure('Custom.TLabel', padding='5 5 5 5', borderwidth=1, relief='flat', background='black', foreground='#ffffff', font=font_style)
spacr/gui_utils.py CHANGED
@@ -68,7 +68,7 @@ def load_app(root, app_name, app_func):
  else:
  proceed_with_app(root, app_name, app_func)

- def parse_list(value):
+ def parse_list_v1(value):
  try:
  parsed_value = ast.literal_eval(value)
  if isinstance(parsed_value, list):
@@ -78,6 +78,16 @@ def parse_list(value):
  except (ValueError, SyntaxError):
  raise ValueError("Invalid format for list")

+ def parse_list(value):
+ try:
+ parsed_value = ast.literal_eval(value)
+ if isinstance(parsed_value, list):
+ return parsed_value
+ else:
+ raise ValueError(f"Expected a list but got {type(parsed_value).__name__}")
+ except (ValueError, SyntaxError) as e:
+ raise ValueError(f"Invalid format for list: {value}. Error: {e}")
+
  def create_input_field(frame, label_text, row, var_type='entry', options=None, default_value=None):
  label_column = 0
  widget_column = 1
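
Illustrative behaviour of the new parse_list above (the import path is assumed; ast.literal_eval only accepts Python literals, and the rewritten error message now includes the offending value):

    from spacr.gui_utils import parse_list

    parse_list("[1, 2, 3]")    # -> [1, 2, 3]
    parse_list("(1, 2)")       # ValueError: Invalid format for list: (1, 2). Error: Expected a list but got tuple
    parse_list("not a list")   # ValueError: Invalid format for list: not a list. Error: ...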
spacr/gui_wrappers.py CHANGED
@@ -4,15 +4,27 @@ matplotlib.use('Agg')

  fig_queue = None

- def my_show():
+ def spacrFigShow_v1():
  """
  Replacement for plt.show() that queues figures instead of displaying them.
  """
+ global fig_queue
  fig = plt.gcf()
  fig_queue.put(fig)
  plt.close(fig)

- def measure_crop_wrapper(settings, q, fig_queue):
+ def spacrFigShow(fig_queue=None):
+ """
+ Replacement for plt.show() that queues figures instead of displaying them.
+ """
+ fig = plt.gcf()
+ if fig_queue:
+ fig_queue.put(fig)
+ else:
+ fig.show()
+ plt.close(fig)
+
+ def preprocess_generate_masks_wrapper(settings, q, fig_queue):
  """
  Wraps the measure_crop function to integrate with GUI processes.

@@ -24,19 +36,18 @@ def measure_crop_wrapper(settings, q, fig_queue):

  # Temporarily override plt.show
  original_show = plt.show
- plt.show = my_show
+ plt.show = lambda: spacrFigShow(fig_queue)

  try:
- print('start')
- spacr.measure.measure_crop(settings=settings)
+ spacr.core.preprocess_generate_masks(src=settings['src'], settings=settings)
  except Exception as e:
  errorMessage = f"Error during processing: {e}"
  q.put(errorMessage)
  traceback.print_exc()
  finally:
- plt.show = original_show
-
- def preprocess_generate_masks_wrapper(settings, q, fig_queue):
+ plt.show = original_show
+
+ def measure_crop_wrapper(settings, q, fig_queue):
  """
  Wraps the measure_crop function to integrate with GUI processes.

@@ -48,22 +59,23 @@ def preprocess_generate_masks_wrapper(settings, q, fig_queue):

  # Temporarily override plt.show
  original_show = plt.show
- plt.show = my_show
+ plt.show = lambda: spacrFigShow(fig_queue)

  try:
- spacr.core.preprocess_generate_masks(src=settings['src'], settings=settings)
+ print('start')
+ spacr.measure.measure_crop(settings=settings)
  except Exception as e:
  errorMessage = f"Error during processing: {e}"
  q.put(errorMessage)
  traceback.print_exc()
  finally:
- plt.show = original_show
+ plt.show = original_show

  def sequencing_wrapper(settings, q, fig_queue):

  # Temporarily override plt.show
  original_show = plt.show
- plt.show = my_show
+ plt.show = lambda: spacrFigShow(fig_queue)

  try:
  spacr.sequencing.analyze_reads(settings=settings)
@@ -78,7 +90,7 @@ def umap_wrapper(settings, q, fig_queue):

  # Temporarily override plt.show
  original_show = plt.show
- plt.show = my_show
+ plt.show = lambda: spacrFigShow(fig_queue)

  try:
  spacr.core.generate_image_umap(settings=settings)
@@ -101,7 +113,7 @@ def train_test_model_wrapper(settings, q, fig_queue):

  # Temporarily override plt.show
  original_show = plt.show
- plt.show = my_show
+ plt.show = lambda: spacrFigShow(fig_queue)

  try:
  spacr.core.train_test_model(settings['src'], settings=settings)
@@ -125,7 +137,7 @@ def run_multiple_simulations_wrapper(settings, q, fig_queue):

  # Temporarily override plt.show
  original_show = plt.show
- plt.show = my_show
+ plt.show = lambda: spacrFigShow(fig_queue)

  try:
  spacr.sim.run_multiple_simulations(settings=settings)
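
For context, a minimal standalone sketch (not spacr code, and using a plain queue.Queue in place of the GUI's multiprocessing queue) of the plt.show override these wrappers now share: figures drawn by the wrapped call are pushed onto a queue for the GUI rather than displayed, and plt.show is restored afterwards:

    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    from queue import Queue

    def spacrFigShow(fig_queue=None):
        fig = plt.gcf()
        if fig_queue:
            fig_queue.put(fig)   # hand the figure to the GUI
        else:
            fig.show()
        plt.close(fig)

    fig_queue = Queue()
    original_show = plt.show
    plt.show = lambda: spacrFigShow(fig_queue)
    try:
        plt.plot([0, 1], [0, 1])
        plt.show()               # figure lands on fig_queue, not on screen
    finally:
        plt.show = original_show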