spacr 1.0.6__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/__init__.py CHANGED
@@ -15,21 +15,17 @@ from . import utils
  from . import settings
  from . import plot
  from . import measure
- from . import sim
  from . import sequencing
  from . import timelapse
  from . import deep_spacr
- from . import app_annotate
  from . import gui_utils
  from . import gui_elements
  from . import gui_core
  from . import gui
- from . import app_make_masks
  from . import app_mask
  from . import app_measure
  from . import app_classify
  from . import app_sequencing
- from . import app_umap
  from . import submodules
  from . import ml
  from . import toxo
@@ -44,23 +40,18 @@ __all__ = [
      "settings",
      "plot",
      "measure",
-     "sim",
      "sequencing",
      "timelapse",
      "deep_spacr",
-     "app_annotate",
      "gui_utils",
      "gui_elements",
      "gui_core",
      "gui",
-     "app_make_masks",
      "app_mask",
      "app_measure",
      "app_classify",
      "app_sequencing",
-     "app_umap",
      "submodules",
-     "openai",
      "ml",
      "toxo",
      "spacr_cellpose",
spacr/core.py CHANGED
@@ -1,7 +1,6 @@
  import os, gc, torch, time, random
  import numpy as np
  import pandas as pd
- import matplotlib.pyplot as plt
  from IPython.display import display

  import warnings
@@ -449,362 +448,6 @@ def generate_cellpose_masks(src, settings, object_type):
      torch.cuda.empty_cache()
      return

- def generate_image_umap(settings={}):
-     """
-     Generate UMAP or tSNE embedding and visualize the data with clustering.
-
-     Parameters:
-     settings (dict): Dictionary containing the following keys:
-         src (str): Source directory containing the data.
-         row_limit (int): Limit the number of rows to process.
-         tables (list): List of table names to read from the database.
-         visualize (str): Visualization type.
-         image_nr (int): Number of images to display.
-         dot_size (int): Size of dots in the scatter plot.
-         n_neighbors (int): Number of neighbors for UMAP.
-         figuresize (int): Size of the figure.
-         black_background (bool): Whether to use a black background.
-         remove_image_canvas (bool): Whether to remove the image canvas.
-         plot_outlines (bool): Whether to plot outlines.
-         plot_points (bool): Whether to plot points.
-         smooth_lines (bool): Whether to smooth lines.
-         verbose (bool): Whether to print verbose output.
-         embedding_by_controls (bool): Whether to use embedding from controls.
-         col_to_compare (str): Column to compare for control-based embedding.
-         pos (str): Positive control value.
-         neg (str): Negative control value.
-         clustering (str): Clustering method ('DBSCAN' or 'KMeans').
-         exclude (list): List of columns to exclude from the analysis.
-         plot_images (bool): Whether to plot images.
-         reduction_method (str): Dimensionality reduction method ('UMAP' or 'tSNE').
-         save_figure (bool): Whether to save the figure as a PDF.
-
-     Returns:
-     pd.DataFrame: DataFrame with the original data and an additional column 'cluster' containing the cluster identity.
-     """
-
-     from .io import _read_and_join_tables
-     from .utils import get_db_paths, preprocess_data, reduction_and_clustering, remove_noise, generate_colors, correct_paths, plot_embedding, plot_clusters_grid, cluster_feature_analysis, map_condition
-     from .settings import set_default_umap_image_settings
-     settings = set_default_umap_image_settings(settings)
-
-     if isinstance(settings['src'], str):
-         settings['src'] = [settings['src']]
-
-     if settings['plot_images'] is False:
-         settings['black_background'] = False
-
-     if settings['color_by']:
-         settings['remove_cluster_noise'] = False
-         settings['plot_outlines'] = False
-         settings['smooth_lines'] = False
-
-     print(f'Generating Image UMAP ...')
-     settings_df = pd.DataFrame(list(settings.items()), columns=['Key', 'Value'])
-     settings_dir = os.path.join(settings['src'][0],'settings')
-     settings_csv = os.path.join(settings_dir,'embedding_settings.csv')
-     os.makedirs(settings_dir, exist_ok=True)
-     settings_df.to_csv(settings_csv, index=False)
-     display(settings_df)
-
-     db_paths = get_db_paths(settings['src'])
-     tables = settings['tables'] + ['png_list']
-     all_df = pd.DataFrame()
-
-     for i,db_path in enumerate(db_paths):
-         df = _read_and_join_tables(db_path, table_names=tables)
-         df, image_paths_tmp = correct_paths(df, settings['src'][i])
-         all_df = pd.concat([all_df, df], axis=0)
-         #image_paths.extend(image_paths_tmp)
-
-     all_df['cond'] = all_df['columnID'].apply(map_condition, neg=settings['neg'], pos=settings['pos'], mix=settings['mix'])
-
-     if settings['exclude_conditions']:
-         if isinstance(settings['exclude_conditions'], str):
-             settings['exclude_conditions'] = [settings['exclude_conditions']]
-         row_count_before = len(all_df)
-         all_df = all_df[~all_df['cond'].isin(settings['exclude_conditions'])]
-         if settings['verbose']:
-             print(f'Excluded {row_count_before - len(all_df)} rows after excluding: {settings["exclude_conditions"]}, rows left: {len(all_df)}')
-
-     if settings['row_limit'] is not None:
-         all_df = all_df.sample(n=settings['row_limit'], random_state=42)
-
-     image_paths = all_df['png_path'].to_list()
-
-     if settings['embedding_by_controls']:
-
-         # Extract and reset the index for the column to compare
-         col_to_compare = all_df[settings['col_to_compare']].reset_index(drop=True)
-         print(col_to_compare)
-         #if settings['only_top_features']:
-         #    column_list = None
-
-         # Preprocess the data to obtain numeric data
-         numeric_data = preprocess_data(all_df, settings['filter_by'], settings['remove_highly_correlated'], settings['log_data'], settings['exclude'])
-
-         # Convert numeric_data back to a DataFrame to align with col_to_compare
-         numeric_data_df = pd.DataFrame(numeric_data)
-
-         # Ensure numeric_data_df and col_to_compare are properly aligned
-         numeric_data_df = numeric_data_df.reset_index(drop=True)
-
-         # Assign the column back to numeric_data_df
-         numeric_data_df[settings['col_to_compare']] = col_to_compare
-
-         # Subset the dataframe based on specified column values for controls
-         positive_control_df = numeric_data_df[numeric_data_df[settings['col_to_compare']] == settings['pos']].copy()
-         negative_control_df = numeric_data_df[numeric_data_df[settings['col_to_compare']] == settings['neg']].copy()
-         control_numeric_data_df = pd.concat([positive_control_df, negative_control_df])
-
-         # Drop the comparison column from numeric_data_df and control_numeric_data_df
-         numeric_data_df = numeric_data_df.drop(columns=[settings['col_to_compare']])
-         control_numeric_data_df = control_numeric_data_df.drop(columns=[settings['col_to_compare']])
-
-         # Convert numeric_data_df and control_numeric_data_df back to numpy arrays
-         numeric_data = numeric_data_df.values
-         control_numeric_data = control_numeric_data_df.values
-
-         # Train the reducer on control data
-         _, _, reducer = reduction_and_clustering(control_numeric_data, settings['n_neighbors'], settings['min_dist'], settings['metric'], settings['eps'], settings['min_samples'], settings['clustering'], settings['reduction_method'], settings['verbose'], n_jobs=settings['n_jobs'], mode='fit', model=False)
-
-         # Apply the trained reducer to the entire dataset
-         numeric_data = preprocess_data(all_df, settings['filter_by'], settings['remove_highly_correlated'], settings['log_data'], settings['exclude'])
-         embedding, labels, _ = reduction_and_clustering(numeric_data, settings['n_neighbors'], settings['min_dist'], settings['metric'], settings['eps'], settings['min_samples'], settings['clustering'], settings['reduction_method'], settings['verbose'], n_jobs=settings['n_jobs'], mode=None, model=reducer)
-
-     else:
-         if settings['resnet_features']:
-             # placeholder for resnet features, not implemented yet
-             pass
-             #numeric_data, embedding, labels = generate_umap_from_images(image_paths, settings['n_neighbors'], settings['min_dist'], settings['metric'], settings['clustering'], settings['eps'], settings['min_samples'], settings['n_jobs'], settings['verbose'])
-         else:
-             # Apply the trained reducer to the entire dataset
-             numeric_data = preprocess_data(all_df, settings['filter_by'], settings['remove_highly_correlated'], settings['log_data'], settings['exclude'])
-             embedding, labels, _ = reduction_and_clustering(numeric_data, settings['n_neighbors'], settings['min_dist'], settings['metric'], settings['eps'], settings['min_samples'], settings['clustering'], settings['reduction_method'], settings['verbose'], n_jobs=settings['n_jobs'])
-
-     if settings['remove_cluster_noise']:
-         # Remove noise from the clusters (removes -1 labels from DBSCAN)
-         embedding, labels = remove_noise(embedding, labels)
-
-     # Plot the results
-     if settings['color_by']:
-         if settings['embedding_by_controls']:
-             labels = all_df[settings['color_by']]
-         else:
-             labels = all_df[settings['color_by']]
-
-     # Generate colors for the clusters
-     colors = generate_colors(len(np.unique(labels)), settings['black_background'])
-
-     # Plot the embedding
-     umap_plt = plot_embedding(embedding, image_paths, labels, settings['image_nr'], settings['img_zoom'], colors, settings['plot_by_cluster'], settings['plot_outlines'], settings['plot_points'], settings['plot_images'], settings['smooth_lines'], settings['black_background'], settings['figuresize'], settings['dot_size'], settings['remove_image_canvas'], settings['verbose'])
-     if settings['plot_cluster_grids'] and settings['plot_images']:
-         grid_plt = plot_clusters_grid(embedding, labels, settings['image_nr'], image_paths, colors, settings['figuresize'], settings['black_background'], settings['verbose'])
-
-     # Save figure as PDF if required
-     if settings['save_figure']:
-         results_dir = os.path.join(settings['src'][0], 'results')
-         os.makedirs(results_dir, exist_ok=True)
-         reduction_method = settings['reduction_method'].upper()
-         embedding_path = os.path.join(results_dir, f'{reduction_method}_embedding.pdf')
-         umap_plt.savefig(embedding_path, format='pdf')
-         print(f'Saved {reduction_method} embedding to {embedding_path} and grid to {embedding_path}')
-         if settings['plot_cluster_grids'] and settings['plot_images']:
-             grid_path = os.path.join(results_dir, f'{reduction_method}_grid.pdf')
-             grid_plt.savefig(grid_path, format='pdf')
-             print(f'Saved {reduction_method} embedding to {embedding_path} and grid to {grid_path}')
-
-     # Add cluster labels to the dataframe
-     if len(labels) > 0:
-         all_df['cluster'] = labels
-     else:
-         all_df['cluster'] = 1 # Assign a default cluster label
-         print("No clusters found. Consider reducing 'min_samples' or increasing 'eps' for DBSCAN.")
-
-     # Save the results to a CSV file
-     results_dir = os.path.join(settings['src'][0], 'results')
-     results_csv = os.path.join(results_dir,'embedding_results.csv')
-     os.makedirs(results_dir, exist_ok=True)
-     all_df.to_csv(results_csv, index=False)
-     print(f'Results saved to {results_csv}')
-
-     if settings['analyze_clusters']:
-         combined_results = cluster_feature_analysis(all_df)
-         results_dir = os.path.join(settings['src'][0], 'results')
-         cluster_results_csv = os.path.join(results_dir,'cluster_results.csv')
-         os.makedirs(results_dir, exist_ok=True)
-         combined_results.to_csv(cluster_results_csv, index=False)
-         print(f'Cluster results saved to {cluster_results_csv}')
-
-     return all_df
-
- def reducer_hyperparameter_search(settings={}, reduction_params=None, dbscan_params=None, kmeans_params=None, save=False):
-     """
-     Perform a hyperparameter search for UMAP or tSNE on the given data.
-
-     Parameters:
-     settings (dict): Dictionary containing the following keys:
-         src (str): Source directory containing the data.
-         row_limit (int): Limit the number of rows to process.
-         tables (list): List of table names to read from the database.
-         filter_by (str): Column to filter the data.
-         sample_size (int): Number of samples to use for the hyperparameter search.
-         remove_highly_correlated (bool): Whether to remove highly correlated columns.
-         log_data (bool): Whether to log transform the data.
-         verbose (bool): Whether to print verbose output.
-         reduction_method (str): Dimensionality reduction method ('UMAP' or 'tSNE').
-     reduction_params (list): List of dictionaries containing hyperparameters to test for the reduction method.
-     dbscan_params (list): List of dictionaries containing DBSCAN hyperparameters to test.
-     kmeans_params (list): List of dictionaries containing KMeans hyperparameters to test.
-     pointsize (int): Size of the points in the scatter plot.
-     save (bool): Whether to save the resulting plot as a file.
-
-     Returns:
-     None
-     """
-
-     from .io import _read_and_join_tables
-     from .utils import get_db_paths, preprocess_data, search_reduction_and_clustering, generate_colors, map_condition
-     from .settings import set_default_umap_image_settings
-
-     settings = set_default_umap_image_settings(settings)
-     pointsize = settings['dot_size']
-     if isinstance(dbscan_params, dict):
-         dbscan_params = [dbscan_params]
-
-     if isinstance(kmeans_params, dict):
-         kmeans_params = [kmeans_params]
-
-     if isinstance(reduction_params, dict):
-         reduction_params = [reduction_params]
-
-     # Determine reduction method based on the keys in reduction_param
-     if any('n_neighbors' in param for param in reduction_params):
-         reduction_method = 'umap'
-     elif any('perplexity' in param for param in reduction_params):
-         reduction_method = 'tsne'
-     elif any('perplexity' in param for param in reduction_params) and any('n_neighbors' in param for param in reduction_params):
-         raise ValueError("Reduction parameters must include 'n_neighbors' for UMAP or 'perplexity' for tSNE, not both.")
-
-     if settings['reduction_method'].lower() != reduction_method:
-         settings['reduction_method'] = reduction_method
-         print(f'Changed reduction method to {reduction_method} based on the provided parameters.')
-
-     if settings['verbose']:
-         display(pd.DataFrame(list(settings.items()), columns=['Key', 'Value']))
-
-     db_paths = get_db_paths(settings['src'])
-
-     tables = settings['tables']
-     all_df = pd.DataFrame()
-     for db_path in db_paths:
-         df = _read_and_join_tables(db_path, table_names=tables)
-         all_df = pd.concat([all_df, df], axis=0)
-
-     all_df['cond'] = all_df['columnID'].apply(map_condition, neg=settings['neg'], pos=settings['pos'], mix=settings['mix'])
-
-     if settings['exclude_conditions']:
-         if isinstance(settings['exclude_conditions'], str):
-             settings['exclude_conditions'] = [settings['exclude_conditions']]
-         row_count_before = len(all_df)
-         all_df = all_df[~all_df['cond'].isin(settings['exclude_conditions'])]
-         if settings['verbose']:
-             print(f'Excluded {row_count_before - len(all_df)} rows after excluding: {settings["exclude_conditions"]}, rows left: {len(all_df)}')
-
-     if settings['row_limit'] is not None:
-         all_df = all_df.sample(n=settings['row_limit'], random_state=42)
-
-     numeric_data = preprocess_data(all_df, settings['filter_by'], settings['remove_highly_correlated'], settings['log_data'], settings['exclude'])
-
-     # Combine DBSCAN and KMeans parameters
-     clustering_params = []
-     if dbscan_params:
-         for param in dbscan_params:
-             param['method'] = 'dbscan'
-             clustering_params.append(param)
-     if kmeans_params:
-         for param in kmeans_params:
-             param['method'] = 'kmeans'
-             clustering_params.append(param)
-
-     print('Testing paramiters:', reduction_params)
-     print('Testing clustering paramiters:', clustering_params)
-
-     # Calculate the grid size
-     grid_rows = len(reduction_params)
-     grid_cols = len(clustering_params)
-
-     fig_width = grid_cols*10
-     fig_height = grid_rows*10
-
-     fig, axs = plt.subplots(grid_rows, grid_cols, figsize=(fig_width, fig_height))
-
-     # Make sure axs is always an array of axes
-     axs = np.atleast_1d(axs)
-
-     # Iterate through the Cartesian product of reduction and clustering hyperparameters
-     for i, reduction_param in enumerate(reduction_params):
-         for j, clustering_param in enumerate(clustering_params):
-             if len(clustering_params) <= 1:
-                 axs[i].axis('off')
-                 ax = axs[i]
-             elif len(reduction_params) <= 1:
-                 axs[j].axis('off')
-                 ax = axs[j]
-             else:
-                 ax = axs[i, j]
-
-             # Perform dimensionality reduction and clustering
-             if settings['reduction_method'].lower() == 'umap':
-                 n_neighbors = reduction_param.get('n_neighbors', 15)
-
-                 if isinstance(n_neighbors, float):
-                     n_neighbors = int(n_neighbors * len(numeric_data))
-
-                 min_dist = reduction_param.get('min_dist', 0.1)
-                 embedding, labels = search_reduction_and_clustering(numeric_data, n_neighbors, min_dist, settings['metric'],
-                                                                     clustering_param.get('eps', 0.5), clustering_param.get('min_samples', 5),
-                                                                     clustering_param['method'], settings['reduction_method'], settings['verbose'], reduction_param, n_jobs=settings['n_jobs'])
-
-             elif settings['reduction_method'].lower() == 'tsne':
-                 perplexity = reduction_param.get('perplexity', 30)
-
-                 if isinstance(perplexity, float):
-                     perplexity = int(perplexity * len(numeric_data))
-
-                 embedding, labels = search_reduction_and_clustering(numeric_data, perplexity, 0.1, settings['metric'],
-                                                                     clustering_param.get('eps', 0.5), clustering_param.get('min_samples', 5),
-                                                                     clustering_param['method'], settings['reduction_method'], settings['verbose'], reduction_param, n_jobs=settings['n_jobs'])
-
-             else:
-                 raise ValueError(f"Unsupported reduction method: {settings['reduction_method']}. Supported methods are 'UMAP' and 'tSNE'")
-
-             # Plot the results
-             if settings['color_by']:
-                 unique_groups = all_df[settings['color_by']].unique()
-                 colors = generate_colors(len(unique_groups), False)
-                 for group, color in zip(unique_groups, colors):
-                     indices = all_df[settings['color_by']] == group
-                     ax.scatter(embedding[indices, 0], embedding[indices, 1], s=pointsize, label=f"{group}", color=color)
-             else:
-                 unique_labels = np.unique(labels)
-                 colors = generate_colors(len(unique_labels), False)
-                 for label, color in zip(unique_labels, colors):
-                     ax.scatter(embedding[labels == label, 0], embedding[labels == label, 1], s=pointsize, label=f"Cluster {label}", color=color)
-
-             ax.set_title(f"{settings['reduction_method']} {reduction_param}\n{clustering_param['method']} {clustering_param}")
-             ax.legend()
-
-     plt.tight_layout()
-     if save:
-         results_dir = os.path.join(settings['src'], 'results')
-         os.makedirs(results_dir, exist_ok=True)
-         plt.savefig(os.path.join(results_dir, 'hyperparameter_search.pdf'))
-     else:
-         plt.show()
-
-     return
-
  def generate_screen_graphs(settings):
      """
      Generate screen graphs for different measurements in a given source directory.
spacr/gui.py CHANGED
@@ -44,22 +44,15 @@ class MainApp(tk.Tk):
          self.main_gui_apps = {
              "Mask": (lambda frame: initiate_root(self, 'mask'), "Generate cellpose masks for cells, nuclei and pathogen images."),
              "Measure": (lambda frame: initiate_root(self, 'measure'), "Measure single object intensity and morphological feature. Crop and save single object image"),
-             #"Annotate": (lambda frame: initiate_root(self, 'annotate'), "Annotation single object images on a grid. Annotations are saved to database."),
-             #"Make Masks": (lambda frame: initiate_root(self, 'make_masks'), "Adjust pre-existing Cellpose models to your specific dataset for improved performance"),
              "Classify": (lambda frame: initiate_root(self, 'classify'), "Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images."),
          }

          self.additional_gui_apps = {
-             #"Umap": (lambda frame: initiate_root(self, 'umap'), "Generate UMAP embeddings with datapoints represented as images."),
-             #"Train Cellpose": (lambda frame: initiate_root(self, 'train_cellpose'), "Train custom Cellpose models."),
-             #"ML Analyze": (lambda frame: initiate_root(self, 'ml_analyze'), "Machine learning analysis of data."),
-             #"Cellpose Masks": (lambda frame: initiate_root(self, 'cellpose_masks'), "Generate Cellpose masks."),
-             #"Cellpose All": (lambda frame: initiate_root(self, 'cellpose_all'), "Run Cellpose on all images."),
+             "ML Analyze": (lambda frame: initiate_root(self, 'ml_analyze'), "Machine learning analysis of data."),
              "Map Barcodes": (lambda frame: initiate_root(self, 'map_barcodes'), "Map barcodes to data."),
              "Regression": (lambda frame: initiate_root(self, 'regression'), "Perform regression analysis."),
-             #"Recruitment": (lambda frame: initiate_root(self, 'recruitment'), "Analyze recruitment data."),
+             "Recruitment": (lambda frame: initiate_root(self, 'recruitment'), "Analyze recruitment data."),
              "Activation": (lambda frame: initiate_root(self, 'activation'), "Generate activation maps of computer vision models and measure channel-activation correlation."),
-             #"Plaque": (lambda frame: initiate_root(self, 'analyze_plaques'), "Analyze plaque data.")
          }

          self.selected_app = tk.StringVar()
spacr/gui_core.py CHANGED
@@ -1266,36 +1266,29 @@ def initiate_root(parent, settings_type='mask'):
      fig_queue = Queue()
      parent_frame, vertical_container, horizontal_container, settings_container = setup_frame(parent_frame)

-     if settings_type == 'annotate':
-         from .app_annotate import initiate_annotation_app
-         initiate_annotation_app(horizontal_container)
-     elif settings_type == 'make_masks':
-         from .app_make_masks import initiate_make_mask_app
-         initiate_make_mask_app(horizontal_container)
+     scrollable_frame, vars_dict = setup_settings_panel(settings_container, settings_type)
+     print('setup_settings_panel')
+     canvas, canvas_widget = setup_plot_section(vertical_container, settings_type)
+     console_output, _ = setup_console(vertical_container) #, chatbot)
+     button_scrollable_frame, btn_col = setup_button_section(horizontal_container, settings_type)
+
+     if num_cores > 12:
+         _, usage_bars, btn_col = setup_usage_panel(horizontal_container, btn_col, uppdate_frequency)
      else:
-         scrollable_frame, vars_dict = setup_settings_panel(settings_container, settings_type)
-         print('setup_settings_panel')
-         canvas, canvas_widget = setup_plot_section(vertical_container, settings_type)
-         console_output, _ = setup_console(vertical_container) #, chatbot)
-         button_scrollable_frame, btn_col = setup_button_section(horizontal_container, settings_type)
-
-         if num_cores > 12:
-             _, usage_bars, btn_col = setup_usage_panel(horizontal_container, btn_col, uppdate_frequency)
-         else:
-             usage_bars = []
+         usage_bars = []

-         set_globals(thread_control, q, console_output, parent_frame, vars_dict, canvas, canvas_widget, scrollable_frame, fig_queue, progress_bar, usage_bars)
-         description_text = descriptions.get(settings_type, "No description available for this module.")
-
-         q.put(f"Console")
-         q.put(f" ")
-         q.put(description_text)
-
-         process_console_queue()
-         process_fig_queue()
-         create_menu_bar(parent)
-         after_id = parent_window.after(uppdate_frequency, lambda: main_thread_update_function(parent_window, q, fig_queue, canvas_widget))
-         parent_window.after_tasks.append(after_id)
+     set_globals(thread_control, q, console_output, parent_frame, vars_dict, canvas, canvas_widget, scrollable_frame, fig_queue, progress_bar, usage_bars)
+     description_text = descriptions.get(settings_type, "No description available for this module.")
+
+     q.put(f"Console")
+     q.put(f" ")
+     q.put(description_text)
+
+     process_console_queue()
+     process_fig_queue()
+     create_menu_bar(parent)
+     after_id = parent_window.after(uppdate_frequency, lambda: main_thread_update_function(parent_window, q, fig_queue, canvas_widget))
+     parent_window.after_tasks.append(after_id)

      print("Root initialization complete")
      return parent_frame, vars_dict