spacr 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/measure.py CHANGED
@@ -16,12 +16,6 @@ from skimage.util import img_as_bool
16
16
 
17
17
  from .logger import log_function_call
18
18
 
19
- #from .io import create_database, _save_settings_to_db
20
- #from .timelapse import _timelapse_masks_to_gif, _scmovie
21
- #from .plot import _plot_cropped_arrays, _save_scimg_plot
22
- #from .utils import _merge_overlapping_objects, _filter_object, _relabel_parent_with_child_labels, _exclude_objects
23
- #from .utils import _merge_and_save_to_database, _crop_center, _find_bounding_box, _generate_names, _get_percentiles, normalize_to_dtype, _map_wells_png, _list_endpoint_subdirectories, _generate_representative_images
24
-
25
19
  def get_components(cell_mask, nucleus_mask, pathogen_mask):
26
20
  """
27
21
  Get the components (nucleus and pathogens) for each cell in the given masks.
@@ -626,8 +620,14 @@ def _measure_crop_core(index, time_ls, file, settings):
626
620
 
627
621
  file_name = os.path.splitext(file)[0]
628
622
  data = np.load(os.path.join(settings['input_folder'], file))
629
-
630
623
  data_type = data.dtype
624
+ if data_type not in ['uint8','uint16']:
625
+ data_type_before = data_type
626
+ data = data.astype(np.uint16)
627
+ data_type = data.dtype
628
+ if settings['verbose']:
629
+ print(f'Converted data from {data_type_before} to {data_type}')
630
+
631
631
  if settings['save_measurements']:
632
632
  os.makedirs(source_folder+'/measurements', exist_ok=True)
633
633
  _create_database(source_folder+'/measurements/measurements.db')
@@ -832,7 +832,6 @@ def _measure_crop_core(index, time_ls, file, settings):
832
832
  png_channels = normalize_to_dtype(png_channels, settings['normalize'][0], settings['normalize'][1], percentile_list=percentile_list)
833
833
  else:
834
834
  png_channels = normalize_to_dtype(png_channels, 0, 100)
835
-
836
835
  os.makedirs(png_folder, exist_ok=True)
837
836
 
838
837
  if png_channels.shape[2] == 2:
@@ -999,12 +998,12 @@ def measure_crop(settings):
999
998
  _save_settings_to_db(settings)
1000
999
 
1001
1000
  files = [f for f in os.listdir(settings['input_folder']) if f.endswith('.npy')]
1002
- max_workers = settings['max_workers'] or mp.cpu_count()-4
1003
- print(f'using {max_workers} cpu cores')
1001
+ n_job = settings['n_job'] or mp.cpu_count()-4
1002
+ print(f'using {n_job} cpu cores')
1004
1003
 
1005
1004
  with mp.Manager() as manager:
1006
1005
  time_ls = manager.list()
1007
- with mp.Pool(max_workers) as pool:
1006
+ with mp.Pool(n_job) as pool:
1008
1007
  result = pool.starmap_async(_measure_crop_core, [(index, time_ls, file, settings) for index, file in enumerate(files)])
1009
1008
 
1010
1009
  # Track progress in the main process
@@ -1013,7 +1012,7 @@ def measure_crop(settings):
1013
1012
  files_processed = len(time_ls)
1014
1013
  files_to_process = len(files)
1015
1014
  average_time = np.mean(time_ls) if len(time_ls) > 0 else 0
1016
- time_left = (((files_to_process-files_processed)*average_time)/max_workers)/60
1015
+ time_left = (((files_to_process-files_processed)*average_time)/n_job)/60
1017
1016
  print(f'Progress: {files_processed}/{files_to_process} Time/img {average_time:.3f}sec, Time Remaining {time_left:.3f} min.', end='\r', flush=True)
1018
1017
  result.get()
1019
1018
 
spacr/plot.py CHANGED
@@ -1565,4 +1565,56 @@ def plot_feature_importance(feature_importance_df):
1565
1565
  ax.set_xlabel('Feature Importance', fontsize=font_size)
1566
1566
  ax.tick_params(axis='both', which='major', labelsize=font_size)
1567
1567
  plt.tight_layout()
1568
- return fig
1568
+ return fig
1569
+
1570
def read_and_plot__vision_results(base_dir, y_axis='accuracy', name_split='_time', y_lim=[0.8, 0.9]):
    """Aggregate ``*_test_result.csv`` files under *base_dir* and plot the mean
    metric per model as a bar chart.

    Args:
        base_dir (str): Directory tree searched recursively for files ending
            in ``_test_result.csv``.
        y_axis (str): Column name in the CSV files to average and plot.
        name_split (str): Token used to split each file name; the part before
            it is taken as the model name.
        y_lim (list | None): ``[ymin, ymax]`` for the y-axis, or ``None`` to
            autoscale. (Kept as a list default for interface compatibility;
            it is never mutated.)

    Returns:
        None. Prints the per-model averages and shows the plot, or prints a
        message when no matching CSV files are found.
    """
    data_frames = []

    # Ensure the output folder exists. The original called
    # os.mkdir(dst, exists=True), which raises TypeError (os.mkdir has no
    # 'exists' keyword); makedirs with exist_ok=True is the intended call.
    dst = os.path.join(base_dir, 'result')
    os.makedirs(dst, exist_ok=True)

    # Collect every test-result CSV in the tree.
    for root, dirs, files in os.walk(base_dir):
        for file in files:
            if file.endswith("_test_result.csv"):
                file_path = os.path.join(root, file)
                file_name = os.path.basename(file_path)
                # Model name is everything before the split token.
                model = file_name.split(f'{name_split}')[0]
                # Epoch label comes from the name of the containing folder.
                epoch = os.path.basename(os.path.dirname(file_path))

                df = pd.read_csv(file_path)
                df['model'] = model
                df['epoch'] = epoch
                data_frames.append(df)

    if data_frames:
        result_df = pd.concat(data_frames, ignore_index=True)

        # Average the requested metric per model, sorted ascending.
        avg_metric = result_df.groupby('model')[y_axis].mean().reset_index()
        avg_metric = avg_metric.sort_values(by=y_axis)
        print(avg_metric)

        # Plotting the results.
        plt.figure(figsize=(10, 6))
        plt.bar(avg_metric['model'], avg_metric[y_axis])
        plt.xlabel('Model')
        plt.ylabel(f'{y_axis}')
        plt.title(f'Average {y_axis.capitalize()} per Model')
        plt.xticks(rotation=45)
        plt.tight_layout()
        if y_lim is not None:
            plt.ylim(y_lim)
        plt.show()
    else:
        print("No CSV files found in the specified directory.")