spacr 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. spacr/app_annotate.py +3 -4
  2. spacr/core.py +100 -282
  3. spacr/gui.py +20 -38
  4. spacr/gui_core.py +406 -499
  5. spacr/gui_elements.py +395 -60
  6. spacr/gui_utils.py +393 -73
  7. spacr/io.py +130 -50
  8. spacr/measure.py +199 -154
  9. spacr/plot.py +108 -42
  10. spacr/resources/font/open_sans/OFL.txt +93 -0
  11. spacr/resources/font/open_sans/OpenSans-Italic-VariableFont_wdth,wght.ttf +0 -0
  12. spacr/resources/font/open_sans/OpenSans-VariableFont_wdth,wght.ttf +0 -0
  13. spacr/resources/font/open_sans/README.txt +100 -0
  14. spacr/resources/font/open_sans/static/OpenSans-Bold.ttf +0 -0
  15. spacr/resources/font/open_sans/static/OpenSans-BoldItalic.ttf +0 -0
  16. spacr/resources/font/open_sans/static/OpenSans-ExtraBold.ttf +0 -0
  17. spacr/resources/font/open_sans/static/OpenSans-ExtraBoldItalic.ttf +0 -0
  18. spacr/resources/font/open_sans/static/OpenSans-Italic.ttf +0 -0
  19. spacr/resources/font/open_sans/static/OpenSans-Light.ttf +0 -0
  20. spacr/resources/font/open_sans/static/OpenSans-LightItalic.ttf +0 -0
  21. spacr/resources/font/open_sans/static/OpenSans-Medium.ttf +0 -0
  22. spacr/resources/font/open_sans/static/OpenSans-MediumItalic.ttf +0 -0
  23. spacr/resources/font/open_sans/static/OpenSans-Regular.ttf +0 -0
  24. spacr/resources/font/open_sans/static/OpenSans-SemiBold.ttf +0 -0
  25. spacr/resources/font/open_sans/static/OpenSans-SemiBoldItalic.ttf +0 -0
  26. spacr/resources/font/open_sans/static/OpenSans_Condensed-Bold.ttf +0 -0
  27. spacr/resources/font/open_sans/static/OpenSans_Condensed-BoldItalic.ttf +0 -0
  28. spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBold.ttf +0 -0
  29. spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBoldItalic.ttf +0 -0
  30. spacr/resources/font/open_sans/static/OpenSans_Condensed-Italic.ttf +0 -0
  31. spacr/resources/font/open_sans/static/OpenSans_Condensed-Light.ttf +0 -0
  32. spacr/resources/font/open_sans/static/OpenSans_Condensed-LightItalic.ttf +0 -0
  33. spacr/resources/font/open_sans/static/OpenSans_Condensed-Medium.ttf +0 -0
  34. spacr/resources/font/open_sans/static/OpenSans_Condensed-MediumItalic.ttf +0 -0
  35. spacr/resources/font/open_sans/static/OpenSans_Condensed-Regular.ttf +0 -0
  36. spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBold.ttf +0 -0
  37. spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBoldItalic.ttf +0 -0
  38. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Bold.ttf +0 -0
  39. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-BoldItalic.ttf +0 -0
  40. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBold.ttf +0 -0
  41. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBoldItalic.ttf +0 -0
  42. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Italic.ttf +0 -0
  43. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Light.ttf +0 -0
  44. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-LightItalic.ttf +0 -0
  45. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Medium.ttf +0 -0
  46. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-MediumItalic.ttf +0 -0
  47. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Regular.ttf +0 -0
  48. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBold.ttf +0 -0
  49. spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBoldItalic.ttf +0 -0
  50. spacr/resources/icons/logo.pdf +2786 -6
  51. spacr/resources/icons/logo_spacr.png +0 -0
  52. spacr/resources/icons/logo_spacr_1.png +0 -0
  53. spacr/settings.py +12 -87
  54. spacr/utils.py +45 -10
  55. {spacr-0.2.3.dist-info → spacr-0.2.5.dist-info}/METADATA +5 -1
  56. spacr-0.2.5.dist-info/RECORD +100 -0
  57. spacr-0.2.3.dist-info/RECORD +0 -58
  58. {spacr-0.2.3.dist-info → spacr-0.2.5.dist-info}/LICENSE +0 -0
  59. {spacr-0.2.3.dist-info → spacr-0.2.5.dist-info}/WHEEL +0 -0
  60. {spacr-0.2.3.dist-info → spacr-0.2.5.dist-info}/entry_points.txt +0 -0
  61. {spacr-0.2.3.dist-info → spacr-0.2.5.dist-info}/top_level.txt +0 -0
spacr/io.py CHANGED
@@ -583,20 +583,25 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
     None
     """
 
-    from .utils import _extract_filename_metadata
+    from .utils import _extract_filename_metadata, print_progress
 
     regular_expression = re.compile(regex)
     images_by_key = defaultdict(list)
     stack_path = os.path.join(src, 'stack')
+    files_processed = 0
     if not os.path.exists(stack_path) or (os.path.isdir(stack_path) and len(os.listdir(stack_path)) == 0):
         all_filenames = [filename for filename in os.listdir(src) if filename.endswith(img_format)]
-        print(f'All_files:{len(all_filenames)} in {src}')
-        for i in range(0, len(all_filenames), batch_size):
-            batch_filenames = all_filenames[i:i+batch_size]
+        print(f'All_files: {len(all_filenames)} in {src}')
+        time_ls = []
+
+        for idx in range(0, len(all_filenames), batch_size):
+            start = time.time()
+            batch_filenames = all_filenames[idx:idx+batch_size]
             for filename in batch_filenames:
                 images_by_key = _extract_filename_metadata(batch_filenames, src, images_by_key, regular_expression, metadata_type, pick_slice, skip_mode)
+
             if pick_slice:
-                for key in images_by_key:
+                for i, key in enumerate(images_by_key):
                     plate, well, field, channel, mode = key
                     max_intensity_slice = max(images_by_key[key], key=lambda x: np.percentile(x, 90))
                     mip_image = Image.fromarray(max_intensity_slice)
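
Note on the context lines above (this logic is unchanged by the diff): when pick_slice is enabled, the loop keeps, for each (plate, well, field, channel) key, the single slice whose 90th-percentile intensity is highest, rather than projecting across slices. A minimal standalone sketch of that selection rule, with synthetic arrays standing in for the images grouped under one key:

import numpy as np

# Synthetic stand-ins for the images collected under one images_by_key key.
slices = [np.random.randint(0, 4096, (64, 64), dtype=np.uint16) for _ in range(5)]

# Keep the slice with the brightest 90th-percentile intensity, as in io.py.
best_slice = max(slices, key=lambda x: np.percentile(x, 90))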
@@ -604,13 +609,19 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                     os.makedirs(output_dir, exist_ok=True)
                     output_filename = f'{plate}_{well}_{field}.tif'
                     output_path = os.path.join(output_dir, output_filename)
+                    files_processed += 1
+                    stop = time.time()
+                    duration = stop - start
+                    time_ls.append(duration)
+                    files_to_process = len(all_filenames)
+                    print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
 
                     if os.path.exists(output_path):
                         print(f'WARNING: A file with the same name already exists at location {output_filename}')
                     else:
                         mip_image.save(output_path)
             else:
-                for key, images in images_by_key.items():
+                for i, (key, images) in enumerate(images_by_key.items()):
                     mip = np.max(np.stack(images), axis=0)
                     mip_image = Image.fromarray(mip)
                     plate, well, field, channel = key[:4]
@@ -618,12 +629,17 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                     os.makedirs(output_dir, exist_ok=True)
                     output_filename = f'{plate}_{well}_{field}.tif'
                     output_path = os.path.join(output_dir, output_filename)
+                    files_processed += 1
+                    stop = time.time()
+                    duration = stop - start
+                    time_ls.append(duration)
+                    files_to_process = len(all_filenames)
+                    print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
 
                     if os.path.exists(output_path):
                         print(f'WARNING: A file with the same name already exists at location {output_filename}')
                     else:
                         mip_image.save(output_path)
-
             images_by_key.clear()
 
     # Move original images to a new directory
@@ -637,6 +653,7 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                 print(f'WARNING: A file with the same name already exists at location {move}')
             else:
                 shutil.move(os.path.join(src, filename), move)
+    files_processed = 0
     return
 
 def _merge_file(chan_dirs, stack_dir, file_name):
@@ -826,6 +843,7 @@ def _merge_channels(src, plot=False):
     """
 
     from .plot import plot_arrays
+    from .utils import print_progress
 
     stack_dir = os.path.join(src, 'stack')
     allowed_names = ['01', '02', '03', '04', '00', '1', '2', '3', '4', '0']
@@ -835,8 +853,7 @@ def _merge_channels(src, plot=False):
     chan_dirs.sort()
 
     print(f'List of folders in src: {chan_dirs}. Single channel folders.')
-    start_time = time.time()
-
+
     # Assuming chan_dirs[0] is not empty and exists, adjust according to your logic
     first_dir_path = os.path.join(src, chan_dirs[0])
     dir_files = os.listdir(first_dir_path)
@@ -847,14 +864,18 @@ def _merge_channels(src, plot=False):
     print(f'Generated folder with merged arrays: {stack_dir}')
 
     if _is_dir_empty(stack_dir):
-        for file_name in dir_files:
+        time_ls = []
+        files_to_process = len(dir_files)
+        for i, file_name in enumerate(dir_files):
+            start_time = time.time()
             full_file_path = os.path.join(first_dir_path, file_name)
             if os.path.isfile(full_file_path):
                 _merge_file([os.path.join(src, d) for d in chan_dirs], stack_dir, file_name)
-
-        elapsed_time = time.time() - start_time
-        avg_time = elapsed_time / len(dir_files) if dir_files else 0
-        print(f'Average Time: {avg_time:.3f} sec, Total Elapsed Time: {elapsed_time:.3f} sec')
+            stop_time = time.time()
+            duration = stop_time - start_time
+            time_ls.append(duration)
+            files_processed = i + 1
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type='Merging channels into npy stacks')
 
     if plot:
         plot_arrays(os.path.join(src, 'stack'))
@@ -911,6 +932,7 @@ def _mip_all(src, include_first_chan=True):
 
 #@log_function_call
 def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_size=100):
+    from .utils import print_progress
     """
     Concatenates channel data from multiple files and saves the concatenated data as numpy arrays.
 
@@ -926,6 +948,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
     """
     channels = [item for item in channels if item is not None]
     paths = []
+    time_ls = []
     index = 0
     channel_stack_loc = os.path.join(os.path.dirname(src), 'channel_stack')
     os.makedirs(channel_stack_loc, exist_ok=True)
@@ -944,8 +967,13 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
                 array = np.take(array, channels, axis=2)
                 stack_region.append(array)
                 filenames_region.append(os.path.basename(path))
-            #clear_output(wait=True)
-            print(f'Region {i+1}/ {len(time_stack_path_lists)}', end='\r', flush=True)
+
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = i+1
+            files_to_process = time_stack_path_lists
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type="Concatinating")
             stack = np.stack(stack_region)
             save_loc = os.path.join(channel_stack_loc, f'{name}.npz')
             np.savez(save_loc, data=stack, filenames=filenames_region)
@@ -964,23 +992,24 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
         nr_files = len(paths)
         batch_index = 0 # Added this to name the output files
         stack_ls = []
-        filenames_batch = [] # to hold filenames of the current batch
+        filenames_batch = []
         for i, path in enumerate(paths):
+            start = time.time()
             array = np.load(path)
             array = np.take(array, channels, axis=2)
             stack_ls.append(array)
             filenames_batch.append(os.path.basename(path)) # store the filename
-            #clear_output(wait=True)
-            print(f'Concatenated: {i+1}/{nr_files} files')
-            #print(f'Concatenated: {i+1}/{nr_files} files', end='\r', flush=True)
-
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = i+1
+            files_to_process = nr_files
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type="Concatinating")
             if (i+1) % batch_size == 0 or i+1 == nr_files:
                 unique_shapes = {arr.shape[:-1] for arr in stack_ls}
                 if len(unique_shapes) > 1:
                     max_dims = np.max(np.array(list(unique_shapes)), axis=0)
-                    #clear_output(wait=True)
                     print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}')
-                    #print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
                     padded_stack_ls = []
                     for arr in stack_ls:
                         pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
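
Note on the batch-padding branch in the hunk above (the branch itself is pre-existing context): arrays whose X,Y footprints differ within a batch are zero-padded to the batch maximum so np.stack can combine them. A self-contained sketch of that step, with made-up shapes:

import numpy as np

# Two arrays with differing spatial dims; the channel axis (last) matches.
stack_ls = [np.ones((100, 120, 3)), np.ones((98, 110, 3))]

unique_shapes = {arr.shape[:-1] for arr in stack_ls}
if len(unique_shapes) > 1:
    max_dims = np.max(np.array(list(unique_shapes)), axis=0)  # [100 120]
    padded = []
    for arr in stack_ls:
        # Pad only the spatial axes up to the batch maximum; channels untouched.
        pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
        padded.append(np.pad(arr, pad_width + [(0, 0)], mode='constant'))
    stack = np.stack(padded)  # shape (2, 100, 120, 3)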
@@ -1002,6 +1031,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
 
 def _normalize_img_batch(stack, channels, save_dtype, settings):
 
+    from .utils import print_progress
     """
     Normalize the stack of images.
 
@@ -1018,7 +1048,9 @@ def _normalize_img_batch(stack, channels, save_dtype, settings):
     normalized_stack = np.zeros_like(stack, dtype=np.float32)
 
     #for channel in range(stack.shape[-1]):
-    for channel in channels:
+    time_ls = []
+    for i, channel in enumerate(channels):
+        start = time.time()
         if channel == settings['nucleus_channel']:
             background = settings['nucleus_background']
             signal_threshold = settings['nucleus_Signal_to_noise']*settings['nucleus_background']
@@ -1065,10 +1097,18 @@ def _normalize_img_batch(stack, channels, save_dtype, settings):
             arr_2d_normalized = exposure.rescale_intensity(arr_2d, in_range=(global_lower, global_upper), out_range=(0, 1))
             normalized_stack[array_index, :, :, channel] = arr_2d_normalized
 
+        stop = time.time()
+        duration = stop - start
+        time_ls.append(duration)
+        files_processed = i+1
+        files_to_process = len(channels)
+        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type=f"Normalizing")
+
     return normalized_stack.astype(save_dtype)
 
 def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={}):
-
+    from .utils import print_progress
+
     """
     Concatenates and normalizes channel data from multiple files and saves the normalized data.
 
@@ -1088,10 +1128,11 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
     Returns:
         str: The directory path where the concatenated and normalized channel data is saved.
     """
-    # n c p
+
     channels = [item for item in channels if item is not None]
 
     paths = []
+    time_ls = []
     output_fldr = os.path.join(os.path.dirname(src), 'norm_channel_stack')
     os.makedirs(output_fldr, exist_ok=True)
 
@@ -1099,6 +1140,7 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
     try:
         time_stack_path_lists = _generate_time_lists(os.listdir(src))
         for i, time_stack_list in enumerate(time_stack_path_lists):
+            start = time.time()
             stack_region = []
             filenames_region = []
             for idx, file in enumerate(time_stack_list):
@@ -1107,10 +1149,14 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
                 parts = file.split('_')
                 name = parts[0] + '_' + parts[1] + '_' + parts[2]
                 array = np.load(path)
-                #array = np.take(array, channels, axis=2)
                 stack_region.append(array)
                 filenames_region.append(os.path.basename(path))
-            print(f'Region {i + 1}/ {len(time_stack_path_lists)}', end='\r', flush=True)
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed = i+1
+            files_to_process = len(time_stack_path_lists)
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Concatinating")
             stack = np.stack(stack_region)
 
             normalized_stack = _normalize_img_batch(stack=stack,
@@ -1138,13 +1184,19 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
         batch_index = 0
         stack_ls = []
         filenames_batch = []
-
+        time_ls = []
+        files_processed = 0
         for i, path in enumerate(paths):
+            start = time.time()
             array = np.load(path)
-            #array = np.take(array, channels, axis=2)
             stack_ls.append(array)
             filenames_batch.append(os.path.basename(path))
-            print(f'Concatenated: {i + 1}/{nr_files} files')
+            stop = time.time()
+            duration = stop - start
+            time_ls.append(duration)
+            files_processed += 1
+            files_to_process = nr_files
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Concatinating")
 
             if (i + 1) % settings['batch_size'] == 0 or i + 1 == nr_files:
                 unique_shapes = {arr.shape[:-1] for arr in stack_ls}
@@ -1219,6 +1271,7 @@ def _get_lists_for_normalization(settings):
     return backgrounds, signal_to_noise, signal_thresholds, remove_background
 
 def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False, False, False], lower_percentile=2, save_dtype=np.float32, signal_to_noise=[5, 5, 5], signal_thresholds=[1000, 1000, 1000]):
+    from .utils import print_progress
     """
     Normalize the stack of images.
 
@@ -1266,10 +1319,11 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
             global_upper = np.percentile(non_zero_single_channel, upper_p)
             if global_upper >= signal_threshold:
                 break
-
+
         # Normalize the pixels in each image to the global percentiles and then dtype.
         arr_2d_normalized = np.zeros_like(single_channel, dtype=single_channel.dtype)
         signal_to_noise_ratio_ls = []
+        time_ls = []
         for array_index in range(single_channel.shape[0]):
             start = time.time()
             arr_2d = single_channel[array_index, :, :]
@@ -1291,8 +1345,15 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
             duration = (stop - start) * single_channel.shape[0]
             time_ls.append(duration)
             average_time = np.mean(time_ls) if len(time_ls) > 0 else 0
-            print(f'Progress: files {file_index + 1}/{len(paths)}, channels:{chan_index}/{stack.shape[-1] - 1}, arrays:{array_index + 1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec')
-
+            print(f'channels:{chan_index}/{stack.shape[-1] - 1}, arrays:{array_index + 1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec')
+
+            #stop = time.time()
+            #duration = stop - start
+            #time_ls.append(duration)
+            #files_processed = file_index + 1
+            #files_to_process = len(paths)
+            #print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
+
         normalized_stack[:, :, :, channel] = arr_2d_normalized
 
     save_loc = os.path.join(output_fldr, f'{name}_norm_stack.npz')
@@ -1303,6 +1364,7 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
     return print(f'Saved stacks: {output_fldr}')
 
 def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
+    from .utils import print_progress
     """
     Normalize the timelapse data by rescaling the intensity values based on percentiles.
 
@@ -1326,8 +1388,9 @@ def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
 
         for chan_index in range(stack.shape[-1]):
             single_channel = stack[:, :, :, chan_index]
-
+            time_ls = []
             for array_index in range(single_channel.shape[0]):
+                start = time.time()
                 arr_2d = single_channel[array_index]
                 # Calculate the 1% and 98% percentiles for this specific image
                 q_low = np.percentile(arr_2d[arr_2d != 0], lower_percentile)
@@ -1337,7 +1400,14 @@ def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
                 arr_2d_rescaled = exposure.rescale_intensity(arr_2d, in_range=(q_low, q_high), out_range='dtype')
                 normalized_stack[array_index, :, :, chan_index] = arr_2d_rescaled
 
-                print(f'Progress: files {file_index+1}/{len(paths)}, channels:{chan_index+1}/{stack.shape[-1]}, arrays:{array_index+1}/{single_channel.shape[0]}', end='\r')
+                print(f'channels:{chan_index+1}/{stack.shape[-1]}, arrays:{array_index+1}/{single_channel.shape[0]}', end='\r')
+
+                #stop = time.time()
+                #duration = stop - start
+                #time_ls.append(duration)
+                #files_processed = file_index+1
+                #files_to_process = len(paths)
+                #print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
 
         save_loc = os.path.join(output_fldr, f'{name}_norm_timelapse.npz')
         np.savez(save_loc, data=normalized_stack, filenames=filenames)
@@ -1547,8 +1617,8 @@ def preprocess_img_data(settings):
                                                save_dtype=np.float32,
                                                settings=settings)
 
-        if plot:
-            _plot_4D_arrays(src+'/norm_channel_stack', nr_npz=1, nr=nr)
+        #if plot:
+        #    _plot_4D_arrays(src+'/norm_channel_stack', nr_npz=1, nr=nr)
 
     return settings, src
 
@@ -1602,6 +1672,7 @@ def _get_avg_object_size(masks):
         return 0 # Return 0 if no objects are found
 
 def _save_figure(fig, src, text, dpi=300, i=1, all_folders=1):
+    from .utils import print_progress
     """
     Save a figure to a specified location.
 
@@ -1611,6 +1682,7 @@ def _save_figure(fig, src, text, dpi=300, i=1, all_folders=1):
         text (str): The text to be included in the figure name.
         dpi (int, optional): The resolution of the saved figure. Defaults to 300.
     """
+
     save_folder = os.path.dirname(src)
     obj_type = os.path.basename(src)
     name = os.path.basename(save_folder)
@@ -1619,10 +1691,11 @@ def _save_figure(fig, src, text, dpi=300, i=1, all_folders=1):
     fig_name = f'{obj_type}_{name}_{text}.pdf'
     save_location = os.path.join(save_folder, fig_name)
     fig.savefig(save_location, bbox_inches='tight', dpi=dpi)
-    clear_output(wait=True)
-    print(f'Progress: {i}/{all_folders}, Saved single cell figure: {os.path.basename(save_location)}')
-    #print(f'Progress: {i}/{all_folders}, Saved single cell figure: {os.path.basename(save_location)}', end='\r', flush=True)
-    # Close and delete the figure to free up memory
+
+    files_processed = i
+    files_to_process = all_folders
+    print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None, batch_size=None, operation_type="Saving Figures")
+    print(f'Saved single cell figure: {os.path.basename(save_location)}')
     plt.close(fig)
     del fig
     gc.collect()
@@ -1842,6 +1915,9 @@ def _create_database(db_path):
     conn.close()
 
 def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_dim, pathogen_chann_dim):
+
+    from .utils import print_progress
+
     """
     Load and concatenate arrays from multiple folders.
 
@@ -1870,9 +1946,10 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
 
     count=0
     all_imgs = len(os.listdir(reference_folder))
-
+    time_ls = []
     # Iterate through each file in the reference folder
-    for filename in os.listdir(reference_folder):
+    for idx, filename in enumerate(os.listdir(reference_folder)):
+        start = time.time()
         stack_ls = []
         if filename.endswith('.npy'):
             count += 1
@@ -1929,9 +2006,13 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
             output_path = os.path.join(output_folder, filename)
             np.save(output_path, stack)
 
-            #clear_output(wait=True)
-            print(f'Files merged: {count}/{all_imgs}')
-            #print(f'Files merged: {count}/{all_imgs}', end='\r', flush=True)
+        stop = time.time()
+        duration = stop - start
+        time_ls.append(duration)
+        files_processed = idx+1
+        files_to_process = all_imgs
+        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Merging Arrays")
+
     return
 
 def _read_db(db_loc, tables):
@@ -2211,7 +2292,7 @@ def _save_model(model, model_type, results_df, dst, epoch, epochs, intermedeate_
 
     def save_model_at_threshold(threshold, epoch, suffix=""):
         percentile = str(threshold * 100)
-        print(f'\rfound: {percentile}% accurate model', end='\r', flush=True)
+        print(f'\rfound: {percentile}% accurate model')#, end='\r', flush=True)
         torch.save(model, f'{dst}/{model_type}_epoch_{str(epoch)}{suffix}_acc_{percentile}_channels_{channels_str}.pth')
 
     if epoch % 100 == 0 or epoch == epochs:
@@ -2466,7 +2547,6 @@ def _read_mask(mask_path):
         mask = img_as_uint(mask)
         return mask
 
-
 def convert_numpy_to_tiff(folder_path, limit=None):
     """
     Converts all numpy files in a folder to TIFF format and saves them in a subdirectory 'tiff'.
@@ -2545,5 +2625,5 @@ def generate_cellpose_train_test(src, test_split=0.1):
         new_mask_path = os.path.join(dst_mask, filename)
         shutil.copy(img_path, new_img_path)
         shutil.copy(mask_path, new_mask_path)
-        print(f'Copied {idx+1}/{len(ls)} images to {_type} set', end='\r', flush=True)
+        print(f'Copied {idx+1}/{len(ls)} images to {_type} set')#, end='\r', flush=True)
 
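
Note: nearly every hunk above replaces an ad-hoc progress print with a call to print_progress from spacr.utils, passing a running count, a total, and a list of per-iteration durations. The helper's own implementation is not part of this diff; the following is a hypothetical sketch inferred purely from the call sites (every detail below is an assumption, and the real function in spacr/utils.py may behave differently):

import numpy as np

def print_progress(files_processed, files_to_process, n_jobs=1, time_ls=None,
                   batch_size=None, operation_type=''):
    # Hypothetical reconstruction from call sites; not the shipped helper.
    eta_info = ''
    if time_ls:
        avg = np.mean(time_ls)  # mean seconds per timed iteration
        remaining = max(files_to_process - files_processed, 0)
        # When each timed iteration covers a whole batch of files,
        # scale the remaining-item count down to remaining iterations.
        if batch_size:
            remaining = remaining / batch_size
        eta = remaining * avg / max(n_jobs, 1)
        eta_info = f', {avg:.2f}s/iter, ~{eta:.0f}s left'
    print(f'{operation_type}: {files_processed}/{files_to_process}{eta_info}', flush=True)

Called the way the diff does, e.g. print_progress(10, 100, n_jobs=1, time_ls=[0.5]*10, batch_size=None, operation_type='Merging Arrays'), this sketch would print 'Merging Arrays: 10/100, 0.50s/iter, ~45s left'.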