spacr 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/io.py CHANGED
@@ -1,6 +1,7 @@
 import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose
 import numpy as np
 import pandas as pd
+import tifffile
 from PIL import Image
 from collections import defaultdict, Counter
 from pathlib import Path
@@ -22,9 +23,9 @@ import seaborn as sns
 import matplotlib.pyplot as plt
 from torchvision.transforms import ToTensor
 
+
 from .logger import log_function_call
 
-@log_function_call
 def _load_images_and_labels(image_files, label_files, circular=False, invert=False, image_extension="*.tif", label_extension="*.tif"):
 
     from .utils import invert_image, apply_mask
@@ -87,7 +88,6 @@ def _load_images_and_labels(image_files, label_files, circular=False, invert=Fal
     print(f'image shape: {images[0].shape}, image type: images[0].shape mask shape: {labels[0].shape}, image type: labels[0].shape')
     return images, labels, image_names, label_names
 
-@log_function_call
 def _load_normalized_images_and_labels(image_files, label_files, signal_thresholds=[1000], channels=None, percentiles=None, circular=False, invert=False, visualize=False):
 
     from .plot import normalize_and_visualize
@@ -527,6 +527,7 @@ class TarImageDataset(Dataset):
 
         return img, m.name
 
+@log_function_call
 def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=False, skip_mode='01', metadata_type='', img_format='.tif'):
     """
     Convert z-stack images to maximum intensity projection (MIP) images.
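
Note: this release moves the `@log_function_call` decorator off the two loader functions above and onto `_rename_and_organize_image_files` here (and onto `_concatenate_channel` further down). `spacr/logger.py` is not part of this diff, so the body below is only a sketch of what a call-logging decorator of this kind typically looks like — an assumption, not spacr's actual implementation:

```python
import functools
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("spacr")

def log_function_call(func):
    """Log entry and exit of the wrapped function, preserving its metadata."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger.info("Calling %s with %d args, %d kwargs", func.__name__, len(args), len(kwargs))
        result = func(*args, **kwargs)
        logger.info("%s returned", func.__name__)
        return result
    return wrapper

@log_function_call
def example(x, y=2):
    return x + y

example(1)  # logs "Calling example ..." then "example returned"
```
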
@@ -706,7 +707,7 @@ def _move_to_chan_folder(src, regex, timelapse=False, metadata_type=''):
             if metadata_type =='cq1':
                 orig_wellID = wellID
                 wellID = _convert_cq1_well_id(wellID)
-                print(f'Converted Well ID: {orig_wellID} to {wellID}')
+                print(f'Converted Well ID: {orig_wellID} to {wellID}')#, end='\r', flush=True)
 
             newname = f"{plateID}_{wellID}_{fieldID}_{timeID if timelapse else ''}{ext}"
             newpath = src / chanID
@@ -757,6 +758,7 @@ def _merge_channels(src, plot=False):
 
     # Create the 'stack' directory if it doesn't exist
     stack_dir.mkdir(exist_ok=True)
+    print(f'generated folder with merged arrays: {stack_dir}')
 
     if _is_dir_empty(stack_dir):
         with Pool(cpu_count()) as pool:
@@ -819,6 +821,7 @@ def _mip_all(src, include_first_chan=True):
         np.save(os.path.join(src, filename), concatenated)
     return
 
+@log_function_call
 def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_size=100):
     """
     Concatenates channel data from multiple files and saves the concatenated data as numpy arrays.
@@ -853,8 +856,8 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
                 array = np.take(array, channels, axis=2)
                 stack_region.append(array)
                 filenames_region.append(os.path.basename(path))
-                clear_output(wait=True)
-                print(f'\033[KRegion {i+1}/ {len(time_stack_path_lists)}', end='\r', flush=True)
+                #clear_output(wait=True)
+                print(f'Region {i+1}/ {len(time_stack_path_lists)}', end='\r', flush=True)
             stack = np.stack(stack_region)
             save_loc = os.path.join(channel_stack_loc, f'{name}.npz')
             np.savez(save_loc, data=stack, filenames=filenames_region)
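
Note: this hunk and several below replace IPython's `clear_output` plus the `\033[K` (erase-to-end-of-line) ANSI escape with plain `print` calls. The `end='\r', flush=True` idiom kept here overwrites a single terminal line in place; a minimal standalone illustration:

```python
import time

n = 5
for i in range(n):
    time.sleep(0.2)  # stand-in for per-region work
    # '\r' returns the cursor to the start of the line; flush=True pushes the
    # partial line out immediately, so the counter overwrites itself in place.
    print(f'Region {i+1}/{n}', end='\r', flush=True)
print()  # move past the progress line once the loop finishes
```
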
@@ -879,15 +882,17 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
             array = np.take(array, channels, axis=2)
             stack_ls.append(array)
             filenames_batch.append(os.path.basename(path)) # store the filename
-            clear_output(wait=True)
-            print(f'\033[KConcatenated: {i+1}/{nr_files} files', end='\r', flush=True)
+            #clear_output(wait=True)
+            print(f'Concatenated: {i+1}/{nr_files} files')
+            #print(f'Concatenated: {i+1}/{nr_files} files', end='\r', flush=True)
 
             if (i+1) % batch_size == 0 or i+1 == nr_files:
                 unique_shapes = {arr.shape[:-1] for arr in stack_ls}
                 if len(unique_shapes) > 1:
                     max_dims = np.max(np.array(list(unique_shapes)), axis=0)
-                    clear_output(wait=True)
-                    print(f'\033[KWarning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
+                    #clear_output(wait=True)
+                    print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}')
+                    #print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
                     padded_stack_ls = []
                     for arr in stack_ls:
                         pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
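
Note: when a batch mixes image sizes, the arrays are zero-padded up to the per-axis maximum before stacking. The same `np.pad` idiom in isolation (toy shapes, assuming the channel axis is last, as in the code above):

```python
import numpy as np

arrays = [np.ones((10, 12, 2)), np.ones((11, 10, 2))]  # same channels, ragged X,Y

shapes = {a.shape[:-1] for a in arrays}
max_dims = np.max(np.array([list(s) for s in shapes]), axis=0)  # e.g. [11, 12]

padded = []
for arr in arrays:
    # zero-pad only at the high end of each spatial axis; channels untouched
    pad_width = [(0, m - d) for m, d in zip(max_dims, arr.shape[:-1])] + [(0, 0)]
    padded.append(np.pad(arr, pad_width))

stack = np.stack(padded)
print(stack.shape)  # (2, 11, 12, 2)
```
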
@@ -904,7 +909,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
                 stack_ls = [] # empty the list for the next batch
                 filenames_batch = [] # empty the filenames list for the next batch
                 padded_stack_ls = []
-    #print(f'\nAll files concatenated and saved to:{channel_stack_loc}')
+    print(f'All files concatenated and saved to:{channel_stack_loc}')
    return channel_stack_loc
 
 def _get_lists_for_normalization(settings):
@@ -1013,8 +1018,9 @@ def _normalize_stack(src, backgrounds=[100,100,100], remove_background=False, lo
             duration = (stop - start)*single_channel.shape[0]
             time_ls.append(duration)
             average_time = np.mean(time_ls) if len(time_ls) > 0 else 0
-            clear_output(wait=True)
-            print(f'\033[KProgress: files {file_index+1}/{len(paths)}, channels:{chan_index}/{stack.shape[-1]-1}, arrays:{array_index+1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec', end='\r', flush=True)
+            #clear_output(wait=True)
+            print(f'Progress: files {file_index+1}/{len(paths)}, channels:{chan_index}/{stack.shape[-1]-1}, arrays:{array_index+1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec')
+            #print(f'Progress: files {file_index+1}/{len(paths)}, channels:{chan_index}/{stack.shape[-1]-1}, arrays:{array_index+1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec', end='\r', flush=True)
         normalized_single_channel = exposure.rescale_intensity(arr_2d_normalized, out_range='dtype')
         normalized_stack[:, :, :, channel] = normalized_single_channel
         save_loc = output_fldr+'/'+name+'_norm_stack.npz'
@@ -1069,8 +1075,6 @@ def _normalize_timelapse(src, lower_quantile=0.01, save_dtype=np.float32):
 
     print(f'\nSaved normalized stacks: {output_fldr}')
 
-
-
 def _create_movies_from_npy_per_channel(src, fps=10):
     """
     Create movies from numpy files per channel.
@@ -1122,9 +1126,33 @@ def _create_movies_from_npy_per_channel(src, fps=10):
         channel_save_path = os.path.join(save_path, f'{plate}_{well}_{field}_channel_{channel}.mp4')
         _npz_to_movie(normalized_channel_arrays_3d, filenames, channel_save_path, fps)
 
+def delete_empty_subdirectories(folder_path):
+    """
+    Deletes all empty subdirectories in the specified folder.
+
+    Args:
+    - folder_path (str): The path to the folder in which to look for empty subdirectories.
+    """
+    # Check each item in the specified folder
+    for dirpath, dirnames, filenames in os.walk(folder_path, topdown=False):
+        # os.walk is used with topdown=False to start from the innermost directories and work upwards.
+        for dirname in dirnames:
+            # Construct the full path to the subdirectory
+            full_dir_path = os.path.join(dirpath, dirname)
+            # Try to remove the directory and catch any error (like if the directory is not empty)
+            try:
+                os.rmdir(full_dir_path)
+                print(f"Deleted empty directory: {full_dir_path}")
+            except OSError as e:
+                continue
+                # An error occurred, likely because the directory is not empty
+                #print(f"Skipping non-empty directory: {full_dir_path}")
+
+@log_function_call
 def preprocess_img_data(settings):
 
     from .plot import plot_arrays, _plot_4D_arrays
+    from .utils import _run_test_mode
 
     """
     Preprocesses image data by converting z-stack images to maximum intensity projection (MIP) images.
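
Note: the new `delete_empty_subdirectories` relies on `os.rmdir` refusing to remove a non-empty directory, so the `except OSError` branch is the filter rather than an error path, and the bottom-up `os.walk(..., topdown=False)` lets chains of nested empty folders collapse in a single pass. A small usage sketch on a throwaway tree:

```python
import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'a', 'b'))                # empty chain: a/b
os.makedirs(os.path.join(root, 'c'))
open(os.path.join(root, 'c', 'data.npy'), 'w').close()   # c is not empty

# bottom-up walk: 'a/b' is removed first, which makes 'a' empty in turn
for dirpath, dirnames, filenames in os.walk(root, topdown=False):
    for dirname in dirnames:
        full = os.path.join(dirpath, dirname)
        try:
            os.rmdir(full)
        except OSError:
            continue  # non-empty, keep it

print(os.listdir(root))  # ['c'] — only the folder with content survives
```
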
@@ -1155,12 +1183,16 @@ def preprocess_img_data(settings):
     Returns:
     None
     """
+
     src = settings['src']
     valid_ext = ['tif', 'tiff', 'png', 'jpeg']
     files = os.listdir(src)
     extensions = [file.split('.')[-1] for file in files]
     extension_counts = Counter(extensions)
     most_common_extension = extension_counts.most_common(1)[0][0]
+    img_format = None
+
+    delete_empty_subdirectories(src)
 
     # Check if the most common extension is one of the specified image formats
     if most_common_extension in valid_ext:
@@ -1168,8 +1200,14 @@ def preprocess_img_data(settings):
         print(f'Found {extension_counts[most_common_extension]} {most_common_extension} files')
     else:
         print(f'Could not find any {valid_ext} files in {src} only found {extension_counts[0]}')
-        return
-
+        if os.path.exists(src+'/stack'):
+            print('Found existing stack folder.')
+        if os.path.exists(src+'/channel_stack'):
+            print('Found existing channel_stack folder.')
+        if os.path.exists(src+'/norm_channel_stack'):
+            print('Found existing norm_channel_stack folder. Skipping preprocessing')
+            return
+
     cmap = 'inferno'
     figuresize = 20
     normalize = True
@@ -1191,57 +1229,73 @@ def preprocess_img_data(settings):
     all_to_mip = settings['all_to_mip']
     pick_slice = settings['pick_slice']
     skip_mode = settings['skip_mode']
-
-    if metadata_type == 'cellvoyager':
-        regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-    elif metadata_type == 'cq1':
-        regex = f'W(?P<wellID>.*)F(?P<fieldID>.*)T(?P<timeID>.*)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-    elif metadata_type == 'nikon':
-        regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-    elif metadata_type == 'zeis':
-        regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-    elif metadata_type == 'leica':
-        regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-    elif metadata_type == 'custom':
-        regex = f'({custom_regex}){img_format}'
+
+
+    if not img_format == None:
+        if metadata_type == 'cellvoyager':
+            regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+        elif metadata_type == 'cq1':
+            regex = f'W(?P<wellID>.*)F(?P<fieldID>.*)T(?P<timeID>.*)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+        elif metadata_type == 'nikon':
+            regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+        elif metadata_type == 'zeis':
+            regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+        elif metadata_type == 'leica':
+            regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+        elif metadata_type == 'custom':
+            regex = f'({custom_regex}){img_format}'
 
-    print(f'regex mode:{metadata_type} regex:{regex}')
+        print(f'regex mode:{metadata_type} regex:{regex}')
+
+    if settings.get('test_mode', False):
+        try:
+            os.rmdir(os.path.join(src, 'test'))
+            print(f"Deleted test directory: {os.path.join(src, 'test')}")
+        except OSError as e:
+            pass
+
+        src = _run_test_mode(settings['src'], regex, timelapse=timelapse)
+        settings['src'] = src
 
     if not os.path.exists(src+'/stack'):
-        if timelapse:
-            _move_to_chan_folder(src, regex, timelapse, metadata_type)
-        else:
-            #_z_to_mip(src, regex, batch_size, pick_slice, skip_mode, metadata_type, img_format)
-            _rename_and_organize_image_files(src, regex, batch_size, pick_slice, skip_mode, metadata_type, img_format)
-
-        #Make sure no batches will be of only one image
-        all_imgs = len(src+'/stack')
-        full_batches = all_imgs // batch_size
-        last_batch_size = all_imgs % batch_size
-
-        # Check if the last batch is of size 1
-        if last_batch_size == 1:
-            # If there's only one batch and its size is 1, it's also an issue
-            if full_batches == 0:
-                raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
-            # If the last batch is of size 1, merge it with the second last batch
-            elif full_batches > 0:
-                raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
-
-        _merge_channels(src, plot=False)
-        if timelapse:
-            _create_movies_from_npy_per_channel(src+'/stack', fps=2)
-
-        if plot:
-            print(f'plotting {nr} images from {src}/stack')
-            plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
-        if all_to_mip:
-            _mip_all(src+'/stack')
-            if plot:
-                print(f'plotting {nr} images from {src}/stack')
-                plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
-        #nr_of_stacks = len(src+'/channel_stack')
-
+        try:
+            if not img_format == None:
+                if timelapse:
+                    _move_to_chan_folder(src, regex, timelapse, metadata_type)
+                else:
+                    _rename_and_organize_image_files(src, regex, batch_size, pick_slice, skip_mode, metadata_type, img_format)
+
+                #Make sure no batches will be of only one image
+                all_imgs = len(src+'/stack')
+                full_batches = all_imgs // batch_size
+                last_batch_size = all_imgs % batch_size
+
+                # Check if the last batch is of size 1
+                if last_batch_size == 1:
+                    # If there's only one batch and its size is 1, it's also an issue
+                    if full_batches == 0:
+                        raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
+                    # If the last batch is of size 1, merge it with the second last batch
+                    elif full_batches > 0:
+                        raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
+
+            _merge_channels(src, plot=False)
+
+            if timelapse:
+                _create_movies_from_npy_per_channel(src+'/stack', fps=2)
+
+            if plot:
+                print(f'plotting {nr} images from {src}/stack')
+                plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
+            if all_to_mip:
+                _mip_all(src+'/stack')
+                if plot:
+                    print(f'plotting {nr} images from {src}/stack')
+                    plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
+        except Exception as e:
+            print(f"Error: {e}")
+
+    print('concatinating cahnnels')
     _concatenate_channel(src+'/stack',
                          channels=mask_channels,
                          randomize=randomize,
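
Note: the per-microscope patterns above use named groups so that plate, well, field, time, slice and channel IDs can all be read out of a filename with one `re.match`. A quick demonstration (the filename is invented, loosely following the 'cellvoyager'-style layout):

```python
import re

img_format = '.tif'
regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'

# Hypothetical filename following the cellvoyager-style convention
name = 'plate1_A01_T0001F001L01A01Z01C01.tif'
match = re.match(regex, name)
if match:
    meta = match.groupdict()
    print(meta['plateID'], meta['wellID'], meta['fieldID'], meta['chanID'])
    # plate1 A01 001 01
```
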
@@ -1251,7 +1305,6 @@ def preprocess_img_data(settings):
     if plot:
         print(f'plotting {nr} images from {src}/channel_stack')
         _plot_4D_arrays(src+'/channel_stack', figuresize, cmap, nr_npz=1, nr=nr)
-    nr_of_chan_stacks = len(src+'/channel_stack')
 
     backgrounds, signal_to_noise, signal_thresholds = _get_lists_for_normalization(settings=settings)
 
@@ -1270,7 +1323,7 @@
     if plot:
         _plot_4D_arrays(src+'/norm_channel_stack', nr_npz=1, nr=nr)
 
-    return
+    return settings, src
 
 def _check_masks(batch, batch_filenames, output_folder):
     """
@@ -1292,8 +1345,7 @@ def _check_masks(batch, batch_filenames, output_folder):
     filtered_filenames = [f for f, exists in zip(batch_filenames, existing_files_mask) if exists]
 
     return np.array(filtered_batch), filtered_filenames
-
-
+
 def _get_avg_object_size(masks):
     """
     Calculate the average size of objects in a list of masks.
@@ -1362,7 +1414,8 @@ def _save_figure(fig, src, text, dpi=300, i=1, all_folders=1):
     save_location = os.path.join(save_folder, fig_name)
     fig.savefig(save_location, bbox_inches='tight', dpi=dpi)
     clear_output(wait=True)
-    print(f'\033[KProgress: {i}/{all_folders}, Saved single cell figure: {os.path.basename(save_location)}', end='\r', flush=True)
+    print(f'Progress: {i}/{all_folders}, Saved single cell figure: {os.path.basename(save_location)}')
+    #print(f'Progress: {i}/{all_folders}, Saved single cell figure: {os.path.basename(save_location)}', end='\r', flush=True)
     # Close and delete the figure to free up memory
     plt.close(fig)
     del fig
@@ -1446,6 +1499,56 @@ def _save_settings_to_db(settings):
     settings_df.to_sql('settings', conn, if_exists='replace', index=False) # Replace the table if it already exists
     conn.close()
 
+def _save_mask_timelapse_as_gif_v1(masks, path, cmap, norm, filenames):
+    """
+    Save a timelapse of masks as a GIF.
+
+    Parameters:
+    masks (list): List of mask frames.
+    path (str): Path to save the GIF.
+    cmap: Colormap for displaying the masks.
+    norm: Normalization for the masks.
+    filenames (list): List of filenames corresponding to each mask frame.
+
+    Returns:
+    None
+    """
+    def _update(frame):
+        """
+        Update the plot with the given frame.
+
+        Parameters:
+        frame (int): The frame number to update the plot with.
+
+        Returns:
+        None
+        """
+        nonlocal filename_text_obj
+        if filename_text_obj is not None:
+            filename_text_obj.remove()
+        ax.clear()
+        ax.axis('off')
+        current_mask = masks[frame]
+        ax.imshow(current_mask, cmap=cmap, norm=norm)
+        ax.set_title(f'Frame: {frame}', fontsize=24, color='white')
+        filename_text = filenames[frame]
+        filename_text_obj = fig.text(0.5, 0.01, filename_text, ha='center', va='center', fontsize=20, color='white')
+        for label_value in np.unique(current_mask):
+            if label_value == 0: continue # Skip background
+            y, x = np.mean(np.where(current_mask == label_value), axis=1)
+            ax.text(x, y, str(label_value), color='white', fontsize=24, ha='center', va='center')
+
+    fig, ax = plt.subplots(figsize=(50, 50), facecolor='black')
+    ax.set_facecolor('black')
+    ax.axis('off')
+    plt.subplots_adjust(left=0, right=1, top=1, bottom=0, wspace=0, hspace=0)
+
+    filename_text_obj = None
+    anim = FuncAnimation(fig, _update, frames=len(masks), blit=False)
+    anim.save(path, writer='pillow', fps=2, dpi=80) # Adjust DPI for size/quality
+    plt.close(fig)
+    print(f'Saved timelapse to {path}')
+
 def _save_mask_timelapse_as_gif(masks, tracks_df, path, cmap, norm, filenames):
     """
     Save a timelapse animation of masks as a GIF.
@@ -1500,9 +1603,10 @@ def _save_mask_timelapse_as_gif(masks, tracks_df, path, cmap, norm, filenames):
             ax.text(x, y, str(label_value), color='white', fontsize=24, ha='center', va='center')
 
         # Overlay tracks
-        for track in tracks_df['track_id'].unique():
-            _track = tracks_df[tracks_df['track_id'] == track]
-            ax.plot(_track['x'], _track['y'], '-w', linewidth=1)
+        if tracks_df is not None:
+            for track in tracks_df['track_id'].unique():
+                _track = tracks_df[tracks_df['track_id'] == track]
+                ax.plot(_track['x'], _track['y'], '-w', linewidth=1)
 
     anim = FuncAnimation(fig, _update, frames=len(masks), blit=False)
     anim.save(path, writer='pillow', fps=2, dpi=80) # Adjust DPI for size/quality
@@ -1616,56 +1720,65 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
 
     # Iterate through each file in the reference folder
     for filename in os.listdir(reference_folder):
-
         stack_ls = []
-        array_path = []
-
         if filename.endswith('.npy'):
-            count+=1
-            # Initialize the concatenated array with the array from the reference folder
-            concatenated_array = np.load(os.path.join(reference_folder, filename))
-            if channels is not None:
-                concatenated_array = np.take(concatenated_array, channels, axis=2)
+            count += 1
+
+            # Check if this file exists in all the other specified folders
+            exists_in_all_folders = all(os.path.isfile(os.path.join(folder, filename)) for folder in folder_paths)
+
+            if exists_in_all_folders:
+                # Load and potentially modify the array from the reference folder
+                ref_array_path = os.path.join(reference_folder, filename)
+                concatenated_array = np.load(ref_array_path)
+
+                if channels is not None:
+                    concatenated_array = np.take(concatenated_array, channels, axis=2)
+
+                # Add the array from the reference folder to 'stack_ls'
                 stack_ls.append(concatenated_array)
-            # For each of the other folders, load the array and concatenate it
-            for folder in folder_paths[1:]:
-                array_path = os.path.join(folder, filename)
-                if os.path.isfile(array_path):
+
+                # For each of the other folders, load the array and add it to 'stack_ls'
+                for folder in folder_paths[1:]:
+                    array_path = os.path.join(folder, filename)
                     array = np.load(array_path)
                     if array.ndim == 2:
-                        array = np.expand_dims(array, axis=-1) # add an extra dimension if the array is 2D
+                        array = np.expand_dims(array, axis=-1) # Add an extra dimension if the array is 2D
                     stack_ls.append(array)
 
-            stack_ls = [np.expand_dims(arr, axis=-1) if arr.ndim == 2 else arr for arr in stack_ls]
-            unique_shapes = {arr.shape[:-1] for arr in stack_ls}
-            if len(unique_shapes) > 1:
-                #max_dims = np.max(np.array(list(unique_shapes)), axis=0)
-                # Determine the maximum length of tuples in unique_shapes
-                max_tuple_length = max(len(shape) for shape in unique_shapes)
-                # Pad shorter tuples with zeros to make them all the same length
-                padded_shapes = [shape + (0,) * (max_tuple_length - len(shape)) for shape in unique_shapes]
-                # Now create a NumPy array and find the maximum dimensions
-                max_dims = np.max(np.array(padded_shapes), axis=0)
-                clear_output(wait=True)
-                print(f'\033[KWarning: arrays with multiple shapes found. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
-                padded_stack_ls = []
-                for arr in stack_ls:
-                    pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
-                    pad_width.append((0, 0))
-                    padded_arr = np.pad(arr, pad_width)
-                    padded_stack_ls.append(padded_arr)
-                # Concatenate the padded arrays along the channel dimension (last dimension)
-                stack = np.concatenate(padded_stack_ls, axis=-1)
+                if len(stack_ls) > 0:
+                    stack_ls = [np.expand_dims(arr, axis=-1) if arr.ndim == 2 else arr for arr in stack_ls]
+                    unique_shapes = {arr.shape[:-1] for arr in stack_ls}
+                    if len(unique_shapes) > 1:
+                        #max_dims = np.max(np.array(list(unique_shapes)), axis=0)
+                        # Determine the maximum length of tuples in unique_shapes
+                        max_tuple_length = max(len(shape) for shape in unique_shapes)
+                        # Pad shorter tuples with zeros to make them all the same length
+                        padded_shapes = [shape + (0,) * (max_tuple_length - len(shape)) for shape in unique_shapes]
+                        # Now create a NumPy array and find the maximum dimensions
+                        max_dims = np.max(np.array(padded_shapes), axis=0)
+                        #clear_output(wait=True)
+                        print(f'Warning: arrays with multiple shapes found. Padding arrays to max X,Y dimentions {max_dims}')
+                        #print(f'Warning: arrays with multiple shapes found. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
+                        padded_stack_ls = []
+                        for arr in stack_ls:
+                            pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
+                            pad_width.append((0, 0))
+                            padded_arr = np.pad(arr, pad_width)
+                            padded_stack_ls.append(padded_arr)
+                        # Concatenate the padded arrays along the channel dimension (last dimension)
+                        stack = np.concatenate(padded_stack_ls, axis=-1)
 
-            else:
-                stack = np.concatenate(stack_ls, axis=-1)
+                    else:
+                        stack = np.concatenate(stack_ls, axis=-1)
 
-            if stack.shape[-1] > concatenated_array.shape[-1]:
-                output_path = os.path.join(output_folder, filename)
-                np.save(output_path, stack)
+                    if stack.shape[-1] > concatenated_array.shape[-1]:
+                        output_path = os.path.join(output_folder, filename)
+                        np.save(output_path, stack)
 
-    clear_output(wait=True)
-    #print(f'\033[KFiles merged: {count}/{all_imgs}', end='\r', flush=True)
+    #clear_output(wait=True)
+    print(f'Files merged: {count}/{all_imgs}')
+    #print(f'Files merged: {count}/{all_imgs}', end='\r', flush=True)
     return
 
 def _read_db(db_loc, tables):
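
Note: the rewritten `_load_and_concatenate_arrays` only merges a mask file when it is present in every folder, gating on `all()` over `os.path.isfile` instead of appending whatever happens to exist mid-stack. The gating idiom on its own (the folder names are invented for illustration):

```python
import os

folder_paths = ['/data/masks/cell', '/data/masks/nucleus', '/data/masks/pathogen']  # hypothetical
reference_folder = folder_paths[0]

for filename in os.listdir(reference_folder):
    if not filename.endswith('.npy'):
        continue
    # Only merge a file that every mask folder produced; a missing file in any
    # folder means segmentation failed or has not finished for that image.
    if all(os.path.isfile(os.path.join(folder, filename)) for folder in folder_paths):
        print(f'{filename}: present in all folders, safe to merge')
    else:
        print(f'{filename}: skipped (missing in at least one folder)')
```
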
@@ -2139,7 +2252,42 @@ def _read_mask(mask_path):
     if mask.dtype != np.uint16:
         mask = img_as_uint(mask)
     return mask
+
+
+def convert_numpy_to_tiff(folder_path, limit=None):
+    """
+    Converts all numpy files in a folder to TIFF format and saves them in a subdirectory 'tiff'.
+
+    Args:
+    folder_path (str): The path to the folder containing numpy files.
+    """
+    # Create the subdirectory 'tiff' within the specified folder if it doesn't already exist
+    tiff_subdir = os.path.join(folder_path, 'tiff')
+    os.makedirs(tiff_subdir, exist_ok=True)
+
+    files = os.listdir(folder_path)
+
+    npy_files = [f for f in files if f.endswith('.npy')]
 
+    # Iterate over all files in the folder
+    for i, filename in enumerate(files):
+        if limit is not None and i >= limit:
+            break
+
+        # Construct the full file path
+        file_path = os.path.join(folder_path, filename)
+        # Load the numpy file
+        numpy_array = np.load(file_path)
+
+        # Construct the output TIFF file path
+        tiff_filename = os.path.splitext(filename)[0] + '.tif'
+        tiff_file_path = os.path.join(tiff_subdir, tiff_filename)
+
+        # Save the numpy array as a TIFF file
+        tifffile.imwrite(tiff_file_path, numpy_array)
+
+        print(f"Converted {filename} to {tiff_filename} and saved in 'tiff' subdirectory.")
+    return
 
 
 
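
Note: the new `convert_numpy_to_tiff` pairs with the `import tifffile` added at the top of io.py. As recorded, it builds `npy_files` but then iterates `files`, so any non-`.npy` entry (including the freshly created `tiff/` subdirectory itself) would reach `np.load` and raise. A tightened variant that iterates only the filtered list — the name `convert_numpy_to_tiff_strict` is hypothetical:

```python
import os
import numpy as np
import tifffile

def convert_numpy_to_tiff_strict(folder_path, limit=None):
    """Variant of convert_numpy_to_tiff that only touches .npy files."""
    tiff_subdir = os.path.join(folder_path, 'tiff')
    os.makedirs(tiff_subdir, exist_ok=True)

    npy_files = [f for f in os.listdir(folder_path) if f.endswith('.npy')]
    for i, filename in enumerate(npy_files[:limit]):  # limit=None keeps the whole list
        numpy_array = np.load(os.path.join(folder_path, filename))
        tiff_filename = os.path.splitext(filename)[0] + '.tif'
        # tifffile.imwrite stores the array as-is; multi-channel stacks are preserved
        tifffile.imwrite(os.path.join(tiff_subdir, tiff_filename), numpy_array)
        print(f"Converted {filename} ({i+1}/{len(npy_files)})")
```
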
spacr/measure.py CHANGED
@@ -92,6 +92,7 @@ def _calculate_zernike(mask, df, degree=8):
     else:
         return df
 
+@log_function_call
 def _morphological_measurements(cell_mask, nucleus_mask, pathogen_mask, cytoplasm_mask, settings, zernike=True, degree=8):
     """
     Calculate morphological measurements for cells, nucleus, pathogens, and cytoplasms based on the given masks.
@@ -435,6 +436,7 @@ def _estimate_blur(image):
     # Compute and return the variance of the Laplacian
     return lap.var()
 
+@log_function_call
 def _intensity_measurements(cell_mask, nucleus_mask, pathogen_mask, cytoplasm_mask, channel_arrays, settings, sizes=[3, 6, 12, 24], periphery=True, outside=True):
     """
     Calculate various intensity measurements for different regions in the image.
@@ -676,7 +678,6 @@ def _measure_crop_core(index, time_ls, file, settings):
     crop_ls = settings['crop_mode']
     size_ls = settings['png_size']
     for crop_idx, crop_mode in enumerate(crop_ls):
-        print(crop_idx, crop_mode)
         width, height = size_ls[crop_idx]
         if crop_mode == 'cell':
             crop_mask = cell_mask.copy()
@@ -830,6 +831,14 @@ def measure_crop(settings, annotation_settings, advanced_settings):
     Returns:
     None
     """
+
+    if settings.get('test_mode', False):
+        if not os.basename(settings['src']) == 'test':
+            src = os.path.join(src, 'test')
+            settings['src'] = src
+            print(f'Changed source folder to {src} for test mode')
+        else:
+            print(f'Test mode enabled, using source folder {settings["src"]}')
 
     from .io import _save_settings_to_db
     from .timelapse import _timelapse_masks_to_gif, _scmovie
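
Note: as recorded, the new test-mode block would fail at runtime: `os.basename` does not exist (the function is `os.path.basename`), and `src` is read on the `os.path.join(src, 'test')` line before it is ever assigned. A corrected sketch of the apparent intent — an assumption, mirroring the test-mode handling added to `preprocess_img_data` in io.py:

```python
import os

def _redirect_to_test_folder(settings):
    """Point settings['src'] at the 'test' subfolder unless it already is."""
    src = settings['src']
    if os.path.basename(src) != 'test':   # os.path.basename, not os.basename
        src = os.path.join(src, 'test')
        settings['src'] = src
        print(f'Changed source folder to {src} for test mode')
    else:
        print(f'Test mode enabled, using source folder {src}')
    return settings

settings = {'src': '/path/to/plate'}  # hypothetical
settings = _redirect_to_test_folder(settings)
```
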
@@ -913,21 +922,6 @@ def measure_crop(settings, annotation_settings, advanced_settings):
                 time_left = (((files_to_process-files_processed)*average_time)/max_workers)/60
                 print(f'Progress: {files_processed}/{files_to_process} Time/img {average_time:.3f}sec, Time Remaining {time_left:.3f} min.', end='\r', flush=True)
             result.get()
-
-    #if settings['save_png']:
-    #    img_fldr = os.path.join(os.path.dirname(settings['input_folder']), 'data')
-    #    sc_img_fldrs = _list_endpoint_subdirectories(img_fldr)
-    #    for well_src in sc_img_fldrs:
-    #        if len(os.listdir(well_src)) < 16:
-    #            nr_imgs = len(os.listdir(well_src))
-    #            standardize = False
-    #        else:
-    #            nr_imgs = 16
-    #            standardize = True
-    #        try:
-    #            _save_scimg_plot(src=well_src, nr_imgs=nr_imgs, channel_indices=settings['png_dims'], um_per_pixel=0.1, scale_bar_length_um=10, standardize=standardize, fontsize=12, show_filename=True, channel_names=['red','green','blue'], dpi=300, plot=False)
-    #        except Exception as e: # Consider catching a more specific exception if possible
-    #            print(f"Unable to generate figure for folder {well_src}: {e}", flush=True)
 
     if settings['save_png']:
         img_fldr = os.path.join(os.path.dirname(settings['input_folder']), 'data')
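
Note: the surviving progress line estimates time remaining by scaling the mean per-image time by the number of queued images and splitting it across the worker pool. The same arithmetic in isolation:

```python
import numpy as np

time_ls = [0.8, 1.1, 0.9]      # seconds per processed image so far
files_to_process = 500
files_processed = len(time_ls)
max_workers = 4

average_time = np.mean(time_ls)
# remaining images * seconds per image, split across workers, in minutes
time_left = ((files_to_process - files_processed) * average_time) / max_workers / 60
print(f'Progress: {files_processed}/{files_to_process} '
      f'Time/img {average_time:.3f}sec, Time Remaining {time_left:.3f} min.')
```
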
@@ -983,7 +977,7 @@ def measure_crop(settings, annotation_settings, advanced_settings):
         img_fldr = os.path.join(os.path.dirname(settings['input_folder']), 'data')
         sc_img_fldrs = _list_endpoint_subdirectories(img_fldr)
         _scmovie(sc_img_fldrs)
-
+    print("Successfully completed run")
 
 def generate_cellpose_train_set(folders, dst, min_objects=5):
     os.makedirs(dst, exist_ok=True)