spacr 0.0.18__py3-none-any.whl → 0.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/io.py CHANGED
@@ -1,6 +1,7 @@
  import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose
  import numpy as np
  import pandas as pd
+ import tifffile
  from PIL import Image
  from collections import defaultdict, Counter
  from pathlib import Path
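
Note: the new top-level tifffile import supports the convert_numpy_to_tiff helper added near the end of this file. A minimal sketch of the array-to-TIFF round trip it enables (hypothetical file name):

    import numpy as np
    import tifffile

    arr = np.zeros((512, 512), dtype=np.uint16)
    tifffile.imwrite('example.tif', arr)                   # write the array as a TIFF
    assert (tifffile.imread('example.tif') == arr).all()   # read it back unchanged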
@@ -22,9 +23,9 @@ import seaborn as sns
  import matplotlib.pyplot as plt
  from torchvision.transforms import ToTensor

+
  from .logger import log_function_call

- @log_function_call
  def _load_images_and_labels(image_files, label_files, circular=False, invert=False, image_extension="*.tif", label_extension="*.tif"):

      from .utils import invert_image, apply_mask
@@ -87,7 +88,6 @@ def _load_images_and_labels(image_files, label_files, circular=False, invert=Fal
      print(f'image shape: {images[0].shape}, image type: images[0].shape mask shape: {labels[0].shape}, image type: labels[0].shape')
      return images, labels, image_names, label_names

- @log_function_call
  def _load_normalized_images_and_labels(image_files, label_files, signal_thresholds=[1000], channels=None, percentiles=None, circular=False, invert=False, visualize=False):

      from .plot import normalize_and_visualize
@@ -527,6 +527,7 @@ class TarImageDataset(Dataset):

          return img, m.name

+ @log_function_call
  def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=False, skip_mode='01', metadata_type='', img_format='.tif'):
      """
      Convert z-stack images to maximum intensity projection (MIP) images.
@@ -706,7 +707,7 @@ def _move_to_chan_folder(src, regex, timelapse=False, metadata_type=''):
          if metadata_type =='cq1':
              orig_wellID = wellID
              wellID = _convert_cq1_well_id(wellID)
-             print(f'Converted Well ID: {orig_wellID} to {wellID}')
+             print(f'Converted Well ID: {orig_wellID} to {wellID}')#, end='\r', flush=True)

          newname = f"{plateID}_{wellID}_{fieldID}_{timeID if timelapse else ''}{ext}"
          newpath = src / chanID
@@ -757,6 +758,7 @@ def _merge_channels(src, plot=False):

      # Create the 'stack' directory if it doesn't exist
      stack_dir.mkdir(exist_ok=True)
+     print(f'generated folder with merged arrays: {stack_dir}')

      if _is_dir_empty(stack_dir):
          with Pool(cpu_count()) as pool:
@@ -819,6 +821,7 @@ def _mip_all(src, include_first_chan=True):
          np.save(os.path.join(src, filename), concatenated)
      return

+ @log_function_call
  def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_size=100):
      """
      Concatenates channel data from multiple files and saves the concatenated data as numpy arrays.
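
Note: this hunk and several others apply the @log_function_call decorator (imported from .logger at the top of the file) to internal functions. The decorator's implementation is not part of this diff; as an assumption, a decorator of that name usually has this shape:

    import functools

    def log_function_call(func):
        # Hypothetical sketch: log entry to and exit from the wrapped function.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print(f'Calling {func.__name__}')
            result = func(*args, **kwargs)
            print(f'Finished {func.__name__}')
            return result
        return wrapper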
@@ -853,7 +856,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
                  array = np.take(array, channels, axis=2)
                  stack_region.append(array)
                  filenames_region.append(os.path.basename(path))
-             clear_output(wait=True)
+             #clear_output(wait=True)
              print(f'Region {i+1}/ {len(time_stack_path_lists)}', end='\r', flush=True)
              stack = np.stack(stack_region)
              save_loc = os.path.join(channel_stack_loc, f'{name}.npz')
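
Note: this hunk and the similar ones below all comment out IPython's clear_output(wait=True), so the module no longer assumes a notebook frontend; the surviving prints with end='\r', flush=True overwrite a single progress line in a plain terminal. The idiom in isolation:

    import time

    # Carriage-return progress line: each print overwrites the previous one.
    for i in range(100):
        print(f'Region {i+1}/100', end='\r', flush=True)
        time.sleep(0.01)
    print()  # move past the progress line when done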
@@ -879,7 +882,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
                  array = np.take(array, channels, axis=2)
              stack_ls.append(array)
              filenames_batch.append(os.path.basename(path))  # store the filename
-             clear_output(wait=True)
+             #clear_output(wait=True)
              print(f'Concatenated: {i+1}/{nr_files} files')
              #print(f'Concatenated: {i+1}/{nr_files} files', end='\r', flush=True)

@@ -887,7 +890,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
              unique_shapes = {arr.shape[:-1] for arr in stack_ls}
              if len(unique_shapes) > 1:
                  max_dims = np.max(np.array(list(unique_shapes)), axis=0)
-                 clear_output(wait=True)
+                 #clear_output(wait=True)
                  print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}')
                  #print(f'Warning: arrays with multiple shapes found in batch {i+1}. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
                  padded_stack_ls = []
@@ -1015,7 +1018,7 @@ def _normalize_stack(src, backgrounds=[100,100,100], remove_background=False, lo
              duration = (stop - start)*single_channel.shape[0]
              time_ls.append(duration)
              average_time = np.mean(time_ls) if len(time_ls) > 0 else 0
-             clear_output(wait=True)
+             #clear_output(wait=True)
              print(f'Progress: files {file_index+1}/{len(paths)}, channels:{chan_index}/{stack.shape[-1]-1}, arrays:{array_index+1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec')
              #print(f'Progress: files {file_index+1}/{len(paths)}, channels:{chan_index}/{stack.shape[-1]-1}, arrays:{array_index+1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec', end='\r', flush=True)
          normalized_single_channel = exposure.rescale_intensity(arr_2d_normalized, out_range='dtype')
@@ -1072,8 +1075,6 @@ def _normalize_timelapse(src, lower_quantile=0.01, save_dtype=np.float32):

      print(f'\nSaved normalized stacks: {output_fldr}')

-
-
  def _create_movies_from_npy_per_channel(src, fps=10):
      """
      Create movies from numpy files per channel.
@@ -1125,9 +1126,33 @@ def _create_movies_from_npy_per_channel(src, fps=10):
          channel_save_path = os.path.join(save_path, f'{plate}_{well}_{field}_channel_{channel}.mp4')
          _npz_to_movie(normalized_channel_arrays_3d, filenames, channel_save_path, fps)

+ def delete_empty_subdirectories(folder_path):
+     """
+     Deletes all empty subdirectories in the specified folder.
+
+     Args:
+     - folder_path (str): The path to the folder in which to look for empty subdirectories.
+     """
+     # Check each item in the specified folder
+     for dirpath, dirnames, filenames in os.walk(folder_path, topdown=False):
+         # os.walk is used with topdown=False to start from the innermost directories and work upwards.
+         for dirname in dirnames:
+             # Construct the full path to the subdirectory
+             full_dir_path = os.path.join(dirpath, dirname)
+             # Try to remove the directory and catch any error (like if the directory is not empty)
+             try:
+                 os.rmdir(full_dir_path)
+                 print(f"Deleted empty directory: {full_dir_path}")
+             except OSError as e:
+                 continue
+                 # An error occurred, likely because the directory is not empty
+                 #print(f"Skipping non-empty directory: {full_dir_path}")
+
+ @log_function_call
  def preprocess_img_data(settings):

      from .plot import plot_arrays, _plot_4D_arrays
+     from .utils import _run_test_mode

      """
      Preprocesses image data by converting z-stack images to maximum intensity projection (MIP) images.
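
Note: the new delete_empty_subdirectories helper relies on os.rmdir raising OSError for non-empty directories, and walking with topdown=False prunes nested empty directories innermost-first. A usage sketch with a hypothetical layout:

    import os

    os.makedirs('src/empty', exist_ok=True)   # will be removed
    os.makedirs('src/full', exist_ok=True)    # will survive
    open('src/full/file.npy', 'w').close()

    delete_empty_subdirectories('src')  # prints 'Deleted empty directory: src/empty'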
@@ -1158,12 +1183,16 @@ def preprocess_img_data(settings):
      Returns:
      None
      """
+
      src = settings['src']
      valid_ext = ['tif', 'tiff', 'png', 'jpeg']
      files = os.listdir(src)
      extensions = [file.split('.')[-1] for file in files]
      extension_counts = Counter(extensions)
      most_common_extension = extension_counts.most_common(1)[0][0]
+     img_format = None
+
+     delete_empty_subdirectories(src)

      # Check if the most common extension is one of the specified image formats
      if most_common_extension in valid_ext:
@@ -1171,8 +1200,14 @@ def preprocess_img_data(settings):
          print(f'Found {extension_counts[most_common_extension]} {most_common_extension} files')
      else:
          print(f'Could not find any {valid_ext} files in {src} only found {extension_counts[0]}')
-         return
-
+         if os.path.exists(src+'/stack'):
+             print('Found existing stack folder.')
+         if os.path.exists(src+'/channel_stack'):
+             print('Found existing channel_stack folder.')
+         if os.path.exists(src+'/norm_channel_stack'):
+             print('Found existing norm_channel_stack folder. Skipping preprocessing')
+             return
+
      cmap = 'inferno'
      figuresize = 20
      normalize = True
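
Note: a behavioral change in this hunk: when no recognized image files are found, preprocess_img_data no longer returns immediately; it reports any stack, channel_stack and norm_channel_stack folders left by a previous run and returns (skipping preprocessing) only once norm_channel_stack exists. A condensed restatement of that resume check, as a hypothetical helper:

    def _preprocessing_already_done(src):
        # Restates the checks above: report found folders, skip only on the last one.
        for folder in ('stack', 'channel_stack', 'norm_channel_stack'):
            if os.path.exists(os.path.join(src, folder)):
                print(f'Found existing {folder} folder.')
        return os.path.exists(os.path.join(src, 'norm_channel_stack'))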
@@ -1194,57 +1229,73 @@ def preprocess_img_data(settings):
      all_to_mip = settings['all_to_mip']
      pick_slice = settings['pick_slice']
      skip_mode = settings['skip_mode']
-
-     if metadata_type == 'cellvoyager':
-         regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-     elif metadata_type == 'cq1':
-         regex = f'W(?P<wellID>.*)F(?P<fieldID>.*)T(?P<timeID>.*)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-     elif metadata_type == 'nikon':
-         regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-     elif metadata_type == 'zeis':
-         regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-     elif metadata_type == 'leica':
-         regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
-     elif metadata_type == 'custom':
-         regex = f'({custom_regex}){img_format}'
+
+
+     if not img_format == None:
+         if metadata_type == 'cellvoyager':
+             regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+         elif metadata_type == 'cq1':
+             regex = f'W(?P<wellID>.*)F(?P<fieldID>.*)T(?P<timeID>.*)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+         elif metadata_type == 'nikon':
+             regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+         elif metadata_type == 'zeis':
+             regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+         elif metadata_type == 'leica':
+             regex = f'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*){img_format}'
+         elif metadata_type == 'custom':
+             regex = f'({custom_regex}){img_format}'

-     print(f'regex mode:{metadata_type} regex:{regex}')
+         print(f'regex mode:{metadata_type} regex:{regex}')
+
+     if settings.get('test_mode', False):
+         try:
+             os.rmdir(os.path.join(src, 'test'))
+             print(f"Deleted test directory: {os.path.join(src, 'test')}")
+         except OSError as e:
+             pass
+
+         src = _run_test_mode(settings['src'], regex, timelapse=timelapse)
+         settings['src'] = src

      if not os.path.exists(src+'/stack'):
-         if timelapse:
-             _move_to_chan_folder(src, regex, timelapse, metadata_type)
-         else:
-             #_z_to_mip(src, regex, batch_size, pick_slice, skip_mode, metadata_type, img_format)
-             _rename_and_organize_image_files(src, regex, batch_size, pick_slice, skip_mode, metadata_type, img_format)
-
-         #Make sure no batches will be of only one image
-         all_imgs = len(src+'/stack')
-         full_batches = all_imgs // batch_size
-         last_batch_size = all_imgs % batch_size
-
-         # Check if the last batch is of size 1
-         if last_batch_size == 1:
-             # If there's only one batch and its size is 1, it's also an issue
-             if full_batches == 0:
-                 raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
-             # If the last batch is of size 1, merge it with the second last batch
-             elif full_batches > 0:
-                 raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
-
-         _merge_channels(src, plot=False)
-         if timelapse:
-             _create_movies_from_npy_per_channel(src+'/stack', fps=2)
-
-         if plot:
-             print(f'plotting {nr} images from {src}/stack')
-             plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
-         if all_to_mip:
-             _mip_all(src+'/stack')
-             if plot:
-                 print(f'plotting {nr} images from {src}/stack')
-                 plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
-         #nr_of_stacks = len(src+'/channel_stack')
-
+         try:
+             if not img_format == None:
+                 if timelapse:
+                     _move_to_chan_folder(src, regex, timelapse, metadata_type)
+                 else:
+                     _rename_and_organize_image_files(src, regex, batch_size, pick_slice, skip_mode, metadata_type, img_format)
+
+                 #Make sure no batches will be of only one image
+                 all_imgs = len(src+'/stack')
+                 full_batches = all_imgs // batch_size
+                 last_batch_size = all_imgs % batch_size
+
+                 # Check if the last batch is of size 1
+                 if last_batch_size == 1:
+                     # If there's only one batch and its size is 1, it's also an issue
+                     if full_batches == 0:
+                         raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
+                     # If the last batch is of size 1, merge it with the second last batch
+                     elif full_batches > 0:
+                         raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
+
+                 _merge_channels(src, plot=False)
+
+                 if timelapse:
+                     _create_movies_from_npy_per_channel(src+'/stack', fps=2)
+
+                 if plot:
+                     print(f'plotting {nr} images from {src}/stack')
+                     plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
+                 if all_to_mip:
+                     _mip_all(src+'/stack')
+                     if plot:
+                         print(f'plotting {nr} images from {src}/stack')
+                         plot_arrays(src+'/stack', figuresize, cmap, nr=nr, normalize=normalize)
+         except Exception as e:
+             print(f"Error: {e}")
+
+     print('concatinating cahnnels')
      _concatenate_channel(src+'/stack',
                           channels=mask_channels,
                           randomize=randomize,
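
Note: the regex table above builds named-group patterns per microscope vendor. How such a pattern decomposes a filename, using the cellvoyager-style pattern with img_format assumed to be '.tif' and a hypothetical file name:

    import re

    regex = r'(?P<plateID>.*)_(?P<wellID>.*)_T(?P<timeID>.*)F(?P<fieldID>.*)L(?P<laserID>..)A(?P<AID>..)Z(?P<sliceID>.*)C(?P<chanID>.*).tif'
    m = re.match(regex, 'plate1_B02_T0001F001L01A01Z01C01.tif')
    if m:
        # prints: plate1 B02 0001 001 01
        print(m.group('plateID'), m.group('wellID'), m.group('timeID'), m.group('fieldID'), m.group('chanID'))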
@@ -1254,7 +1305,6 @@ def preprocess_img_data(settings):
      if plot:
          print(f'plotting {nr} images from {src}/channel_stack')
          _plot_4D_arrays(src+'/channel_stack', figuresize, cmap, nr_npz=1, nr=nr)
-     nr_of_chan_stacks = len(src+'/channel_stack')

      backgrounds, signal_to_noise, signal_thresholds = _get_lists_for_normalization(settings=settings)

@@ -1273,7 +1323,7 @@ def preprocess_img_data(settings):
      if plot:
          _plot_4D_arrays(src+'/norm_channel_stack', nr_npz=1, nr=nr)

-     return
+     return settings, src

  def _check_masks(batch, batch_filenames, output_folder):
      """
@@ -1295,8 +1345,7 @@ def _check_masks(batch, batch_filenames, output_folder):
      filtered_filenames = [f for f, exists in zip(batch_filenames, existing_files_mask) if exists]

      return np.array(filtered_batch), filtered_filenames
-
-
+
  def _get_avg_object_size(masks):
      """
      Calculate the average size of objects in a list of masks.
@@ -1450,6 +1499,56 @@ def _save_settings_to_db(settings):
      settings_df.to_sql('settings', conn, if_exists='replace', index=False)  # Replace the table if it already exists
      conn.close()

+ def _save_mask_timelapse_as_gif_v1(masks, path, cmap, norm, filenames):
+     """
+     Save a timelapse of masks as a GIF.
+
+     Parameters:
+     masks (list): List of mask frames.
+     path (str): Path to save the GIF.
+     cmap: Colormap for displaying the masks.
+     norm: Normalization for the masks.
+     filenames (list): List of filenames corresponding to each mask frame.
+
+     Returns:
+     None
+     """
+     def _update(frame):
+         """
+         Update the plot with the given frame.
+
+         Parameters:
+         frame (int): The frame number to update the plot with.
+
+         Returns:
+         None
+         """
+         nonlocal filename_text_obj
+         if filename_text_obj is not None:
+             filename_text_obj.remove()
+         ax.clear()
+         ax.axis('off')
+         current_mask = masks[frame]
+         ax.imshow(current_mask, cmap=cmap, norm=norm)
+         ax.set_title(f'Frame: {frame}', fontsize=24, color='white')
+         filename_text = filenames[frame]
+         filename_text_obj = fig.text(0.5, 0.01, filename_text, ha='center', va='center', fontsize=20, color='white')
+         for label_value in np.unique(current_mask):
+             if label_value == 0: continue  # Skip background
+             y, x = np.mean(np.where(current_mask == label_value), axis=1)
+             ax.text(x, y, str(label_value), color='white', fontsize=24, ha='center', va='center')
+
+     fig, ax = plt.subplots(figsize=(50, 50), facecolor='black')
+     ax.set_facecolor('black')
+     ax.axis('off')
+     plt.subplots_adjust(left=0, right=1, top=1, bottom=0, wspace=0, hspace=0)
+
+     filename_text_obj = None
+     anim = FuncAnimation(fig, _update, frames=len(masks), blit=False)
+     anim.save(path, writer='pillow', fps=2, dpi=80)  # Adjust DPI for size/quality
+     plt.close(fig)
+     print(f'Saved timelapse to {path}')
+
  def _save_mask_timelapse_as_gif(masks, tracks_df, path, cmap, norm, filenames):
      """
      Save a timelapse animation of masks as a GIF.
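
Note: the reinstated _save_mask_timelapse_as_gif_v1 variant drops the tracks_df overlay. The FuncAnimation-to-GIF pipeline it uses, reduced to a minimal self-contained sketch with random masks standing in for real data:

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.animation import FuncAnimation

    masks = [np.random.randint(0, 5, (64, 64)) for _ in range(10)]
    fig, ax = plt.subplots()

    def _update(frame):
        ax.clear()
        ax.imshow(masks[frame], cmap='nipy_spectral')
        ax.set_title(f'Frame: {frame}')

    anim = FuncAnimation(fig, _update, frames=len(masks), blit=False)
    anim.save('timelapse.gif', writer='pillow', fps=2, dpi=80)
    plt.close(fig)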
@@ -1504,9 +1603,10 @@ def _save_mask_timelapse_as_gif(masks, tracks_df, path, cmap, norm, filenames):
              ax.text(x, y, str(label_value), color='white', fontsize=24, ha='center', va='center')

          # Overlay tracks
-         for track in tracks_df['track_id'].unique():
-             _track = tracks_df[tracks_df['track_id'] == track]
-             ax.plot(_track['x'], _track['y'], '-w', linewidth=1)
+         if tracks_df is not None:
+             for track in tracks_df['track_id'].unique():
+                 _track = tracks_df[tracks_df['track_id'] == track]
+                 ax.plot(_track['x'], _track['y'], '-w', linewidth=1)

      anim = FuncAnimation(fig, _update, frames=len(masks), blit=False)
      anim.save(path, writer='pillow', fps=2, dpi=80)  # Adjust DPI for size/quality
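
Note: guarding the track overlay with "if tracks_df is not None" means the GIF writer now also accepts runs without tracking data, i.e. a call of the shape:

    # tracks_df may now be None when no tracking data is available
    _save_mask_timelapse_as_gif(masks, None, path, cmap, norm, filenames)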
@@ -1620,56 +1720,63 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di

      # Iterate through each file in the reference folder
      for filename in os.listdir(reference_folder):
-
          stack_ls = []
-         array_path = []
-
          if filename.endswith('.npy'):
-             count+=1
-             # Initialize the concatenated array with the array from the reference folder
-             concatenated_array = np.load(os.path.join(reference_folder, filename))
-             if channels is not None:
-                 concatenated_array = np.take(concatenated_array, channels, axis=2)
+             count += 1
+
+             # Check if this file exists in all the other specified folders
+             exists_in_all_folders = all(os.path.isfile(os.path.join(folder, filename)) for folder in folder_paths)
+
+             if exists_in_all_folders:
+                 # Load and potentially modify the array from the reference folder
+                 ref_array_path = os.path.join(reference_folder, filename)
+                 concatenated_array = np.load(ref_array_path)
+
+                 if channels is not None:
+                     concatenated_array = np.take(concatenated_array, channels, axis=2)
+
+                 # Add the array from the reference folder to 'stack_ls'
                  stack_ls.append(concatenated_array)
-             # For each of the other folders, load the array and concatenate it
-             for folder in folder_paths[1:]:
-                 array_path = os.path.join(folder, filename)
-                 if os.path.isfile(array_path):
+
+                 # For each of the other folders, load the array and add it to 'stack_ls'
+                 for folder in folder_paths[1:]:
+                     array_path = os.path.join(folder, filename)
                      array = np.load(array_path)
                      if array.ndim == 2:
-                         array = np.expand_dims(array, axis=-1) # add an extra dimension if the array is 2D
+                         array = np.expand_dims(array, axis=-1)  # Add an extra dimension if the array is 2D
                      stack_ls.append(array)

-             stack_ls = [np.expand_dims(arr, axis=-1) if arr.ndim == 2 else arr for arr in stack_ls]
-             unique_shapes = {arr.shape[:-1] for arr in stack_ls}
-             if len(unique_shapes) > 1:
-                 #max_dims = np.max(np.array(list(unique_shapes)), axis=0)
-                 # Determine the maximum length of tuples in unique_shapes
-                 max_tuple_length = max(len(shape) for shape in unique_shapes)
-                 # Pad shorter tuples with zeros to make them all the same length
-                 padded_shapes = [shape + (0,) * (max_tuple_length - len(shape)) for shape in unique_shapes]
-                 # Now create a NumPy array and find the maximum dimensions
-                 max_dims = np.max(np.array(padded_shapes), axis=0)
-                 clear_output(wait=True)
-                 print(f'Warning: arrays with multiple shapes found. Padding arrays to max X,Y dimentions {max_dims}')
-                 #print(f'Warning: arrays with multiple shapes found. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
-                 padded_stack_ls = []
-                 for arr in stack_ls:
-                     pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
-                     pad_width.append((0, 0))
-                     padded_arr = np.pad(arr, pad_width)
-                     padded_stack_ls.append(padded_arr)
-                 # Concatenate the padded arrays along the channel dimension (last dimension)
-                 stack = np.concatenate(padded_stack_ls, axis=-1)
+                 if len(stack_ls) > 0:
+                     stack_ls = [np.expand_dims(arr, axis=-1) if arr.ndim == 2 else arr for arr in stack_ls]
+                     unique_shapes = {arr.shape[:-1] for arr in stack_ls}
+                     if len(unique_shapes) > 1:
+                         #max_dims = np.max(np.array(list(unique_shapes)), axis=0)
+                         # Determine the maximum length of tuples in unique_shapes
+                         max_tuple_length = max(len(shape) for shape in unique_shapes)
+                         # Pad shorter tuples with zeros to make them all the same length
+                         padded_shapes = [shape + (0,) * (max_tuple_length - len(shape)) for shape in unique_shapes]
+                         # Now create a NumPy array and find the maximum dimensions
+                         max_dims = np.max(np.array(padded_shapes), axis=0)
+                         #clear_output(wait=True)
+                         print(f'Warning: arrays with multiple shapes found. Padding arrays to max X,Y dimentions {max_dims}')
+                         #print(f'Warning: arrays with multiple shapes found. Padding arrays to max X,Y dimentions {max_dims}', end='\r', flush=True)
+                         padded_stack_ls = []
+                         for arr in stack_ls:
+                             pad_width = [(0, max_dim - dim) for max_dim, dim in zip(max_dims, arr.shape[:-1])]
+                             pad_width.append((0, 0))
+                             padded_arr = np.pad(arr, pad_width)
+                             padded_stack_ls.append(padded_arr)
+                         # Concatenate the padded arrays along the channel dimension (last dimension)
+                         stack = np.concatenate(padded_stack_ls, axis=-1)

-             else:
-                 stack = np.concatenate(stack_ls, axis=-1)
+                     else:
+                         stack = np.concatenate(stack_ls, axis=-1)

-             if stack.shape[-1] > concatenated_array.shape[-1]:
-                 output_path = os.path.join(output_folder, filename)
-                 np.save(output_path, stack)
+                     if stack.shape[-1] > concatenated_array.shape[-1]:
+                         output_path = os.path.join(output_folder, filename)
+                         np.save(output_path, stack)

-         clear_output(wait=True)
+         #clear_output(wait=True)
          print(f'Files merged: {count}/{all_imgs}')
          #print(f'Files merged: {count}/{all_imgs}', end='\r', flush=True)
      return
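
Note: the rewrite wraps the merge in an exists_in_all_folders check, so a filename missing from any companion folder is skipped instead of being merged into a partial stack. The guard in isolation:

    # Only merge files that are present in every mask folder.
    exists_in_all_folders = all(
        os.path.isfile(os.path.join(folder, filename))
        for folder in folder_paths
    )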
@@ -2145,7 +2252,42 @@ def _read_mask(mask_path):
      if mask.dtype != np.uint16:
          mask = img_as_uint(mask)
      return mask
+
+
+ def convert_numpy_to_tiff(folder_path, limit=None):
+     """
+     Converts all numpy files in a folder to TIFF format and saves them in a subdirectory 'tiff'.
+
+     Args:
+     folder_path (str): The path to the folder containing numpy files.
+     """
+     # Create the subdirectory 'tiff' within the specified folder if it doesn't already exist
+     tiff_subdir = os.path.join(folder_path, 'tiff')
+     os.makedirs(tiff_subdir, exist_ok=True)
+
+     files = os.listdir(folder_path)
+
+     npy_files = [f for f in files if f.endswith('.npy')]

+     # Iterate over all files in the folder
+     for i, filename in enumerate(files):
+         if limit is not None and i >= limit:
+             break
+
+         # Construct the full file path
+         file_path = os.path.join(folder_path, filename)
+         # Load the numpy file
+         numpy_array = np.load(file_path)
+
+         # Construct the output TIFF file path
+         tiff_filename = os.path.splitext(filename)[0] + '.tif'
+         tiff_file_path = os.path.join(tiff_subdir, tiff_filename)
+
+         # Save the numpy array as a TIFF file
+         tifffile.imwrite(tiff_file_path, numpy_array)
+
+         print(f"Converted {filename} to {tiff_filename} and saved in 'tiff' subdirectory.")
+     return


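Note: convert_numpy_to_tiff builds npy_files but then iterates over files, so a folder containing anything other than .npy entries (including the freshly created tiff/ subdirectory itself) will make np.load raise. A hedged sketch of the assumed intent, filtering before the loop:

    import os
    import numpy as np
    import tifffile

    def convert_numpy_to_tiff_fixed(folder_path, limit=None):
        # Sketch: convert only the .npy files, up to an optional limit.
        tiff_subdir = os.path.join(folder_path, 'tiff')
        os.makedirs(tiff_subdir, exist_ok=True)
        npy_files = [f for f in os.listdir(folder_path) if f.endswith('.npy')]
        for i, filename in enumerate(npy_files):
            if limit is not None and i >= limit:
                break
            numpy_array = np.load(os.path.join(folder_path, filename))
            tiff_filename = os.path.splitext(filename)[0] + '.tif'
            tifffile.imwrite(os.path.join(tiff_subdir, tiff_filename), numpy_array)
            print(f"Converted {filename} to {tiff_filename}.")
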
spacr/measure.py CHANGED
@@ -92,6 +92,7 @@ def _calculate_zernike(mask, df, degree=8):
      else:
          return df

+ @log_function_call
  def _morphological_measurements(cell_mask, nucleus_mask, pathogen_mask, cytoplasm_mask, settings, zernike=True, degree=8):
      """
      Calculate morphological measurements for cells, nucleus, pathogens, and cytoplasms based on the given masks.
@@ -435,6 +436,7 @@ def _estimate_blur(image):
      # Compute and return the variance of the Laplacian
      return lap.var()

+ @log_function_call
  def _intensity_measurements(cell_mask, nucleus_mask, pathogen_mask, cytoplasm_mask, channel_arrays, settings, sizes=[3, 6, 12, 24], periphery=True, outside=True):
      """
      Calculate various intensity measurements for different regions in the image.
@@ -676,7 +678,6 @@ def _measure_crop_core(index, time_ls, file, settings):
      crop_ls = settings['crop_mode']
      size_ls = settings['png_size']
      for crop_idx, crop_mode in enumerate(crop_ls):
-         print(crop_idx, crop_mode)
          width, height = size_ls[crop_idx]
          if crop_mode == 'cell':
              crop_mask = cell_mask.copy()
@@ -830,6 +831,14 @@ def measure_crop(settings, annotation_settings, advanced_settings):
      Returns:
      None
      """
+
+     if settings.get('test_mode', False):
+         if not os.basename(settings['src']) == 'test':
+             src = os.path.join(src, 'test')
+             settings['src'] = src
+             print(f'Changed source folder to {src} for test mode')
+         else:
+             print(f'Test mode enabled, using source folder {settings["src"]}')

      from .io import _save_settings_to_db
      from .timelapse import _timelapse_masks_to_gif, _scmovie
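
Note: the new test-mode block above has two problems as released: os.basename does not exist (the function is os.path.basename), and src is read before it is assigned. A hedged sketch of the assumed intent:

    # Hypothetical corrected version of the block above.
    if settings.get('test_mode', False):
        if os.path.basename(settings['src']) != 'test':
            settings['src'] = os.path.join(settings['src'], 'test')
            print(f"Changed source folder to {settings['src']} for test mode")
        else:
            print(f"Test mode enabled, using source folder {settings['src']}")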
@@ -913,21 +922,6 @@ def measure_crop(settings, annotation_settings, advanced_settings):
              time_left = (((files_to_process-files_processed)*average_time)/max_workers)/60
              print(f'Progress: {files_processed}/{files_to_process} Time/img {average_time:.3f}sec, Time Remaining {time_left:.3f} min.', end='\r', flush=True)
          result.get()
-
-     #if settings['save_png']:
-     #    img_fldr = os.path.join(os.path.dirname(settings['input_folder']), 'data')
-     #    sc_img_fldrs = _list_endpoint_subdirectories(img_fldr)
-     #    for well_src in sc_img_fldrs:
-     #        if len(os.listdir(well_src)) < 16:
-     #            nr_imgs = len(os.listdir(well_src))
-     #            standardize = False
-     #        else:
-     #            nr_imgs = 16
-     #            standardize = True
-     #        try:
-     #            _save_scimg_plot(src=well_src, nr_imgs=nr_imgs, channel_indices=settings['png_dims'], um_per_pixel=0.1, scale_bar_length_um=10, standardize=standardize, fontsize=12, show_filename=True, channel_names=['red','green','blue'], dpi=300, plot=False)
-     #        except Exception as e: # Consider catching a more specific exception if possible
-     #            print(f"Unable to generate figure for folder {well_src}: {e}", flush=True)

      if settings['save_png']:
          img_fldr = os.path.join(os.path.dirname(settings['input_folder']), 'data')