spacr 0.4.1__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/gui_utils.py CHANGED
@@ -106,7 +106,6 @@ def parse_list(value):
     except (ValueError, SyntaxError) as e:
         raise ValueError(f"Invalid format for list: {value}. Error: {e}")
 
-# Usage example in your create_input_field function
 def create_input_field(frame, label_text, row, var_type='entry', options=None, default_value=None):
     """
     Create an input field in the specified frame.
@@ -365,27 +364,30 @@ def convert_settings_dict_for_gui(settings):
     from torchvision import models as torch_models
     torchvision_models = [name for name, obj in torch_models.__dict__.items() if callable(obj)]
     chans = ['0', '1', '2', '3', '4', '5', '6', '7', '8', None]
+    chan_list = ['[0,1,2,3,4,5,6,7,8]','[0,1,2,3,4,5,6,7]','[0,1,2,3,4,5,6]','[0,1,2,3,4,5]','[0,1,2,3,4]','[0,1,2,3]', '[0,1,2]', '[0,1]', '[0]', '[0,0]']
     chans_v2 = [0, 1, 2, 3, None]
+    chans_v3 = list(range(0, 21, 1)) + [None]
+    chans_v4 = [0, 1, 2, 3, None]
     variables = {}
     special_cases = {
-        'metadata_type': ('combo', ['cellvoyager', 'cq1', 'nikon', 'zeis', 'custom'], 'cellvoyager'),
-        'channels': ('combo', ['[0,1,2,3]', '[0,1,2]', '[0,1]', '[0]', '[0,0]'], '[0,1,2,3]'),
+        'metadata_type': ('combo', ['cellvoyager', 'cq1', 'auto', 'custom'], 'cellvoyager'),
+        'channels': ('combo', chan_list, '[0,1,2,3]'),
         'train_channels': ('combo', ["['r','g','b']", "['r','g']", "['r','b']", "['g','b']", "['r']", "['g']", "['b']"], "['r','g','b']"),
-        'channel_dims': ('combo', ['[0,1,2,3]', '[0,1,2]', '[0,1]', '[0]'], '[0,1,2,3]'),
+        'channel_dims': ('combo', chan_list, '[0,1,2,3]'),
         'dataset_mode': ('combo', ['annotation', 'metadata', 'recruitment'], 'metadata'),
         'cov_type': ('combo', ['HC0', 'HC1', 'HC2', 'HC3', None], None),
-        'cell_mask_dim': ('combo', chans, None),
-        'cell_chann_dim': ('combo', chans, None),
-        'nucleus_mask_dim': ('combo', chans, None),
-        'nucleus_chann_dim': ('combo', chans, None),
-        'pathogen_mask_dim': ('combo', chans, None),
-        'pathogen_chann_dim': ('combo', chans, None),
-        'crop_mode': ('combo', [['cell'], ['nucleus'], ['pathogen'], ['cell', 'nucleus'], ['cell', 'pathogen'], ['nucleus', 'pathogen'], ['cell', 'nucleus', 'pathogen']], ['cell']),
-        'magnification': ('combo', [20, 40, 60], 20),
-        'nucleus_channel': ('combo', chans_v2, None),
-        'cell_channel': ('combo', chans_v2, None),
-        'channel_of_interest': ('combo', chans_v2, None),
-        'pathogen_channel': ('combo', chans_v2, None),
+        #'cell_mask_dim': ('combo', chans_v3, None),
+        #'cell_chann_dim': ('combo', chans_v3, None),
+        #'nucleus_mask_dim': ('combo', chans_v3, None),
+        #'nucleus_chann_dim': ('combo', chans_v3, None),
+        #'pathogen_mask_dim': ('combo', chans_v3, None),
+        #'pathogen_chann_dim': ('combo', chans_v3, None),
+        'crop_mode': ('combo', ["['cell']", "['nucleus']", "['pathogen']", "['cell', 'nucleus']", "['cell', 'pathogen']", "['nucleus', 'pathogen']", "['cell', 'nucleus', 'pathogen']"], "['cell']"),
+        #'magnification': ('combo', [20, 40, 60], 20),
+        #'nucleus_channel': ('combo', chans_v3, None),
+        #'cell_channel': ('combo', chans_v3, None),
+        #'channel_of_interest': ('combo', chans_v3, None),
+        #'pathogen_channel': ('combo', chans_v3, None),
         'timelapse_mode': ('combo', ['trackpy', 'btrack'], 'trackpy'),
         'train_mode': ('combo', ['erm', 'irm'], 'erm'),
         'clustering': ('combo', ['dbscan', 'kmean'], 'dbscan'),
@@ -462,10 +464,11 @@ def function_gui_wrapper(function=None, settings={}, q=None, fig_queue=None, imp
     finally:
         # Restore the original plt.show function
         plt.show = original_show
+
 
+
 def run_function_gui(settings_type, settings, q, fig_queue, stop_requested):
 
-    from .gui_utils import process_stdout_stderr
     from .core import generate_image_umap, preprocess_generate_masks
     from .cellpose import identify_masks_finetune, check_cellpose_models, compare_cellpose_masks
     from .submodules import analyze_recruitment
@@ -476,9 +479,10 @@ def run_function_gui(settings_type, settings, q, fig_queue, stop_requested):
     from .sim import run_multiple_simulations
     from .deep_spacr import deep_spacr, apply_model_to_tar
     from .sequencing import generate_barecode_mapping
+
     process_stdout_stderr(q)
-
-    print(f'run_function_gui settings_type: {settings_type}')
+
+    print(f'run_function_gui settings_type: {settings_type}')
 
     if settings_type == 'mask':
         function = preprocess_generate_masks
@@ -523,7 +527,7 @@ def run_function_gui(settings_type, settings, q, fig_queue, stop_requested):
         function = process_non_tif_non_2D_images
         imports = 1
     else:
-        raise ValueError(f"Invalid settings type: {settings_type}")
+        raise ValueError(f"Error: Invalid settings type: {settings_type}")
     try:
         function_gui_wrapper(function, settings, q, fig_queue, imports)
     except Exception as e:
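
Note on the combo changes above: Tk combo boxes hold strings, which is why the new chan_list entries and the reworked 'crop_mode' options are stored as strings such as "[0,1,2,3]" and "['cell']" rather than Python lists. A minimal sketch of how such values round-trip back into lists, modeled on the parse_list shown in the first hunk (only the except clause is visible in this diff, so the ast.literal_eval call is an assumption, not spacr's exact implementation):

    # Illustrative sketch, not spacr's exact code.
    import ast

    def parse_list(value):
        # Turn a combo-box string like "[0,1,2,3]" or "['cell']" back into a list.
        try:
            return list(ast.literal_eval(value))
        except (ValueError, SyntaxError) as e:
            raise ValueError(f"Invalid format for list: {value}. Error: {e}")

    print(parse_list("[0,1,2,3]"))            # [0, 1, 2, 3]
    print(parse_list("['cell', 'nucleus']"))  # ['cell', 'nucleus']
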
spacr/io.py CHANGED
@@ -891,11 +891,16 @@ def _merge_channels(src, plot=False):
     from .utils import print_progress
 
     stack_dir = os.path.join(src, 'stack')
-    allowed_names = ['01', '02', '03', '04', '00', '1', '2', '3', '4', '0']
+    #allowed_names = ['01', '02', '03', '04', '00', '1', '2', '3', '4', '0']
+
+    string_list = [str(i) for i in range(101)]+[f"{i:02d}" for i in range(10)]
+    allowed_names = sorted(string_list, key=lambda x: int(x))
 
     # List directories that match the allowed names
     chan_dirs = [d for d in os.listdir(src) if os.path.isdir(os.path.join(src, d)) and d in allowed_names]
     chan_dirs.sort()
+
+    num_matching_folders = len(chan_dirs)
 
     print(f'List of folders in src: {chan_dirs}. Single channel folders.')
 
@@ -925,7 +930,7 @@ def _merge_channels(src, plot=False):
     if plot:
         plot_arrays(os.path.join(src, 'stack'))
 
-    return
+    return num_matching_folders
 
 def _mip_all(src, include_first_chan=True):
 
@@ -1584,10 +1589,6 @@ def preprocess_img_data(settings):
     else:
         print(f'Could not find any {valid_ext} files in {src} only found {extension_counts[0]}')
 
-
-
-
-
     if os.path.exists(os.path.join(src,'stack')):
         print('Found existing stack folder.')
     if os.path.exists(os.path.join(src,'channel_stack')):
@@ -1644,7 +1645,13 @@ def preprocess_img_data(settings):
         print(f"all images: {all_imgs}, full batch: {full_batches}, last batch: {last_batch_size}")
         raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
 
-    _merge_channels(src, plot=False)
+    nr_channel_folders = _merge_channels(src, plot=False)
+
+    if len(settings['channels']) != nr_channel_folders:
+        print(f"Number of channels does not match number of channel folders. channels: {settings['channels']} channel folders: {nr_channel_folders}")
+        new_channels = list(range(nr_channel_folders))
+        print(f"Changing channels from {settings['channels']} to {new_channels}")
+        settings['channels'] = new_channels
 
     if timelapse:
         _create_movies_from_npy_per_channel(stack_path, fps=2)
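
The hunk above makes _merge_channels report how many single-channel folders it found, and preprocess_img_data now overwrites settings['channels'] when that count disagrees with the configured channels. A standalone sketch of the reconciliation step (the wrapper function name and the stubbed folder count are illustrative):

    # Illustrative sketch of the new channel reconciliation in preprocess_img_data.
    def reconcile_channels(settings, nr_channel_folders):
        # nr_channel_folders is what _merge_channels() now returns.
        if len(settings['channels']) != nr_channel_folders:
            print(f"Number of channels does not match number of channel folders. "
                  f"channels: {settings['channels']} channel folders: {nr_channel_folders}")
            new_channels = list(range(nr_channel_folders))
            print(f"Changing channels from {settings['channels']} to {new_channels}")
            settings['channels'] = new_channels
        return settings

    settings = {'channels': [0, 1, 2, 3]}
    reconcile_channels(settings, 3)   # settings['channels'] becomes [0, 1, 2]
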
@@ -3095,4 +3102,210 @@ def generate_dataset_from_lists(dst, class_data, classes, test_split=0.1):
         test_class_dir = os.path.join(dst, f'test/{cls}')
         print(f'Train class {cls}: {len(os.listdir(train_class_dir))}, Test class {cls}: {len(os.listdir(test_class_dir))}')
 
-    return os.path.join(dst, 'train'), os.path.join(dst, 'test')
+    return os.path.join(dst, 'train'), os.path.join(dst, 'test')
+
+def convert_separate_files_to_yokogawa(folder, regex):
+    ROWS = "ABCDEFGHIJKLMNOP"
+    COLS = [f"{i:02d}" for i in range(1, 25)]
+    WELLS = [f"{r}{c}" for r in ROWS for c in COLS]
+
+    def _get_next_well(used_wells):
+        plate = 1
+        for well in WELLS:
+            well_name = f"plate{plate}_{well}"
+            if well_name not in used_wells:
+                return well_name
+            if well == "P24":
+                plate += 1
+        return f"plate{plate}_A01"
+
+    pattern = re.compile(regex, re.I)
+
+    files_by_fov = {}
+    rename_log = []
+    csv_path = os.path.join(folder, "rename_log.csv")
+    used_wells = set(os.listdir(folder))
+    fov_to_well = {}
+
+    # Group files by FOV
+    for file in os.listdir(folder):
+        match = pattern.match(file)
+        if not match:
+            print(f"Skipping {file}: does not match regex.")
+            continue
+
+        meta = match.groupdict()
+
+        # Extract required metadata
+        fov = meta['fov']
+        channel = int(meta['channel'])
+
+        # Optional metadata with defaults
+        time = int(meta.get('time', 1))
+        z_slice = int(meta.get('slice', 1))
+
+        files_by_fov.setdefault(fov, []).append((file, channel, time, z_slice))
+
+    # Assign wells per FOV
+    for fov, file_list in files_by_fov.items():
+        if fov not in fov_to_well:
+            fov_to_well[fov] = _get_next_well(used_wells)
+            used_wells.add(fov_to_well[fov])
+
+        well = fov_to_well[fov]
+
+        for original_file, channel, time, z_slice in file_list:
+            img = tifffile.imread(os.path.join(folder, original_file))
+            dtype = img.dtype
+
+            filename = f"{well}_T{time:04d}F001L01C{channel:02d}Z{z_slice:03d}.tif"
+            filepath = os.path.join(folder, filename)
+            tifffile.imwrite(filepath, img.astype(dtype))
+
+            rename_log.append({"Original File": original_file, "Renamed TIFF": filename})
+
+    pd.DataFrame(rename_log).to_csv(csv_path, index=False)
+    print(f"Processing complete. Files saved in {folder} and rename log saved as {csv_path}.")
+
+def convert_to_yokogawa(folder):
+    """
+    Detects file type in the folder and converts them
+    to Yokogawa-style naming with Maximum Intensity Projection (MIP).
+    """
+
+    def _get_next_well(used_wells):
+        """
+        Determines the next available well position in a 384-well format.
+        Iterates wells, and after P24, switches to plate2.
+        """
+        plate = 1
+        for well in WELLS:
+            well_name = f"plate{plate}_{well}"
+            if well_name not in used_wells:
+                return well_name
+            if well == "P24":
+                plate += 1
+        return f"plate{plate}_A01"
+
+    # Define 384-well plate format
+    ROWS = "ABCDEFGHIJKLMNOP"
+    COLS = [f"{i:02d}" for i in range(1, 25)]
+    WELLS = [f"{r}{c}" for r in ROWS for c in COLS]
+
+    filenames = []
+    rename_log = []
+    csv_path = os.path.join(folder, "rename_log.csv")
+    used_wells = set(os.listdir(folder))
+
+    # **Dictionary to store well assignments per original file**
+    file_to_well = {}
+
+    for file in os.listdir(folder):
+        path = os.path.join(folder, file)
+        ext = file.lower().split('.')[-1]
+
+        # **Assign a well only once per original file**
+        if file not in file_to_well:
+            file_to_well[file] = _get_next_well(used_wells)
+            used_wells.add(file_to_well[file]) # Mark it as used
+
+        well = file_to_well[file] # Use the same well for all channels/times
+
+        ### **Process Nikon ND2 Files**
+        if ext == 'nd2':
+            nd2 = ND2Reader(path)
+            metadata = nd2.metadata
+
+            timepoints = metadata.get("frames", [0])
+            fields = metadata.get("fields_of_view", [0])
+            z_levels = list(metadata.get("z_levels", range(1)))
+            channels = metadata.get("channels", [])
+
+            for t_idx in timepoints:
+                for f_idx in fields:
+                    for c_idx, channel in enumerate(channels):
+                        z_stack = [nd2.get_frame_2D(t=t_idx, v=f_idx, z=z_idx, c=c_idx) for z_idx in z_levels]
+                        mip_image = np.max(np.stack(z_stack), axis=0)
+                        dtype = z_stack[0].dtype
+
+                        filename = f"{well}_T{t_idx+1:04d}F{f_idx+1:03d}L01C{c_idx+1:02d}.tif"
+                        filepath = os.path.join(folder, filename)
+                        tifffile.imwrite(filepath, mip_image.astype(dtype))
+
+                        rename_log.append({"Original File": file, "Renamed TIFF": filename})
+
+        ### **Process Zeiss CZI Files**
+        elif ext == 'czi':
+            with czifile.CziFile(path) as czi:
+                shape = czi.shape
+                timepoints = range(shape[0])
+                z_levels = range(shape[1])
+                channels = range(shape[2])
+
+                for t_idx in timepoints:
+                    for c_idx in channels:
+                        z_stack = [czi.asarray()[t_idx, z_idx, c_idx] for z_idx in z_levels]
+                        mip_image = np.max(np.stack(z_stack), axis=0)
+                        dtype = z_stack[0].dtype
+
+                        filename = f"{well}_T{t_idx+1:04d}F001L01C{c_idx+1:02d}.tif"
+                        filepath = os.path.join(folder, filename)
+                        tifffile.imwrite(filepath, mip_image.astype(dtype))
+
+                        rename_log.append({"Original File": file, "Renamed TIFF": filename})
+
+        ### **Process Leica LIF Files**
+        elif ext == 'lif':
+            lif_file = readlif.Reader(path)
+
+            for image in lif_file.getIterImage():
+                timepoints = range(image.dims.t)
+                z_levels = range(image.dims.z)
+                channels = range(image.dims.c)
+
+                for t_idx in timepoints:
+                    for c_idx in channels:
+                        z_stack = [image.getFrame(z=z_idx, t=t_idx, c=c_idx) for z_idx in z_levels]
+                        mip_image = np.max(np.stack(z_stack), axis=0)
+                        dtype = z_stack[0].dtype
+
+                        filename = f"{well}_T{t_idx+1:04d}F001L01C{c_idx+1:02d}.tif"
+                        filepath = os.path.join(folder, filename)
+                        tifffile.imwrite(filepath, mip_image.astype(dtype))
+
+                        rename_log.append({"Original File": file, "Renamed TIFF": filename})
+
+        ### **Process Standard Image Files**
+        elif ext in ['tif', 'tiff', 'png', 'jpg', 'jpeg', 'bmp'] and not file.startswith("plate"):
+            with tifffile.TiffFile(path) as tif:
+                num_pages = len(tif.pages)
+
+                if num_pages > 1:
+                    images = tif.asarray()
+                    if images.ndim == 4:
+                        timepoints = range(images.shape[0])
+                        channels = range(images.shape[2])
+
+                        for t_idx in timepoints:
+                            for c_idx in channels:
+                                mip_image = np.max(images[t_idx, :, c_idx], axis=0)
+                                dtype = images.dtype
+
+                                filename = f"{well}_T{t_idx+1:04d}F001L01C{c_idx+1:02d}.tif"
+                                filepath = os.path.join(folder, filename)
+                                tifffile.imwrite(filepath, mip_image.astype(dtype))
+
+                                rename_log.append({"Original File": file, "Renamed TIFF": filename})
+                else:
+                    image = tif.pages[0].asarray()
+                    dtype = image.dtype
+
+                    filename = f"{well}_T0001F001L01C01.tif"
+                    filepath = os.path.join(folder, filename)
+                    tifffile.imwrite(filepath, image.astype(dtype))
+
+                    rename_log.append({"Original File": file, "Renamed TIFF": filename})
+
+    # Save rename log as CSV
+    pd.DataFrame(rename_log).to_csv(csv_path, index=False)
+    print(f"Processing complete. Files saved in {folder} and rename log saved as {csv_path}.")
spacr/measure.py CHANGED
@@ -945,20 +945,25 @@ def measure_crop(settings):
 
     from .io import _save_settings_to_db
     from .timelapse import _timelapse_masks_to_gif
-    from .utils import measure_test_mode, print_progress, delete_intermedeate_files, save_settings
+    from .utils import measure_test_mode, print_progress, delete_intermedeate_files, save_settings, format_path_for_system, normalize_src_path
     from .settings import get_measure_crop_settings
 
     if not isinstance(settings['src'], (str, list)):
         ValueError(f'src must be a string or a list of strings')
         return
 
+    settings['src'] = normalize_src_path(settings['src'])
+
     if isinstance(settings['src'], str):
         settings['src'] = [settings['src']]
 
     if isinstance(settings['src'], list):
         source_folders = settings['src']
+
         for source_folder in source_folders:
             print(f'Processing folder: {source_folder}')
+
+            source_folder = format_path_for_system(source_folder)
             settings['src'] = source_folder
             src = source_folder
 
@@ -966,15 +971,12 @@ def measure_crop(settings):
             settings = measure_test_mode(settings)
 
             src_fldr = settings['src']
+
             if not os.path.basename(src_fldr).endswith('merged'):
                 print(f"WARNING: Source folder, settings: src: {src_fldr} should end with '/merged'")
                 src_fldr = os.path.join(src_fldr, 'merged')
+                settings['src'] = src_fldr
                 print(f"Changed source folder to: {src_fldr}")
-
-            #if settings['save_measurements']:
-                #source_folder = os.path.dirname(settings['src'])
-                #os.makedirs(source_folder+'/measurements', exist_ok=True)
-                #_create_database(source_folder+'/measurements/measurements.db')
 
             if settings['cell_mask_dim'] is None:
                 settings['uninfected'] = True
@@ -995,12 +997,9 @@ def measure_crop(settings):
                 print(f'Warning reserving 6 CPU cores for other processes, setting n_jobs to {spacr_cores}')
                 settings['n_jobs'] = spacr_cores
 
-            #dirname = os.path.dirname(settings['src'])
-            #settings_df = pd.DataFrame(list(settings.items()), columns=['Key', 'Value'])
-            #settings_csv = os.path.join(dirname,'settings','measure_crop_settings.csv')
-            #os.makedirs(os.path.join(dirname,'settings'), exist_ok=True)
-            #settings_df.to_csv(settings_csv, index=False)
-            save_settings(settings, name='measure_crop_settings', show=True)
+            settings_save = settings.copy()
+            settings_save['src'] = os.path.dirname(settings['src'])
+            save_settings(settings_save, name='measure_crop_settings', show=True)
 
             if settings['timelapse_objects'] == 'nucleus':
                 if not settings['cell_mask_dim'] is None:
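
The measure.py changes normalize settings['src'] up front (normalize_src_path and format_path_for_system are new imports from spacr.utils whose implementations are not part of this diff), coerce each source folder to end in 'merged', and write the corrected path back into settings before saving. A small sketch of the folder coercion only, with the spacr helpers left out:

    # Illustrative sketch; the real path helpers live in spacr.utils.
    import os

    def coerce_to_merged(src_fldr):
        # Mirror the new behaviour: warn and append 'merged' when missing.
        if not os.path.basename(src_fldr).endswith('merged'):
            print(f"WARNING: Source folder, settings: src: {src_fldr} should end with '/merged'")
            src_fldr = os.path.join(src_fldr, 'merged')
            print(f"Changed source folder to: {src_fldr}")
        return src_fldr

    print(coerce_to_merged("/data/plate1"))          # /data/plate1/merged
    print(coerce_to_merged("/data/plate1/merged"))   # unchanged
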