spacr 0.4.60__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- spacr/__init__.py +2 -4
- spacr/__main__.py +3 -3
- spacr/core.py +13 -107
- spacr/gui.py +0 -1
- spacr/gui_core.py +2 -2
- spacr/gui_utils.py +5 -14
- spacr/io.py +189 -200
- spacr/mediar.py +12 -8
- spacr/plot.py +50 -13
- spacr/settings.py +71 -14
- spacr/submodules.py +21 -14
- spacr/timelapse.py +192 -6
- spacr/utils.py +180 -56
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/METADATA +64 -62
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/RECORD +20 -72
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/WHEEL +1 -1
- spacr/resources/MEDIAR/.gitignore +0 -18
- spacr/resources/MEDIAR/LICENSE +0 -21
- spacr/resources/MEDIAR/README.md +0 -189
- spacr/resources/MEDIAR/SetupDict.py +0 -39
- spacr/resources/MEDIAR/config/baseline.json +0 -60
- spacr/resources/MEDIAR/config/mediar_example.json +0 -72
- spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -17
- spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -55
- spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -58
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -66
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -66
- spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -16
- spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -23
- spacr/resources/MEDIAR/core/BasePredictor.py +0 -120
- spacr/resources/MEDIAR/core/BaseTrainer.py +0 -240
- spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -59
- spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -113
- spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -2
- spacr/resources/MEDIAR/core/Baseline/utils.py +0 -80
- spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -105
- spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -234
- spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -172
- spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -3
- spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -429
- spacr/resources/MEDIAR/core/__init__.py +0 -2
- spacr/resources/MEDIAR/core/utils.py +0 -40
- spacr/resources/MEDIAR/evaluate.py +0 -71
- spacr/resources/MEDIAR/generate_mapping.py +0 -121
- spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
- spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
- spacr/resources/MEDIAR/image/failure_cases.png +0 -0
- spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
- spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
- spacr/resources/MEDIAR/image/mediar_results.png +0 -0
- spacr/resources/MEDIAR/main.py +0 -125
- spacr/resources/MEDIAR/predict.py +0 -70
- spacr/resources/MEDIAR/requirements.txt +0 -14
- spacr/resources/MEDIAR/train_tools/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -88
- spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -161
- spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -77
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -208
- spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -148
- spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -84
- spacr/resources/MEDIAR/train_tools/measures.py +0 -200
- spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -102
- spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/utils.py +0 -70
- spacr/stats.py +0 -221
- /spacr/{cellpose.py → spacr_cellpose.py} +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/LICENSE +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/entry_points.txt +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/top_level.txt +0 -0
spacr/io.py
CHANGED
@@ -1,4 +1,4 @@
-import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime
+import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime, traceback
 import numpy as np
 import pandas as pd
 from PIL import Image, ImageOps
@@ -24,6 +24,7 @@ from nd2reader import ND2Reader
 from torchvision import transforms
 from sklearn.model_selection import train_test_split
 import readlif
+from pylibCZIrw import czi as pyczi
 
 def process_non_tif_non_2D_images(folder):
     """Processes all images in the folder and splits them into grayscale channels, preserving bit depth."""
@@ -652,7 +653,7 @@ def load_images_from_paths(images_by_key):
     return images_dict
 
 #@log_function_call
-def _rename_and_organize_image_files(src, regex, batch_size=100,
+def _rename_and_organize_image_files(src, regex, batch_size=100, metadata_type='', img_format='.tif', timelapse=False):
     """
     Convert z-stack images to maximum intensity projection (MIP) images.
 
@@ -660,8 +661,6 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
         src (str): The source directory containing the z-stack images.
         regex (str): The regular expression pattern used to match the filenames of the z-stack images.
         batch_size (int, optional): The number of images to process in each batch. Defaults to 100.
-        pick_slice (bool, optional): Whether to pick a specific slice based on the provided skip mode. Defaults to False.
-        skip_mode (str, optional): The skip mode used to filter out specific slices. Defaults to '01'.
         metadata_type (str, optional): The type of metadata associated with the images. Defaults to ''.
 
     Returns:
@@ -679,8 +678,9 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
     if not os.path.exists(stack_path) or (os.path.isdir(stack_path) and len(os.listdir(stack_path)) == 0):
         all_filenames = [filename for filename in os.listdir(src) if any(filename.endswith(ext) for ext in img_format)]
         print(f'All files: {len(all_filenames)} in {src}')
+        all_filenames = [f for f in all_filenames if not f.startswith('.')] #Exclude hidden files
         time_ls = []
-        image_paths_by_key = _extract_filename_metadata(all_filenames, src, regular_expression, metadata_type
+        image_paths_by_key = _extract_filename_metadata(all_filenames, src, regular_expression, metadata_type)
         # Convert dictionary keys to a list for batching
         batching_keys = list(image_paths_by_key.keys())
         print(f'All unique FOV: {len(image_paths_by_key)} in {src}')
@@ -691,47 +691,34 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
             batch_keys = batching_keys[idx:idx+batch_size]
             batch_images_by_key = {key: image_paths_by_key[key] for key in batch_keys}
             images_by_key = load_images_from_paths(batch_images_by_key)
-[7 removed lines, shown blank in the source diff view]
-                    os.makedirs(output_dir, exist_ok=True)
-                    output_filename = f'{plate}_{well}_{field}.tif'
-                    output_path = os.path.join(output_dir, output_filename)
-                    files_processed += 1
-                    stop = time.time()
-                    duration = stop - start
-                    time_ls.append(duration)
-                    files_to_process = len(all_filenames)
-                    print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
-
-                    if not os.path.exists(output_path):
-                        mip_image.save(output_path)
-                    else:
-                        print(f'WARNING: A file with the same name already exists at location {output_filename}')
-            else:
-                for i, (key, images) in enumerate(images_by_key.items()):
-                    plate, well, field, channel = key[:4]
-                    output_dir = os.path.join(src, channel)
-                    mip = np.max(np.stack(images), axis=0)
-                    mip_image = Image.fromarray(mip)
-                    os.makedirs(output_dir, exist_ok=True)
+
+            # Process each batch of images
+            for i, (key, images) in enumerate(images_by_key.items()):
+
+                plate, well, field, channel, timeID, sliceID = key
+
+                if timelapse:
                     output_filename = f'{plate}_{well}_{field}.tif'
-[12 removed lines, shown blank in the source diff view]
+                else:
+                    output_filename = f'{plate}_{well}_{field}_{timeID}.tif'
+
+                output_dir = os.path.join(src, channel)
+                os.makedirs(output_dir, exist_ok=True)
+                output_path = os.path.join(output_dir, output_filename)
+                mip = np.max(np.stack(images), axis=0)
+                mip_image = Image.fromarray(mip)
+
+                files_processed += 1
+                stop = time.time()
+                duration = stop - start
+                time_ls.append(duration)
+                files_to_process = len(all_filenames)
+                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
+
+                if not os.path.exists(output_path):
+                    mip_image.save(output_path)
+                else:
+                    print(f'WARNING: A file with the same name already exists at location {output_filename}')
 
             images_by_key.clear()
 
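The reworked batch loop above derives one maximum-intensity projection (MIP) per field of view by stacking the z-slices and taking the per-pixel maximum. A minimal sketch of that projection step, using only numpy and PIL (the helper name and synthetic data are illustrative, not part of spacr):

    import numpy as np
    from PIL import Image

    def mip_from_slices(slices):
        """Collapse a list of same-shaped 2D arrays into one MIP array."""
        stack = np.stack(slices)      # shape: (n_slices, H, W)
        return np.max(stack, axis=0)  # per-pixel maximum across slices

    # Two synthetic 16-bit slices stand in for a real z-stack.
    slices = [np.random.randint(0, 65535, (64, 64)).astype(np.uint16) for _ in range(2)]
    Image.fromarray(mip_from_slices(slices)).save('mip_example.tif')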
@@ -1188,7 +1175,7 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
 
     paths = []
     time_ls = []
-    output_fldr = os.path.join(os.path.dirname(src), '
+    output_fldr = os.path.join(os.path.dirname(src), 'masks')
     os.makedirs(output_fldr, exist_ok=True)
 
     if settings['timelapse']:
@@ -1243,7 +1230,11 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
     files_processed = 0
     for i, path in enumerate(paths):
         start = time.time()
-
+        try:
+            array = np.load(path)
+        except Exception as e:
+            print(f"Error loading file {path}: {e}")
+            continue
         stack_ls.append(array)
         filenames_batch.append(os.path.basename(path))
         stop = time.time()
@@ -1342,7 +1333,7 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
         None
     """
     paths = [os.path.join(src, file) for file in os.listdir(src) if file.endswith('.npz')]
-    output_fldr = os.path.join(os.path.dirname(src), '
+    output_fldr = os.path.join(os.path.dirname(src), 'masks')
     os.makedirs(output_fldr, exist_ok=True)
     time_ls = []
 
@@ -1427,7 +1418,7 @@ def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
         save_dtype (numpy.dtype, optional): The data type to save the normalized stack. Defaults to np.float32.
     """
     paths = [os.path.join(src, file) for file in os.listdir(src) if file.endswith('.npz')]
-    output_fldr = os.path.join(os.path.dirname(src), '
+    output_fldr = os.path.join(os.path.dirname(src), 'masks')
     os.makedirs(output_fldr, exist_ok=True)
 
     for file_index, path in enumerate(paths):
|
|
1571
1562
|
save_dtype (type, optional): The data type used for saving the preprocessed images. Defaults to np.float32.
|
1572
1563
|
randomize (bool, optional): Whether to randomize the order of the images. Defaults to True.
|
1573
1564
|
all_to_mip (bool, optional): Whether to convert all images to MIP. Defaults to False.
|
1574
|
-
pick_slice (bool, optional): Whether to pick a specific slice based on the provided skip mode. Defaults to False.
|
1575
|
-
skip_mode (str, optional): The skip mode used to filter out specific slices. Defaults to '01'.
|
1576
1565
|
settings (dict, optional): Additional settings for preprocessing. Defaults to {}.
|
1577
1566
|
|
1578
1567
|
Returns:
|
1579
1568
|
None
|
1580
1569
|
"""
|
1581
|
-
|
1582
1570
|
src = settings['src']
|
1583
|
-
|
1571
|
+
|
1572
|
+
if len(os.listdir(src)) < 100:
|
1573
|
+
delete_empty_subdirectories(src)
|
1574
|
+
|
1584
1575
|
files = os.listdir(src)
|
1585
|
-
|
1586
|
-
|
1587
|
-
|
1576
|
+
valid_ext = ['tif', 'tiff', 'png', 'jpg', 'jpeg', 'bmp', 'nd2', 'czi', 'lif']
|
1577
|
+
extensions = [file.split('.')[-1].lower() for file in files]
|
1578
|
+
# Filter only valid extensions
|
1579
|
+
valid_extensions = [ext for ext in extensions if ext in valid_ext]
|
1580
|
+
# Determine most common valid extension
|
1588
1581
|
img_format = None
|
1582
|
+
if valid_extensions:
|
1583
|
+
extension_counts = Counter(valid_extensions)
|
1584
|
+
most_common_extension = Counter(valid_extensions).most_common(1)[0][0]
|
1585
|
+
img_format = most_common_extension
|
1586
|
+
|
1587
|
+
print(f"Found {extension_counts[most_common_extension]} {most_common_extension} files")
|
1589
1588
|
|
1590
|
-
delete_empty_subdirectories(src)
|
1591
|
-
|
1592
|
-
# Check if the most common extension is one of the specified image formats
|
1593
|
-
if most_common_extension in valid_ext:
|
1594
|
-
img_format = f'.{most_common_extension}'
|
1595
|
-
print(f'Found {extension_counts[most_common_extension]} {most_common_extension} files')
|
1596
1589
|
else:
|
1597
|
-
print(f
|
1590
|
+
print(f"Could not find any {valid_ext} files in {src} only found {extension_counts[0]}")
|
1591
|
+
print(f"{files} in {src}")
|
1592
|
+
print(f"Please check the folder and try again")
|
1598
1593
|
|
1599
1594
|
if os.path.exists(os.path.join(src,'stack')):
|
1600
1595
|
print('Found existing stack folder.')
|
1601
1596
|
if os.path.exists(os.path.join(src,'channel_stack')):
|
1602
1597
|
print('Found existing channel_stack folder.')
|
1603
|
-
if os.path.exists(os.path.join(src,'
|
1604
|
-
print('Found existing
|
1598
|
+
if os.path.exists(os.path.join(src,'masks')):
|
1599
|
+
print('Found existing masks folder. Skipping preprocessing')
|
1605
1600
|
return settings, src
|
1606
|
-
|
1601
|
+
|
1607
1602
|
mask_channels = [settings['nucleus_channel'], settings['cell_channel'], settings['pathogen_channel']]
|
1608
|
-
backgrounds = [settings['nucleus_background'], settings['cell_background'], settings['pathogen_background']]
|
1609
|
-
|
1610
|
-
settings, metadata_type, custom_regex, nr, plot, batch_size, timelapse, lower_percentile, randomize, all_to_mip, pick_slice, skip_mode, cmap, figuresize, normalize, save_dtype, test_mode, test_images, random_test = set_default_settings_preprocess_img_data(settings)
|
1611
|
-
|
1612
|
-
regex = _get_regex(metadata_type, img_format, custom_regex)
|
1613
|
-
|
1614
|
-
if test_mode:
|
1615
1603
|
|
1616
|
-
|
1604
|
+
settings = set_default_settings_preprocess_img_data(settings)
|
1605
|
+
|
1606
|
+
regex = _get_regex(settings['metadata_type'], img_format, settings['custom_regex'])
|
1607
|
+
|
1608
|
+
if settings['test_mode']:
|
1609
|
+
print(f"Running spacr in test mode")
|
1617
1610
|
settings['plot'] = True
|
1618
|
-
|
1619
|
-
|
1620
|
-
|
1621
|
-
|
1622
|
-
|
1623
|
-
|
1624
|
-
|
1611
|
+
if os.path.exists(os.path.join(src,'test')):
|
1612
|
+
try:
|
1613
|
+
os.rmdir(os.path.join(src, 'test'))
|
1614
|
+
print(f"Deleted test directory: {os.path.join(src, 'test')}")
|
1615
|
+
except OSError as e:
|
1616
|
+
print(f"Error deleting test directory: {e}")
|
1617
|
+
print(f"Delete manually before running test mode")
|
1618
|
+
pass
|
1625
1619
|
|
1626
|
-
src = _run_test_mode(settings['src'], regex, timelapse, test_images, random_test)
|
1620
|
+
src = _run_test_mode(settings['src'], regex, settings['timelapse'], settings['test_images'], settings['random_test'])
|
1627
1621
|
settings['src'] = src
|
1628
1622
|
|
1629
1623
|
stack_path = os.path.join(src, 'stack')
|
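The rewritten format detection counts only recognized extensions and picks the most common one with collections.Counter, instead of trusting whatever extension happens to dominate the folder. A standalone sketch of the same idea (detect_img_format is a hypothetical name, not a spacr function):

    import os
    from collections import Counter

    def detect_img_format(src, valid_ext=('tif', 'tiff', 'png', 'jpg', 'jpeg',
                                          'bmp', 'nd2', 'czi', 'lif')):
        """Return the most common recognized extension in src, or None."""
        exts = [f.rsplit('.', 1)[-1].lower() for f in os.listdir(src) if '.' in f]
        valid = [e for e in exts if e in valid_ext]
        if not valid:
            return None
        return Counter(valid).most_common(1)[0][0]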
@@ -1634,26 +1628,23 @@ def preprocess_img_data(settings):
     if not os.path.exists(stack_path):
         try:
             if not img_format == None:
-[17 removed lines, shown blank in the source diff view]
-            elif full_batches > 0:
-                print(f"all images: {all_imgs}, full batch: {full_batches}, last batch: {last_batch_size}")
-                raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
+                img_format = ['.tif', '.tiff', '.png', '.jpg', '.jpeg', '.bmp', '.nd2', '.czi', '.lif']
+            _rename_and_organize_image_files(src, regex, settings['batch_size'], settings['metadata_type'], img_format)
+
+            #Make sure no batches will be of only one image
+            all_imgs = len(stack_path)
+            full_batches = all_imgs // settings['batch_size']
+            last_batch_size = all_imgs % settings['batch_size']
+
+            # Check if the last batch is of size 1
+            if last_batch_size == 1:
+                # If there's only one batch and its size is 1, it's also an issue
+                if full_batches == 0:
+                    raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
+                # If the last batch is of size 1, merge it with the second last batch
+                elif full_batches > 0:
+                    print(f"all images: {all_imgs}, full batch: {full_batches}, last batch: {last_batch_size}")
+                    raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
 
             nr_channel_folders = _merge_channels(src, plot=False)
 
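The new guard rejects any layout whose trailing batch would contain exactly one image, since a size-1 batch breaks the downstream batch processing. The check reduces to integer division and remainder; a sketch of the same logic (note the diff itself computes all_imgs as len(stack_path), i.e. the length of the path string, so a caller-supplied image count is assumed here):

    def check_batching(n_images, batch_size):
        """Raise if the trailing batch would contain exactly one image."""
        full_batches = n_images // batch_size
        last_batch_size = n_images % batch_size
        if last_batch_size == 1:
            if full_batches == 0:
                raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
            raise ValueError("Last batch of size 1 detected. Adjust the batch size.")

    check_batching(201, 100)  # raises: two full batches plus a lone trailing image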
@@ -1663,18 +1654,19 @@ def preprocess_img_data(settings):
                 print(f"Changing channels from {settings['channels']} to {new_channels}")
                 settings['channels'] = new_channels
 
-            if timelapse:
-                _create_movies_from_npy_per_channel(stack_path, fps=
+            if settings['timelapse']:
+                _create_movies_from_npy_per_channel(stack_path, fps=settings['fps'])
 
-            if plot:
-                print(f
-                plot_arrays(stack_path, figuresize, cmap, nr=nr, normalize=normalize)
+            if settings['plot']:
+                print(f"plotting {settings['nr']} images from {src}/stack")
+                plot_arrays(stack_path, settings['figuresize'], settings['cmap'], nr=settings['nr'], normalize=settings['normalize'])
 
-            if all_to_mip:
+            if settings['all_to_mip']:
                 _mip_all(stack_path)
-                if plot:
-                    print(f
-                    plot_arrays(stack_path, figuresize, cmap, nr=nr, normalize=normalize)
+                if settings['plot']:
+                    print(f"plotting {settings['nr']} images from {src}/stack")
+                    plot_arrays(stack_path, settings['figuresize'], settings['cmap'], nr=settings['nr'], normalize=settings['normalize'])
+
         except Exception as e:
             print(f"Error: {e}")
 
@@ -1683,9 +1675,6 @@ def preprocess_img_data(settings):
                                save_dtype=np.float32,
                                settings=settings)
 
-    #if plot:
-    #    _plot_4D_arrays(src+'/norm_channel_stack', nr_npz=1, nr=nr)
-
     return settings, src
 
 def _check_masks(batch, batch_filenames, output_folder):
@@ -2002,12 +1991,12 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
     """
     folder_paths = [os.path.join(src+'/stack')]
 
-    if cell_chann_dim is not None or os.path.exists(os.path.join(src, '
-        folder_paths = folder_paths + [os.path.join(src, '
-    if nucleus_chann_dim is not None or os.path.exists(os.path.join(src, '
-        folder_paths = folder_paths + [os.path.join(src, '
-    if pathogen_chann_dim is not None or os.path.exists(os.path.join(src, '
-        folder_paths = folder_paths + [os.path.join(src, '
+    if cell_chann_dim is not None or os.path.exists(os.path.join(src, 'masks', 'cell_mask_stack')):
+        folder_paths = folder_paths + [os.path.join(src, 'masks','cell_mask_stack')]
+    if nucleus_chann_dim is not None or os.path.exists(os.path.join(src, 'masks', 'nucleus_mask_stack')):
+        folder_paths = folder_paths + [os.path.join(src, 'masks','nucleus_mask_stack')]
+    if pathogen_chann_dim is not None or os.path.exists(os.path.join(src, 'masks', 'pathogen_mask_stack')):
+        folder_paths = folder_paths + [os.path.join(src, 'masks','pathogen_mask_stack')]
 
     output_folder = src+'/merged'
     reference_folder = folder_paths[0]
@@ -2881,7 +2870,6 @@ def generate_training_dataset(settings):
 
         return class_paths_ls
 
-    from .io import _read_and_merge_data, _read_db
     from .utils import get_paths_from_db, annotate_conditions, save_settings
     from .settings import set_generate_training_dataset_defaults
 
@@ -3208,28 +3196,13 @@ def convert_separate_files_to_yokogawa(folder, regex):
 
     pd.DataFrame(rename_log).to_csv(csv_path, index=False)
     print(f"Processing complete. Files saved in {folder} and rename log saved as {csv_path}.")
-
+
 def convert_to_yokogawa(folder):
     """
     Detects file type in the folder and converts them
     to Yokogawa-style naming with Maximum Intensity Projection (MIP).
     """
-
-    #def _get_next_well(used_wells):
-    #    """
-    #    Determines the next available well position in a 384-well format.
-    #    Iterates wells, and after P24, switches to plate2.
-    #    """
-    #    plate = 1
-    #    for well in WELLS:
-    #        well_name = f"plate{plate}_{well}"
-    #        if well_name not in used_wells:
-    #            used_wells.add(well_name)
-    #            return well_name
-    #        if well == "P24":
-    #            plate += 1
-    #    raise ValueError("All wells exhausted.")
-
+
     def _get_next_well(used_wells):
         """
         Determines the next available well position across multiple 384-well plates.
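Both the deleted comment block and its replacement implement the same allocation scheme: hand out plate-prefixed well names in order, rolling over to the next plate once P24 is taken. A sketch of that scheme, assuming WELLS enumerates the 384 positions A01..P24 row by row (the enumeration and the max_plates cap are assumptions, not spacr's definitions):

    import string

    # Hypothetical 384-well enumeration: rows A-P, columns 01-24.
    WELLS = [f"{row}{col:02d}" for row in string.ascii_uppercase[:16]
             for col in range(1, 25)]

    def next_well(used_wells, max_plates=10):
        """Return the next unused 'plateN_well' name, rolling over plates."""
        for plate in range(1, max_plates + 1):
            for well in WELLS:
                name = f"plate{plate}_{well}"
                if name not in used_wells:
                    used_wells.add(name)
                    return name
        raise ValueError("All wells exhausted.")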
@@ -3297,75 +3270,92 @@ def convert_to_yokogawa(folder):
                     filepath = os.path.join(folder, filename)
 
                     tifffile.imwrite(filepath, mip_image.astype(dtype))
-                    rename_log.append({"Original File": file,
+                    rename_log.append({"Original File": file,
+                                       "Renamed TIFF": filename,
+                                       "ext": ext,
+                                       "time": t_idx,
+                                       "field": f_idx,
+                                       "channel": channel,
+                                       "z": z_levels})
 
         except IndexError:
             print(f"Warning: ND2 file {file} has an incomplete data structure. Skipping.")
 
         except Exception as e:
             print(f"Error processing ND2 file {file}: {e}")
-
-    ### **Process Zeiss CZI Files**
+
     elif ext == 'czi':
-[57 removed lines, shown blank in the source diff view]
+        try:
+            # Open the CZI in streaming mode
+            with pyczi.open_czi(path) as czidoc:
+
+                # 1) Global dimension ranges
+                bbox = czidoc.total_bounding_box
+                _, tlen = bbox.get('T', (0,1))
+                _, clen = bbox.get('C', (0,1))
+                _, zlen = bbox.get('Z', (0,1))
+
+                # 2) Scene → list of scene indices
+                scenes_bb = czidoc.scenes_bounding_rectangle
+                scenes = sorted(scenes_bb.keys()) if scenes_bb else [None]
+
+                # 3) Output folder (same as .czi)
+                folder = os.path.dirname(path)
+
+                # 4) Loop scene × time × channel × Z
+                for scene in scenes:
+                    # *** assign a unique well for this scene ***
+                    scene_well = _get_next_well(used_wells)
+
+                    # Field index = scene+1 (or 1 if no scene)
+                    F_idx = scene + 1 if scene is not None else 1
+                    # Scene index for “A”
+                    A_idx = scene + 1 if scene is not None else 1
+
+                    for t in range(tlen):
+                        for c in range(clen):
+                            for z in range(zlen):
+                                # Read exactly one 2D plane
+                                arr = czidoc.read(
+                                    plane={'T': t, 'C': c, 'Z': z},
+                                    scene=scene
+                                )
+                                plane = np.squeeze(arr)
+
+                                # Build Yokogawa-style filename:
+                                fn = (
+                                    f"{scene_well}_"
+                                    f"T{t+1:04d}"
+                                    f"F{F_idx:03d}"
+                                    f"L01"
+                                    f"A{A_idx:02d}"
+                                    f"Z{z+1:02d}"
+                                    f"C{c+1:02d}.tif"
+                                )
+                                outpath = os.path.join(folder, fn)
+
+                                # Write with lossless compression
+                                tifffile.imwrite(
+                                    outpath,
+                                    plane.astype(plane.dtype),
+                                    compression='zlib'
+                                )
+
+                                # Log it
+                                rename_log.append({
+                                    "Original File": file,
+                                    "Renamed TIFF": fn,
+                                    "ext": ext,
+                                    "scene": scene,
+                                    "time": t,
+                                    "slice": z,
+                                    "field": F_idx,
+                                    "channel": c,
+                                    "well": scene_well
+                                })
 
-
+        except Exception as e:
+            print(f"Error processing CZI file {file}: {e}")
 
     ### **Process Leica LIF Files**
     elif ext == 'lif':
@@ -3445,7 +3435,6 @@ def convert_to_yokogawa(folder):
         except Exception as e:
             print(f"Error processing standard image file {file}: {e}")
 
-
     # Save rename log as CSV
     pd.DataFrame(rename_log).to_csv(csv_path, index=False)
     print(f"Processing complete. Files saved in {folder} and rename log saved as {csv_path}.")
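The new CZI branch above is built on pylibCZIrw (imported at the top of io.py as pyczi): it queries dimension ranges from total_bounding_box, then reads one 2D plane per (T, C, Z) index rather than loading the whole file at once. A minimal standalone sketch of that read pattern ('example.czi' is a placeholder path):

    import numpy as np
    from pylibCZIrw import czi as pyczi

    with pyczi.open_czi('example.czi') as czidoc:
        # Dimension ranges, e.g. {'T': (0, 3), 'C': (0, 2), 'Z': (0, 5)}
        bbox = czidoc.total_bounding_box
        _, tlen = bbox.get('T', (0, 1))
        _, clen = bbox.get('C', (0, 1))
        _, zlen = bbox.get('Z', (0, 1))
        for t in range(tlen):
            for c in range(clen):
                for z in range(zlen):
                    # One 2D plane per (T, C, Z) combination.
                    plane = np.squeeze(czidoc.read(plane={'T': t, 'C': c, 'Z': z}))
                    print(t, c, z, plane.shape, plane.dtype)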
spacr/mediar.py
CHANGED
@@ -15,14 +15,18 @@ if not os.path.exists(init_file):
 # Add MEDIAR to sys.path
 sys.path.insert(0, mediar_path)
 
-try:
-
-    from core.MEDIAR import Predictor, EnsemblePredictor
-    from train_tools.models import MEDIARFormer
-
-
-
-
+#try:
+# Now import the dependencies from MEDIAR
+# from core.MEDIAR import Predictor, EnsemblePredictor
+# from train_tools.models import MEDIARFormer
+
+# from train_tools.models import MEDIARFormer
+Predictor, EnsemblePredictor, MEDIARFormer = None, None, None
+
+#finally:
+#    # Remove the temporary __init__.py file after the import
+#    if os.path.exists(init_file):
+#        os.remove(init_file) # Remove the __init__.py file
 
 def display_imgs_in_list(lists_of_imgs, cmaps=None):
     """
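With the eager imports commented out, Predictor, EnsemblePredictor, and MEDIARFormer are now module-level None placeholders. One way a caller could still resolve them on demand, given that mediar_path was already added to sys.path above (this helper is a hypothetical sketch, not part of spacr):

    import importlib

    def load_mediar_classes():
        """Import MEDIAR's classes only when they are actually needed."""
        mediar_core = importlib.import_module('core.MEDIAR')
        mediar_models = importlib.import_module('train_tools.models')
        return (mediar_core.Predictor,
                mediar_core.EnsemblePredictor,
                mediar_models.MEDIARFormer)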