spacr 0.4.60__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/__init__.py +2 -2
- spacr/core.py +0 -1
- spacr/gui.py +0 -1
- spacr/gui_utils.py +5 -2
- spacr/io.py +168 -178
- spacr/resources/MEDIAR/__pycache__/SetupDict.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/evaluate.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/generate_mapping.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/__pycache__/main.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/Predictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/Trainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/Baseline/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/EnsemblePredictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Predictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Trainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/MEDIAR/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/BasePredictor.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/BaseTrainer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/core/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/measures.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/datasetter.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/transforms.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/utils.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/CellAware.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/LoadImage.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/NormalizeImage.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/models/__pycache__/MEDIARFormer.cpython-39.pyc +0 -0
- spacr/resources/MEDIAR/train_tools/models/__pycache__/__init__.cpython-39.pyc +0 -0
- spacr/settings.py +26 -8
- spacr/submodules.py +10 -13
- spacr/timelapse.py +172 -4
- spacr/utils.py +26 -41
- {spacr-0.4.60.dist-info → spacr-0.5.0.dist-info}/METADATA +4 -2
- {spacr-0.4.60.dist-info → spacr-0.5.0.dist-info}/RECORD +46 -17
- spacr/stats.py +0 -221
- /spacr/{cellpose.py → spacr_cellpose.py} +0 -0
- {spacr-0.4.60.dist-info → spacr-0.5.0.dist-info}/LICENSE +0 -0
- {spacr-0.4.60.dist-info → spacr-0.5.0.dist-info}/WHEEL +0 -0
- {spacr-0.4.60.dist-info → spacr-0.5.0.dist-info}/entry_points.txt +0 -0
- {spacr-0.4.60.dist-info → spacr-0.5.0.dist-info}/top_level.txt +0 -0
spacr/__init__.py
CHANGED
@@ -26,7 +26,7 @@ from . import submodules
 from . import openai
 from . import ml
 from . import toxo
-from . import cellpose
+from . import spacr_cellpose
 from . import sp_stats
 from . import logger

@@ -57,7 +57,7 @@ __all__ = [
     "openai",
     "ml",
     "toxo",
-    "cellpose",
+    "spacr_cellpose",
     "sp_stats",
     "logger"
 ]
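The `cellpose` submodule is renamed to `spacr_cellpose` in this release (see the `/spacr/{cellpose.py → spacr_cellpose.py}` entry in the file list), so downstream imports need a one-line update. A minimal sketch; `identify_masks_finetune` is borrowed from the gui_utils.py hunk further down, and whether it is importable this way in a given install is an assumption:

```python
# Pre-0.5.0 import path (removed by the rename; kept commented out):
# from spacr.cellpose import identify_masks_finetune

# 0.5.0 and later: the same module now lives at spacr.spacr_cellpose
from spacr.spacr_cellpose import identify_masks_finetune
```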
spacr/core.py
CHANGED
spacr/gui.py
CHANGED
@@ -48,7 +48,6 @@ class MainApp(tk.Tk):
         }

         self.additional_gui_apps = {
-            "Convert": (lambda frame: initiate_root(self, 'convert'), "Convert images to Grayscale TIFs."),
             "Umap": (lambda frame: initiate_root(self, 'umap'), "Generate UMAP embeddings with datapoints represented as images."),
             "Train Cellpose": (lambda frame: initiate_root(self, 'train_cellpose'), "Train custom Cellpose models."),
             "ML Analyze": (lambda frame: initiate_root(self, 'ml_analyze'), "Machine learning analysis of data."),
spacr/gui_utils.py
CHANGED
@@ -394,7 +394,8 @@ def convert_settings_dict_for_gui(settings):
         'reduction_method': ('combo', ['umap', 'tsne'], 'umap'),
         'model_name': ('combo', ['cyto', 'cyto_2', 'cyto_3', 'nuclei'], 'cyto'),
         'regression_type': ('combo', ['ols','gls','wls','rlm','glm','mixed','quantile','logit','probit','poisson','lasso','ridge'], 'ols'),
-        'timelapse_objects': ('combo', ['cell', 'nucleus', 'pathogen', '…
+        'timelapse_objects': ('combo', ["['cell']", "['nucleus']", "['pathogen']", "['cell', 'nucleus']", "['cell', 'pathogen']", "['nucleus', 'pathogen']", "['cell', 'nucleus', 'pathogen']", None], None),
+        #'timelapse_objects': ('combo', '[cell]', '[nucleus]', '[pathogen]', '[cytoplasm]', None, None),
         'model_type': ('combo', torchvision_models, 'resnet50'),
         'optimizer_type': ('combo', ['adamw', 'adam'], 'adamw'),
         'schedule': ('combo', ['reduce_lr_on_plateau', 'step_lr'], 'reduce_lr_on_plateau'),
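The replacement `timelapse_objects` options are string literals of Python lists (plus `None`), so whatever consumes the combo selection has to turn the string back into a list. A hedged sketch of that round-trip; `parse_timelapse_objects` is a hypothetical helper, not code from spacr:

```python
import ast

def parse_timelapse_objects(selected):
    """Convert a combo selection such as "['cell', 'nucleus']" back into a list.

    Hypothetical helper: the combo options above are string literals of lists,
    so ast.literal_eval recovers the list without eval's risks.
    """
    if selected in (None, "None", ""):
        return None
    return ast.literal_eval(selected)

print(parse_timelapse_objects("['cell', 'pathogen']"))  # ['cell', 'pathogen']
print(parse_timelapse_objects(None))                    # None
```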
@@ -470,7 +471,7 @@ def function_gui_wrapper(function=None, settings={}, q=None, fig_queue=None, imp
 def run_function_gui(settings_type, settings, q, fig_queue, stop_requested):

     from .core import generate_image_umap, preprocess_generate_masks
-    from .cellpose import identify_masks_finetune, check_cellpose_models, compare_cellpose_masks
+    from .spacr_cellpose import identify_masks_finetune, check_cellpose_models, compare_cellpose_masks
     from .submodules import analyze_recruitment
     from .ml import generate_ml_scores, perform_regression
     from .submodules import train_cellpose, analyze_plaques
@@ -939,8 +940,10 @@ def convert_to_number(value):

     """
     Converts a string value to an integer if possible, otherwise converts to a float.
+
     Args:
         value (str): The string representation of the number.
+
     Returns:
         int or float: The converted number.
     """
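The hunk above only reflows the docstring of `convert_to_number`; the body is not shown in this diff. A minimal sketch consistent with the documented contract (int when possible, otherwise float):

```python
def convert_to_number(value):
    """Convert a string to an int if possible, otherwise to a float."""
    try:
        return int(value)
    except ValueError:
        return float(value)

print(convert_to_number("42"))    # 42 (int)
print(convert_to_number("3.14"))  # 3.14 (float)
```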
spacr/io.py
CHANGED
@@ -1,4 +1,4 @@
-import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime
+import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime, traceback
 import numpy as np
 import pandas as pd
 from PIL import Image, ImageOps
@@ -24,6 +24,7 @@ from nd2reader import ND2Reader
 from torchvision import transforms
 from sklearn.model_selection import train_test_split
 import readlif
+from pylibCZIrw import czi as pyczi

 def process_non_tif_non_2D_images(folder):
     """Processes all images in the folder and splits them into grayscale channels, preserving bit depth."""
@@ -652,7 +653,7 @@ def load_images_from_paths(images_by_key):
     return images_dict

 #@log_function_call
-def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=False, skip_mode='01', …
+def _rename_and_organize_image_files(src, regex, batch_size=100, metadata_type='', img_format='.tif', timelapse=False):
     """
     Convert z-stack images to maximum intensity projection (MIP) images.

@@ -660,8 +661,6 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals…
         src (str): The source directory containing the z-stack images.
         regex (str): The regular expression pattern used to match the filenames of the z-stack images.
         batch_size (int, optional): The number of images to process in each batch. Defaults to 100.
-        pick_slice (bool, optional): Whether to pick a specific slice based on the provided skip mode. Defaults to False.
-        skip_mode (str, optional): The skip mode used to filter out specific slices. Defaults to '01'.
         metadata_type (str, optional): The type of metadata associated with the images. Defaults to ''.

     Returns:
@@ -679,8 +678,9 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals…
     if not os.path.exists(stack_path) or (os.path.isdir(stack_path) and len(os.listdir(stack_path)) == 0):
         all_filenames = [filename for filename in os.listdir(src) if any(filename.endswith(ext) for ext in img_format)]
         print(f'All files: {len(all_filenames)} in {src}')
+        all_filenames = [f for f in all_filenames if not f.startswith('.')] #Exclude hidden files
         time_ls = []
-        image_paths_by_key = _extract_filename_metadata(all_filenames, src, regular_expression, metadata_type…
+        image_paths_by_key = _extract_filename_metadata(all_filenames, src, regular_expression, metadata_type)
         # Convert dictionary keys to a list for batching
         batching_keys = list(image_paths_by_key.keys())
         print(f'All unique FOV: {len(image_paths_by_key)} in {src}')
@@ -691,47 +691,34 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals…
             batch_keys = batching_keys[idx:idx+batch_size]
             batch_images_by_key = {key: image_paths_by_key[key] for key in batch_keys}
             images_by_key = load_images_from_paths(batch_images_by_key)
-            [seven deleted lines; content not preserved in this diff rendering]
-            os.makedirs(output_dir, exist_ok=True)
-            output_filename = f'{plate}_{well}_{field}.tif'
-            output_path = os.path.join(output_dir, output_filename)
-            files_processed += 1
-            stop = time.time()
-            duration = stop - start
-            time_ls.append(duration)
-            files_to_process = len(all_filenames)
-            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
-
-            if not os.path.exists(output_path):
-                mip_image.save(output_path)
-            else:
-                print(f'WARNING: A file with the same name already exists at location {output_filename}')
-            else:
-                for i, (key, images) in enumerate(images_by_key.items()):
-                    plate, well, field, channel = key[:4]
-                    output_dir = os.path.join(src, channel)
-                    mip = np.max(np.stack(images), axis=0)
-                    mip_image = Image.fromarray(mip)
-                    os.makedirs(output_dir, exist_ok=True)
+
+            # Process each batch of images
+            for i, (key, images) in enumerate(images_by_key.items()):
+
+                plate, well, field, channel, timeID, sliceID = key
+
+                if timelapse:
                     output_filename = f'{plate}_{well}_{field}.tif'
-                    [twelve deleted lines; content not preserved in this diff rendering]
+                else:
+                    output_filename = f'{plate}_{well}_{field}_{timeID}.tif'
+
+                output_dir = os.path.join(src, channel)
+                os.makedirs(output_dir, exist_ok=True)
+                output_path = os.path.join(output_dir, output_filename)
+                mip = np.max(np.stack(images), axis=0)
+                mip_image = Image.fromarray(mip)
+
+                files_processed += 1
+                stop = time.time()
+                duration = stop - start
+                time_ls.append(duration)
+                files_to_process = len(all_filenames)
+                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
+
+                if not os.path.exists(output_path):
+                    mip_image.save(output_path)
+                else:
+                    print(f'WARNING: A file with the same name already exists at location {output_filename}')

             images_by_key.clear()

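The rewritten loop unpacks each key as `(plate, well, field, channel, timeID, sliceID)` and collapses the z-slices of a field with a maximum-intensity projection before saving. The core operation in isolation, on synthetic data (file name and image size are made up):

```python
import numpy as np
from PIL import Image

# Three synthetic 16-bit z-slices of one field/channel
z_slices = [np.random.randint(0, 65535, (64, 64), dtype=np.uint16) for _ in range(3)]

# Per-pixel maximum across the stack -- the np.max(np.stack(images), axis=0)
# call used in the hunk above
mip = np.max(np.stack(z_slices), axis=0)
Image.fromarray(mip).save("plate1_A01_f001.tif")
```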
@@ -1243,7 +1230,11 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
         files_processed = 0
         for i, path in enumerate(paths):
             start = time.time()
-            array = np.load(path)
+            try:
+                array = np.load(path)
+            except Exception as e:
+                print(f"Error loading file {path}: {e}")
+                continue
             stack_ls.append(array)
             filenames_batch.append(os.path.basename(path))
             stop = time.time()
@@ -1571,8 +1562,6 @@ def preprocess_img_data(settings):
         save_dtype (type, optional): The data type used for saving the preprocessed images. Defaults to np.float32.
         randomize (bool, optional): Whether to randomize the order of the images. Defaults to True.
         all_to_mip (bool, optional): Whether to convert all images to MIP. Defaults to False.
-        pick_slice (bool, optional): Whether to pick a specific slice based on the provided skip mode. Defaults to False.
-        skip_mode (str, optional): The skip mode used to filter out specific slices. Defaults to '01'.
         settings (dict, optional): Additional settings for preprocessing. Defaults to {}.

     Returns:
@@ -1580,21 +1569,27 @@ def preprocess_img_data(settings):
     """

     src = settings['src']
-
+    delete_empty_subdirectories(src)
     files = os.listdir(src)
-    extensions = [file.split('.')[-1] for file in files]
-    extension_counts = Counter(extensions)
-    most_common_extension = extension_counts.most_common(1)[0][0]
-    img_format = None

-
+    valid_ext = ['tif', 'tiff', 'png', 'jpg', 'jpeg', 'bmp', 'nd2', 'czi', 'lif']
+    extensions = [file.split('.')[-1].lower() for file in files]
+    # Filter only valid extensions
+    valid_extensions = [ext for ext in extensions if ext in valid_ext]

-    #…
-    [three deleted lines; content not preserved in this diff rendering]
+    # Determine most common valid extension
+    img_format = None
+    if valid_extensions:
+        extension_counts = Counter(valid_extensions)
+        most_common_extension = Counter(valid_extensions).most_common(1)[0][0]
+        img_format = most_common_extension
+
+        print(f"Found {extension_counts[most_common_extension]} {most_common_extension} files")
+
     else:
-        print(f…
+        print(f"Could not find any {valid_ext} files in {src} only found {extension_counts[0]}")
+        print(f"{files} in {src}")
+        print(f"Please check the folder and try again")

     if os.path.exists(os.path.join(src,'stack')):
         print('Found existing stack folder.')
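Format detection now counts only whitelisted extensions before taking the most common one, so stray files can no longer decide `img_format`. The same idea in isolation:

```python
from collections import Counter

files = ["a.tif", "b.TIF", "c.nd2", "notes.txt", ".DS_Store"]
valid_ext = ['tif', 'tiff', 'png', 'jpg', 'jpeg', 'bmp', 'nd2', 'czi', 'lif']

extensions = [f.split('.')[-1].lower() for f in files]
valid_extensions = [ext for ext in extensions if ext in valid_ext]

# Mode of the valid extensions, or None when nothing usable is present
img_format = Counter(valid_extensions).most_common(1)[0][0] if valid_extensions else None
print(img_format)  # 'tif' -- the .txt and hidden file are ignored
```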
@@ -1605,15 +1600,14 @@ def preprocess_img_data(settings):
         return settings, src

     mask_channels = [settings['nucleus_channel'], settings['cell_channel'], settings['pathogen_channel']]
-    [four deleted lines; content not preserved in this diff rendering]
-    regex = _get_regex(metadata_type, img_format, custom_regex)
+
+    settings = set_default_settings_preprocess_img_data(settings)
+
+    regex = _get_regex(settings['metadata_type'], img_format, settings['custom_regex'])

-    if test_mode:
+    if settings['test_mode']:

-        print(f…
+        print(f"Running spacr in test mode")
         settings['plot'] = True
         try:
             os.rmdir(os.path.join(src, 'test'))
@@ -1623,7 +1617,7 @@ def preprocess_img_data(settings):
             print(f"Delete manually before running test mode")
             pass

-        src = _run_test_mode(settings['src'], regex, timelapse, test_images, random_test)
+        src = _run_test_mode(settings['src'], regex, settings['timelapse'], settings['test_images'], settings['random_test'])
         settings['src'] = src

     stack_path = os.path.join(src, 'stack')
@@ -1634,26 +1628,23 @@ def preprocess_img_data(settings):
         if not os.path.exists(stack_path):
             try:
                 if not img_format == None:
-                    [seventeen deleted lines; content not preserved in this diff rendering]
-                    elif full_batches > 0:
-                        print(f"all images: {all_imgs}, full batch: {full_batches}, last batch: {last_batch_size}")
-                        raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
+                    img_format = ['.tif', '.tiff', '.png', '.jpg', '.jpeg', '.bmp', '.nd2', '.czi', '.lif']
+                    _rename_and_organize_image_files(src, regex, settings['batch_size'], settings['metadata_type'], img_format)
+
+                    #Make sure no batches will be of only one image
+                    all_imgs = len(stack_path)
+                    full_batches = all_imgs // settings['batch_size']
+                    last_batch_size = all_imgs % settings['batch_size']
+
+                    # Check if the last batch is of size 1
+                    if last_batch_size == 1:
+                        # If there's only one batch and its size is 1, it's also an issue
+                        if full_batches == 0:
+                            raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
+                        # If the last batch is of size 1, merge it with the second last batch
+                        elif full_batches > 0:
+                            print(f"all images: {all_imgs}, full batch: {full_batches}, last batch: {last_batch_size}")
+                            raise ValueError("Last batch of size 1 detected. Adjust the batch size.")

                 nr_channel_folders = _merge_channels(src, plot=False)

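The new guard rejects layouts whose final batch would hold exactly one image. A standalone sketch of the check; note that `all_imgs = len(stack_path)` in the hunk measures the length of the path string, so the sketch assumes the intent was to count files with `os.listdir`:

```python
import os

def check_batch_layout(stack_path, batch_size):
    """Raise if the last batch would contain exactly one image (sketch, not spacr's code)."""
    all_imgs = len(os.listdir(stack_path))  # assumed intent; the diff uses len(stack_path)
    full_batches = all_imgs // batch_size
    last_batch_size = all_imgs % batch_size
    if last_batch_size == 1:
        if full_batches == 0:
            raise ValueError("Only one batch of size 1 detected. Adjust the batch size.")
        raise ValueError("Last batch of size 1 detected. Adjust the batch size.")
```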
@@ -1663,18 +1654,19 @@ def preprocess_img_data(settings):
                 print(f"Changing channels from {settings['channels']} to {new_channels}")
                 settings['channels'] = new_channels

-                if timelapse:
-                    _create_movies_from_npy_per_channel(stack_path, fps=…
+                if settings['timelapse']:
+                    _create_movies_from_npy_per_channel(stack_path, fps=settings['fps'])

-                if plot:
-                    print(f…
-                    plot_arrays(stack_path, figuresize, cmap, nr=nr, normalize=normalize)
+                if settings['plot']:
+                    print(f"plotting {settings['nr']} images from {src}/stack")
+                    plot_arrays(stack_path, settings['figuresize'], settings['cmap'], nr=settings['nr'], normalize=settings['normalize'])

-                if all_to_mip:
+                if settings['all_to_mip']:
                     _mip_all(stack_path)
-                    if plot:
-                        print(f…
-                        plot_arrays(stack_path, figuresize, cmap, nr=nr, normalize=normalize)
+                    if settings['plot']:
+                        print(f"plotting {settings['nr']} images from {src}/stack")
+                        plot_arrays(stack_path, settings['figuresize'], settings['cmap'], nr=settings['nr'], normalize=settings['normalize'])
+
             except Exception as e:
                 print(f"Error: {e}")

@@ -1683,9 +1675,6 @@ def preprocess_img_data(settings):
                                     save_dtype=np.float32,
                                     settings=settings)

-    #if plot:
-    #    _plot_4D_arrays(src+'/norm_channel_stack', nr_npz=1, nr=nr)
-
     return settings, src

 def _check_masks(batch, batch_filenames, output_folder):
@@ -3208,28 +3197,13 @@ def convert_separate_files_to_yokogawa(folder, regex):

     pd.DataFrame(rename_log).to_csv(csv_path, index=False)
     print(f"Processing complete. Files saved in {folder} and rename log saved as {csv_path}.")
-
+
 def convert_to_yokogawa(folder):
     """
     Detects file type in the folder and converts them
     to Yokogawa-style naming with Maximum Intensity Projection (MIP).
     """
-
-    #def _get_next_well(used_wells):
-    #    """
-    #    Determines the next available well position in a 384-well format.
-    #    Iterates wells, and after P24, switches to plate2.
-    #    """
-    #    plate = 1
-    #    for well in WELLS:
-    #        well_name = f"plate{plate}_{well}"
-    #        if well_name not in used_wells:
-    #            used_wells.add(well_name)
-    #            return well_name
-    #        if well == "P24":
-    #            plate += 1
-    #    raise ValueError("All wells exhausted.")
-
+
     def _get_next_well(used_wells):
         """
         Determines the next available well position across multiple 384-well plates.
@@ -3297,75 +3271,92 @@ def convert_to_yokogawa(folder):
                     filepath = os.path.join(folder, filename)

                     tifffile.imwrite(filepath, mip_image.astype(dtype))
-                    rename_log.append({"Original File": file, …
+                    rename_log.append({"Original File": file,
+                                       "Renamed TIFF": filename,
+                                       "ext": ext,
+                                       "time": t_idx,
+                                       "field": f_idx,
+                                       "channel": channel,
+                                       "z": z_levels})

         except IndexError:
             print(f"Warning: ND2 file {file} has an incomplete data structure. Skipping.")

         except Exception as e:
             print(f"Error processing ND2 file {file}: {e}")
-
-        ### **Process Zeiss CZI Files**
+
     elif ext == 'czi':
-        [57 deleted lines; the previous CZI handler was not preserved in this diff rendering]
+        try:
+            # Open the CZI in streaming mode
+            with pyczi.open_czi(path) as czidoc:
+
+                # 1) Global dimension ranges
+                bbox = czidoc.total_bounding_box
+                _, tlen = bbox.get('T', (0,1))
+                _, clen = bbox.get('C', (0,1))
+                _, zlen = bbox.get('Z', (0,1))
+
+                # 2) Scene -> list of scene indices
+                scenes_bb = czidoc.scenes_bounding_rectangle
+                scenes = sorted(scenes_bb.keys()) if scenes_bb else [None]
+
+                # 3) Output folder (same as .czi)
+                folder = os.path.dirname(path)
+
+                # 4) Loop scene x time x channel x Z
+                for scene in scenes:
+                    # *** assign a unique well for this scene ***
+                    scene_well = _get_next_well(used_wells)
+
+                    # Field index = scene+1 (or 1 if no scene)
+                    F_idx = scene + 1 if scene is not None else 1
+                    # Scene index for "A"
+                    A_idx = scene + 1 if scene is not None else 1
+
+                    for t in range(tlen):
+                        for c in range(clen):
+                            for z in range(zlen):
+                                # Read exactly one 2D plane
+                                arr = czidoc.read(
+                                    plane={'T': t, 'C': c, 'Z': z},
+                                    scene=scene
+                                )
+                                plane = np.squeeze(arr)
+
+                                # Build Yokogawa-style filename:
+                                fn = (
+                                    f"{scene_well}_"
+                                    f"T{t+1:04d}"
+                                    f"F{F_idx:03d}"
+                                    f"L01"
+                                    f"A{A_idx:02d}"
+                                    f"Z{z+1:02d}"
+                                    f"C{c+1:02d}.tif"
+                                )
+                                outpath = os.path.join(folder, fn)
+
+                                # Write with lossless compression
+                                tifffile.imwrite(
+                                    outpath,
+                                    plane.astype(plane.dtype),
+                                    compression='zlib'
+                                )
+
+                                # Log it
+                                rename_log.append({
+                                    "Original File": file,
+                                    "Renamed TIFF": fn,
+                                    "ext": ext,
+                                    "scene": scene,
+                                    "time": t,
+                                    "slice": z,
+                                    "field": F_idx,
+                                    "channel": c,
+                                    "well": scene_well
+                                })

-
+        except Exception as e:
+            print(f"Error processing CZI file {file}: {e}")

     ### **Process Leica LIF Files**
     elif ext == 'lif':
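The new CZI branch streams one 2D plane at a time through pylibCZIrw instead of materializing the whole file, walking scene x time x channel x z and giving each scene its own well. A condensed sketch of that access pattern, restricted to the pylibCZIrw calls that appear in the hunk above (`example.czi` is a hypothetical input):

```python
import numpy as np
from pylibCZIrw import czi as pyczi

path = "example.czi"  # hypothetical input file

with pyczi.open_czi(path) as czidoc:
    # (start, size) per dimension; default to a single plane when an axis is absent
    bbox = czidoc.total_bounding_box
    _, tlen = bbox.get('T', (0, 1))
    _, clen = bbox.get('C', (0, 1))
    _, zlen = bbox.get('Z', (0, 1))

    # Multi-scene files expose one bounding rectangle per scene index
    scenes_bb = czidoc.scenes_bounding_rectangle
    scenes = sorted(scenes_bb.keys()) if scenes_bb else [None]

    for scene in scenes:
        for t in range(tlen):
            for c in range(clen):
                for z in range(zlen):
                    # Stream exactly one 2D plane into memory
                    plane = np.squeeze(czidoc.read(plane={'T': t, 'C': c, 'Z': z}, scene=scene))
                    # ...name and write the plane as in the hunk above
```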
@@ -3445,7 +3436,6 @@ def convert_to_yokogawa(folder):
         except Exception as e:
             print(f"Error processing standard image file {file}: {e}")

-
     # Save rename log as CSV
     pd.DataFrame(rename_log).to_csv(csv_path, index=False)
     print(f"Processing complete. Files saved in {folder} and rename log saved as {csv_path}.")
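For orientation, `_get_next_well` (docstring shown in an earlier hunk) hands out well positions across consecutive 384-well plates. A hypothetical generator with the same documented behavior; the exact well-name format spacr uses is not visible in this diff:

```python
from itertools import product

ROWS = "ABCDEFGHIJKLMNOP"   # 16 rows in a 384-well plate
COLS = range(1, 25)         # 24 columns

def next_well(used_wells, max_plates=8):
    """Return the next unused well name, spilling onto the next plate when full."""
    for plate in range(1, max_plates + 1):
        for row, col in product(ROWS, COLS):
            name = f"plate{plate}_{row}{col:02d}"
            if name not in used_wells:
                used_wells.add(name)
                return name
    raise ValueError("All wells exhausted.")

used = set()
print(next_well(used))  # plate1_A01
```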
spacr/resources/MEDIAR/**/__pycache__/*.pyc
CHANGED
Binary files; no text diff is shown for the compiled .pyc files listed at the top of this page.

spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/NormalizeImage.cpython-39.pyc
ADDED
Binary file; no text diff shown.