spacr 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/core.py +56 -67
- spacr/gui.py +20 -38
- spacr/gui_core.py +390 -489
- spacr/gui_elements.py +309 -59
- spacr/gui_utils.py +361 -73
- spacr/io.py +42 -46
- spacr/measure.py +198 -151
- spacr/plot.py +108 -42
- spacr/resources/font/open_sans/OFL.txt +93 -0
- spacr/resources/font/open_sans/OpenSans-Italic-VariableFont_wdth,wght.ttf +0 -0
- spacr/resources/font/open_sans/OpenSans-VariableFont_wdth,wght.ttf +0 -0
- spacr/resources/font/open_sans/README.txt +100 -0
- spacr/resources/font/open_sans/static/OpenSans-Bold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-BoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-ExtraBold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-ExtraBoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-Italic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-Light.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-LightItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-Medium.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-MediumItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-Regular.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-SemiBold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans-SemiBoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-Bold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-BoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-Italic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-Light.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-LightItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-Medium.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-MediumItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-Regular.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Bold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-BoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBoldItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Italic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Light.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-LightItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Medium.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-MediumItalic.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Regular.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBold.ttf +0 -0
- spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBoldItalic.ttf +0 -0
- spacr/resources/icons/logo.pdf +2786 -6
- spacr/resources/icons/logo_spacr.png +0 -0
- spacr/resources/icons/logo_spacr_1.png +0 -0
- spacr/settings.py +11 -83
- spacr/utils.py +13 -33
- {spacr-0.2.4.dist-info → spacr-0.2.5.dist-info}/METADATA +5 -1
- spacr-0.2.5.dist-info/RECORD +100 -0
- spacr-0.2.4.dist-info/RECORD +0 -58
- {spacr-0.2.4.dist-info → spacr-0.2.5.dist-info}/LICENSE +0 -0
- {spacr-0.2.4.dist-info → spacr-0.2.5.dist-info}/WHEEL +0 -0
- {spacr-0.2.4.dist-info → spacr-0.2.5.dist-info}/entry_points.txt +0 -0
- {spacr-0.2.4.dist-info → spacr-0.2.5.dist-info}/top_level.txt +0 -0
spacr/io.py
CHANGED
```diff
@@ -588,20 +588,20 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
     regular_expression = re.compile(regex)
     images_by_key = defaultdict(list)
     stack_path = os.path.join(src, 'stack')
+    files_processed = 0
     if not os.path.exists(stack_path) or (os.path.isdir(stack_path) and len(os.listdir(stack_path)) == 0):
         all_filenames = [filename for filename in os.listdir(src) if filename.endswith(img_format)]
-        print(f'All_files:{len(all_filenames)} in {src}')
+        print(f'All_files: {len(all_filenames)} in {src}')
         time_ls = []
-
-        for
+
+        for idx in range(0, len(all_filenames), batch_size):
             start = time.time()
-            batch_filenames = all_filenames[
-            processed += len(batch_filenames)
+            batch_filenames = all_filenames[idx:idx+batch_size]
             for filename in batch_filenames:
                 images_by_key = _extract_filename_metadata(batch_filenames, src, images_by_key, regular_expression, metadata_type, pick_slice, skip_mode)
-
+
             if pick_slice:
-                for key in images_by_key:
+                for i, key in enumerate(images_by_key):
                     plate, well, field, channel, mode = key
                     max_intensity_slice = max(images_by_key[key], key=lambda x: np.percentile(x, 90))
                     mip_image = Image.fromarray(max_intensity_slice)
@@ -609,21 +609,19 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                     os.makedirs(output_dir, exist_ok=True)
                     output_filename = f'{plate}_{well}_{field}.tif'
                     output_path = os.path.join(output_dir, output_filename)
-
-                    if os.path.exists(output_path):
-                        print(f'WARNING: A file with the same name already exists at location {output_filename}')
-                    else:
-                        mip_image.save(output_path)
-
+                    files_processed += 1
                     stop = time.time()
                     duration = stop - start
                     time_ls.append(duration)
-                    files_processed = processed
                     files_to_process = len(all_filenames)
                     print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
 
+                    if os.path.exists(output_path):
+                        print(f'WARNING: A file with the same name already exists at location {output_filename}')
+                    else:
+                        mip_image.save(output_path)
             else:
-                for key, images in images_by_key.items():
+                for i, (key, images) in enumerate(images_by_key.items()):
                     mip = np.max(np.stack(images), axis=0)
                     mip_image = Image.fromarray(mip)
                     plate, well, field, channel = key[:4]
@@ -631,18 +629,17 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                     os.makedirs(output_dir, exist_ok=True)
                     output_filename = f'{plate}_{well}_{field}.tif'
                     output_path = os.path.join(output_dir, output_filename)
-
-                    if os.path.exists(output_path):
-                        print(f'WARNING: A file with the same name already exists at location {output_filename}')
-                    else:
-                        mip_image.save(output_path)
+                    files_processed += 1
                     stop = time.time()
                     duration = stop - start
                     time_ls.append(duration)
-                    files_processed = processed
                     files_to_process = len(all_filenames)
                     print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type='Preprocessing filenames')
 
+                    if os.path.exists(output_path):
+                        print(f'WARNING: A file with the same name already exists at location {output_filename}')
+                    else:
+                        mip_image.save(output_path)
             images_by_key.clear()
 
         # Move original images to a new directory
@@ -656,6 +653,7 @@ def _rename_and_organize_image_files(src, regex, batch_size=100, pick_slice=Fals
                 print(f'WARNING: A file with the same name already exists at location {move}')
             else:
                 shutil.move(os.path.join(src, filename), move)
+    files_processed = 0
     return
 
 def _merge_file(chan_dirs, stack_dir, file_name):
```
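The two branches reordered above both reduce the grouped z-slices for one (plate, well, field) key to a single image: with `pick_slice` enabled, the slice with the highest 90th-percentile intensity is kept; otherwise the slices are collapsed into a maximum-intensity projection (MIP). A minimal standalone sketch of those two reductions, with synthetic data and hypothetical output names standing in for the parsed microscopy files:

```python
import numpy as np
from PIL import Image

# Synthetic stand-in for the z-slices collected for one (plate, well, field) key.
rng = np.random.default_rng(0)
slices = [rng.integers(0, 65535, size=(64, 64), dtype=np.uint16) for _ in range(5)]

# pick_slice=True: keep the single slice whose 90th-percentile intensity is highest.
best_slice = max(slices, key=lambda x: np.percentile(x, 90))
Image.fromarray(best_slice).save('plate1_A1_f1_picked.tif')

# pick_slice=False: collapse all slices into a maximum-intensity projection (MIP),
# i.e. the per-pixel maximum across the stacked slices.
mip = np.max(np.stack(slices), axis=0)
Image.fromarray(mip).save('plate1_A1_f1_mip.tif')
```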
```diff
@@ -975,7 +973,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
                 time_ls.append(duration)
                 files_processed = i+1
                 files_to_process = time_stack_path_lists
-                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=
+                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type="Concatinating")
             stack = np.stack(stack_region)
             save_loc = os.path.join(channel_stack_loc, f'{name}.npz')
             np.savez(save_loc, data=stack, filenames=filenames_region)
@@ -1006,7 +1004,7 @@ def _concatenate_channel(src, channels, randomize=True, timelapse=False, batch_s
             time_ls.append(duration)
             files_processed = i+1
             files_to_process = nr_files
-            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=batch_size, operation_type="Concatinating")
             if (i+1) % batch_size == 0 or i+1 == nr_files:
                 unique_shapes = {arr.shape[:-1] for arr in stack_ls}
                 if len(unique_shapes) > 1:
@@ -1104,7 +1102,7 @@ def _normalize_img_batch(stack, channels, save_dtype, settings):
         time_ls.append(duration)
         files_processed = i+1
         files_to_process = len(channels)
-        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=
+        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type=f"Normalizing")
 
     return normalized_stack.astype(save_dtype)
 
@@ -1151,7 +1149,6 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
                 parts = file.split('_')
                 name = parts[0] + '_' + parts[1] + '_' + parts[2]
                 array = np.load(path)
-                #array = np.take(array, channels, axis=2)
                 stack_region.append(array)
                 filenames_region.append(os.path.basename(path))
                 stop = time.time()
@@ -1159,7 +1156,7 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
                 time_ls.append(duration)
                 files_processed = i+1
                 files_to_process = len(time_stack_path_lists)
-                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=
+                print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Concatinating")
             stack = np.stack(stack_region)
 
             normalized_stack = _normalize_img_batch(stack=stack,
@@ -1188,18 +1185,18 @@ def concatenate_and_normalize(src, channels, save_dtype=np.float32, settings={})
         stack_ls = []
         filenames_batch = []
         time_ls = []
+        files_processed = 0
         for i, path in enumerate(paths):
             start = time.time()
             array = np.load(path)
-            #array = np.take(array, channels, axis=2)
             stack_ls.append(array)
             filenames_batch.append(os.path.basename(path))
             stop = time.time()
             duration = stop - start
             time_ls.append(duration)
-            files_processed
+            files_processed += 1
             files_to_process = nr_files
-            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=
+            print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Concatinating")
 
             if (i + 1) % settings['batch_size'] == 0 or i + 1 == nr_files:
                 unique_shapes = {arr.shape[:-1] for arr in stack_ls}
```
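A pattern that recurs across these hunks: each loop iteration times its own work, appends the duration to `time_ls`, keeps `files_processed` as a running counter, and hands both to `print_progress`. A rough sketch of that flow, where `report_progress` is a hypothetical stand-in for spacr's `print_progress` (the real helper also accepts `n_jobs` and `batch_size`, as seen in the diff):

```python
import time

def report_progress(done, total, time_ls, operation_type):
    # Hypothetical stand-in for spacr's print_progress: average the per-item
    # durations collected so far and estimate the time remaining.
    avg = sum(time_ls) / len(time_ls) if time_ls else 0
    eta = avg * (total - done)
    print(f'{operation_type}: {done}/{total}, {avg:.3f}s/item, ~{eta:.1f}s left', end='\r')

def process_in_batches(items, batch_size=100):
    files_to_process = len(items)
    files_processed = 0   # running counter, incremented once per item
    time_ls = []          # per-item durations feed the time estimate
    for idx in range(0, len(items), batch_size):
        for item in items[idx:idx + batch_size]:
            start = time.time()
            _ = item * 2  # placeholder for the real per-item work
            files_processed += 1
            time_ls.append(time.time() - start)
            report_progress(files_processed, files_to_process, time_ls, 'Preprocessing filenames')
    print()               # move past the carriage-returned status line

process_in_batches(list(range(250)))
```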
```diff
@@ -1350,12 +1347,12 @@ def _normalize_stack(src, backgrounds=[100, 100, 100], remove_backgrounds=[False
             average_time = np.mean(time_ls) if len(time_ls) > 0 else 0
             print(f'channels:{chan_index}/{stack.shape[-1] - 1}, arrays:{array_index + 1}/{single_channel.shape[0]}, Signal:{upper:.1f}, noise:{lower:.1f}, Signal-to-noise:{average_stnr:.1f}, Time/channel:{average_time:.2f}sec')
 
-        stop = time.time()
-        duration = stop - start
-        time_ls.append(duration)
-        files_processed = file_index + 1
-        files_to_process = len(paths)
-        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
+        #stop = time.time()
+        #duration = stop - start
+        #time_ls.append(duration)
+        #files_processed = file_index + 1
+        #files_to_process = len(paths)
+        #print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
 
         normalized_stack[:, :, :, channel] = arr_2d_normalized
 
@@ -1405,12 +1402,12 @@ def _normalize_timelapse(src, lower_percentile=2, save_dtype=np.float32):
 
             print(f'channels:{chan_index+1}/{stack.shape[-1]}, arrays:{array_index+1}/{single_channel.shape[0]}', end='\r')
 
-        stop = time.time()
-        duration = stop - start
-        time_ls.append(duration)
-        files_processed = file_index+1
-        files_to_process = len(paths)
-        print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
+        #stop = time.time()
+        #duration = stop - start
+        #time_ls.append(duration)
+        #files_processed = file_index+1
+        #files_to_process = len(paths)
+        #print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Normalizing")
 
         save_loc = os.path.join(output_fldr, f'{name}_norm_timelapse.npz')
         np.savez(save_loc, data=normalized_stack, filenames=filenames)
@@ -1620,8 +1617,8 @@ def preprocess_img_data(settings):
                                    save_dtype=np.float32,
                                    settings=settings)
 
-    if plot:
-
+    #if plot:
+    #    _plot_4D_arrays(src+'/norm_channel_stack', nr_npz=1, nr=nr)
 
     return settings, src
 
@@ -1951,7 +1948,7 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
     all_imgs = len(os.listdir(reference_folder))
     time_ls = []
     # Iterate through each file in the reference folder
-    for filename in os.listdir(reference_folder):
+    for idx, filename in enumerate(os.listdir(reference_folder)):
         start = time.time()
         stack_ls = []
         if filename.endswith('.npy'):
@@ -2012,7 +2009,7 @@ def _load_and_concatenate_arrays(src, channels, cell_chann_dim, nucleus_chann_di
             stop = time.time()
             duration = stop - start
             time_ls.append(duration)
-            files_processed =
+            files_processed = idx+1
             files_to_process = all_imgs
             print_progress(files_processed, files_to_process, n_jobs=1, time_ls=time_ls, batch_size=None, operation_type="Merging Arrays")
 
@@ -2550,7 +2547,6 @@ def _read_mask(mask_path):
     mask = img_as_uint(mask)
     return mask
 
-
 def convert_numpy_to_tiff(folder_path, limit=None):
     """
     Converts all numpy files in a folder to TIFF format and saves them in a subdirectory 'tiff'.
```
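For reference, the `.npz` archives these functions read and write pair the image stack with its source filenames, as in `np.savez(save_loc, data=stack, filenames=filenames_region)` above. A small sketch of that round trip, using hypothetical shapes and names:

```python
import numpy as np

# Hypothetical channel stack: 4 images of 64x64 pixels with 3 channels,
# plus the filenames they came from.
stack = np.zeros((4, 64, 64, 3), dtype=np.float32)
filenames = [f'plate1_A1_{i}.tif' for i in range(4)]

np.savez('plate1_A1.npz', data=stack, filenames=filenames)

with np.load('plate1_A1.npz') as npz:
    data = npz['data']        # (4, 64, 64, 3) float32 array
    names = npz['filenames']  # array of strings holding the names
    assert data.shape == stack.shape and len(names) == 4
```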
|