cellects 0.1.3-py3-none-any.whl → 0.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__main__.py +65 -25
- cellects/config/all_vars_dict.py +18 -17
- cellects/core/cellects_threads.py +1034 -396
- cellects/core/motion_analysis.py +1664 -2010
- cellects/core/one_image_analysis.py +1082 -1061
- cellects/core/program_organizer.py +1687 -1316
- cellects/core/script_based_run.py +80 -76
- cellects/gui/advanced_parameters.py +365 -326
- cellects/gui/cellects.py +102 -91
- cellects/gui/custom_widgets.py +4 -3
- cellects/gui/first_window.py +226 -104
- cellects/gui/if_several_folders_window.py +117 -68
- cellects/gui/image_analysis_window.py +841 -450
- cellects/gui/required_output.py +100 -56
- cellects/gui/ui_strings.py +840 -0
- cellects/gui/video_analysis_window.py +317 -135
- cellects/image_analysis/cell_leaving_detection.py +64 -4
- cellects/image_analysis/image_segmentation.py +451 -22
- cellects/image_analysis/morphological_operations.py +2166 -1635
- cellects/image_analysis/network_functions.py +616 -253
- cellects/image_analysis/one_image_analysis_threads.py +94 -153
- cellects/image_analysis/oscillations_functions.py +131 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
- cellects/image_analysis/shape_descriptors.py +517 -466
- cellects/utils/formulas.py +169 -6
- cellects/utils/load_display_save.py +362 -105
- cellects/utils/utilitarian.py +86 -9
- cellects-0.2.6.dist-info/LICENSE +675 -0
- cellects-0.2.6.dist-info/METADATA +829 -0
- cellects-0.2.6.dist-info/RECORD +44 -0
- cellects/core/one_video_per_blob.py +0 -540
- cellects/image_analysis/cluster_flux_study.py +0 -102
- cellects-0.1.3.dist-info/LICENSE.odt +0 -0
- cellects-0.1.3.dist-info/METADATA +0 -176
- cellects-0.1.3.dist-info/RECORD +0 -44
- {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
- {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
- {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
cellects/utils/load_display_save.py
CHANGED
@@ -14,6 +14,7 @@ import h5py
 from timeit import default_timer
 import numpy as np
 from numpy.typing import NDArray
+from typing import Tuple
 import cv2
 from pathlib import Path
 import exifread
@@ -21,7 +22,7 @@ from exif import Image
 from matplotlib import pyplot as plt
 from cellects.image_analysis.image_segmentation import combine_color_spaces, get_color_spaces, generate_color_space_combination
 from cellects.utils.formulas import bracket_to_uint8_image_contrast, sum_of_abs_differences
-from cellects.utils.utilitarian import translate_dict
+from cellects.utils.utilitarian import translate_dict, split_dict
 
 
 class PickleRick:
@@ -300,114 +301,156 @@ def write_video(np_array: NDArray[np.uint8], vid_name: str, is_color: bool=True,
     vid.release()
 
 
-def 
+def write_video_sets(img_list: list, sizes: NDArray, vid_names: list, crop_coord, bounding_boxes,
+                     bunch_nb: int, video_nb_per_bunch: int, remaining: int,
+                     raw_images: bool, is_landscape: bool, use_list_of_vid: bool,
+                     in_colors: bool=False, reduce_image_dim: bool=False, pathway: str=""):
     """
-
+    Write video sets from a list of images, applying cropping and optional rotation.
 
-
-
-
-
-
+    Parameters
+    ----------
+    img_list : list
+        List of image file names.
+    sizes : NDArray
+        Array containing the dimensions of each video frame.
+    vid_names : list
+        List of video file names to be saved.
+    crop_coord : dict or tuple
+        Coordinates for cropping regions of interest in images/videos.
+    bounding_boxes : tuple
+        Bounding box coordinates to extract sub-images from the original images.
+    bunch_nb : int
+        Number of bunches to divide the videos into.
+    video_nb_per_bunch : int
+        Number of videos per bunch.
+    remaining : int
+        Number of videos remaining after the last full bunch.
+    raw_images : bool
+        Whether the images are in raw format.
+    is_landscape : bool
+        If true, rotate the images to landscape orientation before processing.
+    use_list_of_vid : bool
+        Flag indicating if the output should be a list of videos.
+    in_colors : bool, optional
+        If true, process images with color information. Default is False.
+    reduce_image_dim : bool, optional
+        If true, reduce image dimensions. Default is False.
+    pathway : str, optional
+        Path where the videos should be saved. Default is an empty string.
+    """
+    top, bot, left, right = bounding_boxes
+    for bunch in np.arange(bunch_nb):
+        print(f'\nSaving the bunch n: {bunch + 1} / {bunch_nb} of videos:', end=' ')
+        if bunch == (bunch_nb - 1) and remaining > 0:
+            arenas = np.arange(bunch * video_nb_per_bunch, bunch * video_nb_per_bunch + remaining, dtype=np.uint32)
+        else:
+            arenas = np.arange(bunch * video_nb_per_bunch, (bunch + 1) * video_nb_per_bunch, dtype=np.uint32)
+        if use_list_of_vid:
+            video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in arenas]
+        else:
+            video_bunch = np.zeros(np.append(sizes[0, :], len(arenas)), dtype=np.uint8)
+        prev_img = None
+        images_done = bunch * len(img_list)
+        for image_i, image_name in enumerate(img_list):
+            img = read_and_rotate(image_name, prev_img, raw_images, is_landscape, crop_coord)
+            prev_img = img.copy()
+            if not in_colors and reduce_image_dim:
+                img = img[:, :, 0]
+
+            for arena_i, arena_name in enumerate(arenas):
+                # arena_i = 0; arena_name = arena[arena_i]
+                sub_img = img[top[arena_name]: (bot[arena_name] + 1), left[arena_name]: (right[arena_name] + 1), ...]
+                if use_list_of_vid:
+                    video_bunch[arena_i][image_i, ...] = sub_img
+                else:
+                    if len(video_bunch.shape) == 5:
+                        video_bunch[image_i, :, :, :, arena_i] = sub_img
+                    else:
+                        video_bunch[image_i, :, :, arena_i] = sub_img
+        for arena_i, arena_name in enumerate(arenas):
+            if use_list_of_vid:
+                np.save(pathway + vid_names[arena_name], video_bunch[arena_i])
+            else:
+                if len(video_bunch.shape) == 5:
+                    np.save(pathway + vid_names[arena_name], video_bunch[:, :, :, :, arena_i])
+                else:
+                    np.save(pathway + vid_names[arena_name], video_bunch[:, :, :, arena_i])
+
+
+def video2numpy(vid_name: str, conversion_dict=None, background: NDArray=None, background2: NDArray=None,
+                true_frame_width: int=None):
+    """
+    Convert a video file to a NumPy array.
 
     Parameters
     ----------
     vid_name : str
-
+        The path to the video file. Can be a `.mp4` or `.npy`.
     conversion_dict : dict, optional
-        Dictionary 
-    background : 
-
+        Dictionary containing color space conversion parameters.
+    background : NDArray, optional
+        Background image for processing.
+    background2 : NDArray, optional
+        Second background image for processing.
     true_frame_width : int, optional
-
-
-    Other Parameters
-    ----------------
-    background : bool, optional
-        Whether to subtract the background from the video frames. Default is None.
-    true_frame_width : int, optional
-        The true width of the video frames. Default is None.
+        True width of the frame. If specified and the current width is double this value,
+        adjusts to true_frame_width.
 
     Returns
     -------
-
+    NDArray or tuple of NDArrays
     If conversion_dict is None, returns the video as a NumPy array.
-    Otherwise, returns a tuple containing the original and converted 
-
-    Raises
-    ------
-    ValueError
-        If the video file cannot be opened or if there is an error in processing.
+    Otherwise, returns a tuple containing the original video and converted video.
 
     Notes
     -----
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    >>> print(vid_array.shape)
-    (100, 720, 960, 3)
-
-    >>> vid_array = video2numpy('example_video.npy', {'rgb': 'gray'}, True, 960)
-    >>> print(vid_array.shape)
-    (100, 720, 960)"""
-    if vid_name[-4:] == ".npy":
-        video = np.load(vid_name)  # , allow_pickle='TRUE'
-        frame_width = video.shape[2]
-        if true_frame_width is not None:
-            if frame_width == 2 * true_frame_width:
-                frame_width = true_frame_width
-        if conversion_dict is not None:
-            converted_video = np.zeros((video.shape[0], video.shape[1], frame_width), dtype=np.uint8)
+    This function uses OpenCV to read the contents of a `.mp4` video file.
+    """
+    np_loading = vid_name[-4:] == ".npy"
+    if np_loading:
+        video = np.load(vid_name)
+        dims = list(video.shape)
+    else:
+        cap = cv2.VideoCapture(vid_name)
+        dims = [int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))]
+
+    if true_frame_width is not None:
+        if dims[2] == 2 * true_frame_width:
+            dims[2] = true_frame_width
+
+    if conversion_dict is not None:
+        first_dict, second_dict, c_spaces = split_dict(conversion_dict)
+        converted_video = np.empty(dims[:3], dtype=np.uint8)
+        if conversion_dict['logical'] == 'None':
+            converted_video2 = np.empty(dims[:3], dtype=np.uint8)
+    if np_loading:
         for counter in np.arange(video.shape[0]):
-            img = video[counter, :, :
-            greyscale_image, greyscale_image2 = generate_color_space_combination(img,
-
+            img = video[counter, :, :dims[2], :]
+            greyscale_image, greyscale_image2, all_c_spaces, first_pc_vector = generate_color_space_combination(img, c_spaces,
+                first_dict, second_dict, background=background, background2=background2,
                 convert_to_uint8=True)
             converted_video[counter, ...] = greyscale_image
-
-
-
-    cap = cv2.VideoCapture(vid_name)
-    frame_number = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    if true_frame_width is not None:
-        if frame_width == 2 * true_frame_width:
-            frame_width = true_frame_width
+            if conversion_dict['logical'] == 'None':
+                converted_video2[counter, ...] = greyscale_image2
+        video = video[:, :, :dims[2], ...]
 
+    if not np_loading:
         # 2) Create empty arrays to store video analysis data
-
-        video = np.empty((frame_number, frame_height, frame_width, 3), dtype=np.uint8)
-        if conversion_dict is not None:
-            converted_video = np.empty((frame_number, frame_height, frame_width), dtype=np.uint8)
+        video = np.empty((dims[0], dims[1], dims[2], 3), dtype=np.uint8)
         # 3) Read and convert the video frame by frame
         counter = 0
-        while cap.isOpened() and counter < 
+        while cap.isOpened() and counter < dims[0]:
            ret, frame = cap.read()
-            frame = frame[:, :
+            frame = frame[:, :dims[2], ...]
             video[counter, ...] = frame
             if conversion_dict is not None:
-
-
-
-                converted_video[counter, ...] = 
+                greyscale_image, greyscale_image2, all_c_spaces, first_pc_vector = generate_color_space_combination(frame, c_spaces,
+                    first_dict, second_dict, background=background, background2=background2,
+                    convert_to_uint8=True)
+                converted_video[counter, ...] = greyscale_image
+                if conversion_dict['logical'] == 'None':
+                    converted_video2[counter, ...] = greyscale_image2
             counter += 1
         cap.release()
 
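The bunch bookkeeping in the new write_video_sets (bunch_nb, video_nb_per_bunch, remaining) partitions the arenas into fixed-size groups plus a short final group. A minimal standalone sketch of just that arithmetic; the generator name and example counts are illustrative, not part of the package:

import numpy as np

def arena_indices(bunch_nb: int, video_nb_per_bunch: int, remaining: int):
    # Re-derivation of the partition used by write_video_sets: every bunch
    # holds video_nb_per_bunch arenas, except the last one when `remaining`
    # videos are left over after the last full bunch.
    for bunch in np.arange(bunch_nb):
        if bunch == (bunch_nb - 1) and remaining > 0:
            yield np.arange(bunch * video_nb_per_bunch,
                            bunch * video_nb_per_bunch + remaining, dtype=np.uint32)
        else:
            yield np.arange(bunch * video_nb_per_bunch,
                            (bunch + 1) * video_nb_per_bunch, dtype=np.uint32)

# e.g. 10 arenas written 4 at a time: two full bunches, then 2 leftovers
for arenas in arena_indices(bunch_nb=3, video_nb_per_bunch=4, remaining=2):
    print(arenas)   # [0 1 2 3], then [4 5 6 7], then [8 9]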
@@ -417,7 +460,7 @@ def video2numpy(vid_name: str, conversion_dict=None, background=None, true_frame
     return video, converted_video
 
 
-def movie(video, 
+def movie(video, increase_contrast: bool=True):
     """
     Summary
     -------
@@ -427,8 +470,6 @@ def movie(video, keyboard=1, increase_contrast: bool=True):
     ----------
     video : numpy.ndarray
         The input video represented as a 3D NumPy array.
-    keyboard : int, optional
-        Key for waiting during display (default is 1).
    increase_contrast : bool, optional
         Flag to increase the contrast of each frame (default is True).
 
@@ -470,7 +511,8 @@ def movie(video, keyboard=1, increase_contrast: bool=True):
         image = bracket_to_uint8_image_contrast(image)
         final_img = cv2.resize(image, (500, 500))
         cv2.imshow('Motion analysis', final_img)
-        cv2.waitKey(
+        if cv2.waitKey(25) & 0xFF == ord('q'):
+            break
     cv2.destroyAllWindows()
 
 
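The replacement follows the standard OpenCV playback idiom: waitKey(25) waits up to 25 ms for a keypress, the & 0xFF mask normalizes the return value across platforms, and ord('q') allows quitting early. A minimal self-contained loop with the same exit condition; the window name and random frames are illustrative only:

import cv2
import numpy as np

frames = np.random.randint(0, 256, (50, 200, 200), dtype=np.uint8)
for frame in frames:
    cv2.imshow('demo', frame)
    # ~25 ms per frame; pressing 'q' stops playback early, as in movie()
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()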
@@ -624,6 +666,17 @@ def read_and_rotate(image_name, prev_img: NDArray=None, raw_images: bool=False,
         img = img[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
     return img
 
+def read_rotate_crop_and_reduce_image(image_name: str, prev_img: NDArray, crop_coord: list, cr: list,
+                                      raw_images: bool, is_landscape: bool, reduce_image_dim: bool):
+    img = read_and_rotate(image_name, prev_img, raw_images, is_landscape)
+    prev_img = img.copy()
+    if crop_coord is not None:
+        img = img[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], :]
+    img = img[cr[0]:(cr[1] + 1), cr[2]:(cr[3] + 1), :]
+    if reduce_image_dim:
+        img = img[:, :, 0]
+    return img, prev_img
+
 
 def vstack_h5_array(file_name, table: NDArray, key: str="data"):
     """
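Worth noting how the two crop arguments of the new helper compose: crop_coord is an end-exclusive global window, while cr is end-inclusive (hence the + 1). A standalone slicing sketch with made-up coordinates:

import numpy as np

img = np.arange(20 * 30 * 3, dtype=np.uint8).reshape(20, 30, 3)

crop_coord = [2, 18, 5, 25]   # global window [y0, y1, x0, x1], end-exclusive
cr = [1, 4, 2, 6]             # per-arena box, end-inclusive (hence the + 1)

out = img[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], :]
out = out[cr[0]:(cr[1] + 1), cr[2]:(cr[3] + 1), :]
print(out.shape)              # (4, 5, 3)

reduce_image_dim = True       # keep only the first channel, as in the helper
if reduce_image_dim:
    out = out[:, :, 0]
print(out.shape)              # (4, 5)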
@@ -865,6 +918,49 @@ def show(img, interactive: bool=True, cmap=None, show: bool=True):
 
     return fig, ax
 
+def zoom_on_nonzero(binary_image: NDArray, padding: int = 2, return_coord: bool=True):
+    """
+    Crops a binary image around non-zero elements with optional padding and returns either coordinates or cropped region.
+
+    Parameters
+    ----------
+    binary_image : NDArray
+        2D NumPy array containing binary values (0/1)
+    padding : int, default=2
+        Amount of zero-padding to add around the minimum bounding box
+    return_coord : bool, default=True
+        If True, return slice coordinates instead of cropped image
+
+    Returns
+    -------
+    If `return_coord` is True: [y_min, y_max, x_min, x_max] as 4-element Tuple.
+    If False: 2D binary array representing the cropped region defined by non-zero elements plus padding.
+
+    Examples
+    --------
+    >>> img = np.zeros((10,10))
+    >>> img[3:7,4:6] = 1
+    >>> result = zoom_on_nonzero(img)
+    >>> print(result)
+    [1 8 2 7]
+    >>> cropped = zoom_on_nonzero(img, return_coord=False)
+    >>> print(cropped.shape)
+    (6, 5)
+
+    Notes
+    -----
+    - Returns empty slice coordinates if input contains no non-zero elements.
+    - Coordinate indices are 0-based and compatible with NumPy array slicing syntax.
+    """
+    y, x = np.nonzero(binary_image)
+    cy_min = np.max((0, y.min() - padding))
+    cy_max = np.min((binary_image.shape[0], y.max() + padding + 1))
+    cx_min = np.max((0, x.min() - padding))
+    cx_max = np.min((binary_image.shape[1], x.max() + padding + 1))
+    if return_coord:
+        return cy_min, cy_max, cx_min, cx_max
+    else:
+        return binary_image[cy_min:cy_max, cx_min:cx_max]
 
 def save_fig(img: NDArray, full_path, cmap=None):
     """
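The coordinate arithmetic above clamps the padded bounding box to the image border, so padding never yields negative or out-of-range indices. A standalone sketch of the same arithmetic; the helper name and values are illustrative:

import numpy as np

def bbox_with_padding(mask, padding=2):
    # Same arithmetic as zoom_on_nonzero: min/max of the nonzero
    # coordinates, widened by `padding` and clamped to the image border.
    y, x = np.nonzero(mask)
    cy_min = int(max(0, y.min() - padding))
    cy_max = int(min(mask.shape[0], y.max() + padding + 1))
    cx_min = int(max(0, x.min() - padding))
    cx_max = int(min(mask.shape[1], x.max() + padding + 1))
    return cy_min, cy_max, cx_min, cx_max

mask = np.zeros((10, 10), dtype=np.uint8)
mask[0:2, 8:10] = 1                  # blob touching the top-right corner
print(bbox_with_padding(mask))       # (0, 4, 6, 10): clamped at both borders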
@@ -994,21 +1090,6 @@ def extract_time(image_list: list, pathway="", raw_images: bool=False):
     >>> print(time)
     array([0, 0])
 
-    Notes
-    --------
-    dir(my_image)
-    ['<unknown EXIF tag 59932>', '<unknown EXIF tag 59933>', '_exif_ifd_pointer', '_gps_ifd_pointer', '_segments',
-    'aperture_value', 'brightness_value', 'color_space', 'components_configuration', 'compression', 'datetime',
-    'datetime_digitized', 'datetime_original', 'exif_version', 'exposure_bias_value', 'exposure_mode',
-    'exposure_program', 'exposure_time', 'f_number', 'flash', 'flashpix_version', 'focal_length',
-    'focal_length_in_35mm_film', 'get', 'get_file', 'get_thumbnail', 'gps_altitude', 'gps_altitude_ref',
-    'gps_datestamp', 'gps_dest_bearing', 'gps_dest_bearing_ref', 'gps_horizontal_positioning_error',
-    'gps_img_direction', 'gps_img_direction_ref', 'gps_latitude', 'gps_latitude_ref', 'gps_longitude',
-    'gps_longitude_ref', 'gps_speed', 'gps_speed_ref', 'gps_timestamp', 'has_exif', 'jpeg_interchange_format',
-    'jpeg_interchange_format_length', 'lens_make', 'lens_model', 'lens_specification', 'make', 'maker_note',
-    'metering_mode', 'model', 'orientation', 'photographic_sensitivity', 'pixel_x_dimension', 'pixel_y_dimension',
-    'resolution_unit', 'scene_capture_type', 'scene_type', 'sensing_method', 'shutter_speed_value', 'software',
-    'subject_area', 'subsec_time_digitized', 'subsec_time_original', 'white_balance', 'x_resolution',
-    'y_and_c_positioning', 'y_resolution']
-
     """
     if isinstance(pathway, str):
         pathway = Path(pathway)
@@ -1016,7 +1097,7 @@ def extract_time(image_list: list, pathway="", raw_images: bool=False):
     timings = np.zeros((nb, 6), dtype=np.int64)
     if raw_images:
         for i in np.arange(nb):
-            with open(pathway / image_list
+            with open(pathway / image_list, 'rb') as image_file:
                 my_image = exifread.process_file(image_file, details=False, stop_tag='DateTimeOriginal')
                 datetime = my_image["EXIF DateTimeOriginal"]
                 datetime = datetime.values[:10] + ':' + datetime.values[11:]
@@ -1047,4 +1128,180 @@ def extract_time(image_list: list, pathway="", raw_images: bool=False):
     time = np.repeat(0, nb)#arange(1, nb * 60, 60)#"Do not experiment the 31th of december!!!"
     if time.sum() == 0:
         time = np.repeat(0, nb)#arange(1, nb * 60, 60)
-    return time
+    return time
+
+
+def read_one_arena(arena_label, already_greyscale: bool, csc_dict: dict, videos_already_in_ram=None,
+                   true_frame_width=None, vid_name: str=None, background: NDArray=None, background2: NDArray=None):
+    """
+    Read a single arena's video data, potentially converting it from color to greyscale.
+
+    Parameters
+    ----------
+    arena_label : int
+        The label of the arena.
+    already_greyscale : bool
+        Whether the video is already in greyscale format.
+    csc_dict : dict
+        Dictionary containing color space conversion settings.
+    videos_already_in_ram : np.ndarray, optional
+        Pre-loaded video frames in memory. Default is None.
+    true_frame_width : int, optional
+        The true width of the video frames. Default is None.
+    vid_name : str, optional
+        Name of the video file. Default is None.
+    background : np.ndarray, optional
+        Background image for subtractions. Default is None.
+    background2 : np.ndarray, optional
+        Second background image for subtractions. Default is None.
+
+    Returns
+    -------
+    tuple
+        A tuple containing:
+        - visu: np.ndarray or None, the visual frame.
+        - converted_video: np.ndarray or None, the video data converted as needed.
+        - converted_video2: np.ndarray or None, additional video data if necessary.
+
+    Raises
+    ------
+    FileNotFoundError
+        If the specified video file does not exist.
+    ValueError
+        If the video data shape is invalid.
+
+    Notes
+    -----
+    This function assumes that `video2numpy` is a helper function available in the scope.
+    For optimal performance, ensure all video data fits in RAM.
+    """
+    visu, converted_video, converted_video2 = None, None, None
+    logging.info(f"Arena n°{arena_label}. Load images and videos")
+    if videos_already_in_ram is not None:
+        if already_greyscale:
+            converted_video = videos_already_in_ram
+        else:
+            if csc_dict['logical'] == 'None':
+                visu, converted_video = videos_already_in_ram
+            else:
+                visu, converted_video, converted_video2 = videos_already_in_ram
+    else:
+        if vid_name is not None:
+            if already_greyscale:
+                converted_video = video2numpy(vid_name, None, background, background2, true_frame_width)
+                if len(converted_video.shape) == 4:
+                    converted_video = converted_video[:, :, :, 0]
+            else:
+                visu = video2numpy(vid_name, None, background, background2, true_frame_width)
+        else:
+            vid_name = f"ind_{arena_label}.npy"
+            if os.path.isfile(vid_name):
+                if already_greyscale:
+                    converted_video = video2numpy(vid_name, None, background, background2, true_frame_width)
+                    if len(converted_video.shape) == 4:
+                        converted_video = converted_video[:, :, :, 0]
+                else:
+                    visu = video2numpy(vid_name, None, background, background2, true_frame_width)
+    return visu, converted_video, converted_video2
+
+def create_empty_videos(image_list: list, cr: list, lose_accuracy_to_save_memory: bool,
+                        already_greyscale: bool, csc_dict: dict):
+    """
+    Create empty video arrays based on input parameters.
+
+    Parameters
+    ----------
+    image_list : list
+        List of images.
+    cr : list
+        Crop region defined by [x_start, y_start, x_end, y_end].
+    lose_accuracy_to_save_memory : bool
+        Boolean flag to determine if memory should be saved by using uint8 data type.
+    already_greyscale : bool
+        Boolean flag indicating if the images are already in greyscale format.
+    csc_dict : dict
+        Dictionary containing color space conversion settings, including 'logical' key.
+
+    Returns
+    -------
+    tuple
+        A tuple containing three elements:
+        - `visu`: NumPy array with shape (len(image_list), cr[1] - cr[0] + 1, cr[3] - cr[2] + 1, 3) and dtype uint8 for RGB images.
+        - `converted_video`: NumPy array with shape (len(image_list), cr[1] - cr[0] + 1, cr[3] - cr[2] + 1) and dtype uint8 or float according to `lose_accuracy_to_save_memory`.
+        - `converted_video2`: NumPy array with shape same as `converted_video` and dtype uint8 or float according to `lose_accuracy_to_save_memory`.
+
+    Notes
+    -----
+    Performance considerations:
+    - If `lose_accuracy_to_save_memory` is True, the function uses np.uint8 for memory efficiency.
+    - If `already_greyscale` is False, additional arrays are created to store RGB data.
+    """
+    visu, converted_video, converted_video2 = None, None, None
+    dims = len(image_list), cr[1] - cr[0], cr[3] - cr[2]
+    if lose_accuracy_to_save_memory:
+        converted_video = np.zeros(dims, dtype=np.uint8)
+    else:
+        converted_video = np.zeros(dims, dtype=float)
+    if not already_greyscale:
+        visu = np.zeros((dims[0], dims[1], dims[2], 3), dtype=np.uint8)
+        if csc_dict['logical'] != 'None':
+            if lose_accuracy_to_save_memory:
+                converted_video2 = np.zeros(dims, dtype=np.uint8)
+            else:
+                converted_video2 = np.zeros(dims, dtype=float)
+    return visu, converted_video, converted_video2
+
+def display_network_methods(network_detection: object, save_path: str=None):
+    """
+    Display segmentation results from a network detection object.
+
+    Extended Description
+    --------------------
+    Plots the binary segmentation results for various methods stored in ``network_detection.all_results``.
+    Highlights the best result based on quality metrics and allows for saving the figure to a file.
+
+    Parameters
+    ----------
+    network_detection : object
+        An object containing segmentation results and quality metrics.
+    save_path : str, optional
+        Path to save the figure. If ``None``, the plot is displayed.
+    """
+    row_nb = 6
+    fig, axes = plt.subplots(int(np.ceil(len(network_detection.all_results) / row_nb)), row_nb, figsize=(100, 100))
+    fig.suptitle(f'Segmentation Comparison: Frangi + Sato Variations', fontsize=16)
+
+    # Plot all results
+    for idx, result in enumerate(network_detection.all_results):
+        row = idx // row_nb
+        col = idx % row_nb
+
+        ax = axes[row, col]
+
+        # Display binary segmentation result
+        ax.imshow(result['binary'], cmap='gray')
+
+        # Create title with filter info and quality score
+        title = f"{result['method']}: {str(np.round(network_detection.quality_metrics[idx], 0))}"
+
+        # Highlight the best result
+        if idx == network_detection.best_idx:
+            ax.set_title(title, fontsize=8, color='red', fontweight='bold')
+            ax.add_patch(plt.Rectangle((0, 0), result['binary'].shape[1] - 1,
+                                       result['binary'].shape[0] - 1,
+                                       fill=False, edgecolor='red', linewidth=3))
+        else:
+            ax.set_title(title, fontsize=8)
+
+        ax.axis('off')
+    plt.tight_layout()
+
+    if save_path is not None:
+        plt.savefig(save_path, bbox_inches='tight', pad_inches=0., transparent=True, dpi=500)
+        plt.close()
+    else:
+        plt.show()
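The three-way return of read_one_arena is easiest to see in isolation: one array when the video is already greyscale, two when a single colour-space conversion is in play, three when the dict's 'logical' entry chains a second conversion. A minimal standalone sketch of that dispatch, with package objects replaced by plain arrays and the helper name being illustrative:

import numpy as np

def unpack_ram_videos(videos_already_in_ram, already_greyscale, csc_dict):
    # Mirrors the videos_already_in_ram branch of read_one_arena:
    # one array when greyscale, two when a single conversion is used,
    # three when a second ('logical') conversion channel exists.
    visu = converted = converted2 = None
    if already_greyscale:
        converted = videos_already_in_ram
    elif csc_dict['logical'] == 'None':
        visu, converted = videos_already_in_ram
    else:
        visu, converted, converted2 = videos_already_in_ram
    return visu, converted, converted2

grey = np.zeros((5, 4, 4), dtype=np.uint8)
colour = np.zeros((5, 4, 4, 3), dtype=np.uint8)

print(unpack_ram_videos(grey, True, {'logical': 'None'})[1].shape)   # (5, 4, 4)
v, c, c2 = unpack_ram_videos((colour, grey), False, {'logical': 'None'})
print(v.shape, c.shape, c2)                                          # (5, 4, 4, 3) (5, 4, 4) None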
cellects/utils/utilitarian.py
CHANGED
@@ -141,6 +141,65 @@ def translate_dict(old_dict: dict) -> Dict:
     return numba_dict
 
 
+def split_dict(c_space_dict: dict) -> Tuple[Dict, Dict, list]:
+    """
+    Split a dictionary into two dictionaries based on specific criteria and return their keys.
+
+    Split the input dictionary `c_space_dict` into two dictionaries: one for items not
+    ending with '2' and another where the key is truncated by removing its last
+    character if it does end with '2'. Additionally, return the keys that have been
+    processed.
+
+    Parameters
+    ----------
+    c_space_dict : dict
+        The dictionary to be split. Expected keys are strings and values can be any type.
+
+    Returns
+    -------
+    first_dict : dict
+        Dictionary containing items from `c_space_dict` whose keys do not end with '2'.
+    second_dict : dict
+        Dictionary containing items from `c_space_dict` whose keys end with '2',
+        where the key is truncated by removing its last character.
+    c_spaces : list
+        List of keys from `c_space_dict` that have been processed.
+
+    Raises
+    ------
+    None
+
+    Notes
+    -----
+    No critical information to share.
+
+    Examples
+    --------
+    >>> c_space_dict = {'key1': 10, 'key2': 20, 'logical': 30}
+    >>> first_dict, second_dict, c_spaces = split_dict(c_space_dict)
+    >>> print(first_dict)
+    {'key1': 10}
+    >>> print(second_dict)
+    {'key': 20}
+    >>> print(c_spaces)
+    ['key1', 'key']
+
+    """
+    first_dict = Dict()
+    second_dict = Dict()
+    c_spaces = []
+    for k, v in c_space_dict.items():
+        if k == 'PCA' or k != 'logical' and np.absolute(v).sum() > 0:
+            if k[-1] != '2':
+                first_dict[k] = v
+                c_spaces.append(k)
+            else:
+                second_dict[k[:-1]] = v
+                c_spaces.append(k[:-1])
+    return first_dict, second_dict, c_spaces
+
+
 def reduce_path_len(pathway: str, to_start: int, from_end: int) -> str:
     """
     Reduce the length of a given pathway string by truncating it from both ends.
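The routing in split_dict can be checked on plain Python dicts; the real function fills numba typed Dicts (Dict()) and additionally drops all-zero weight vectors. A minimal sketch of the same logic, with illustrative channel names:

import numpy as np

def split_plain_dict(c_space_dict):
    # Same routing as split_dict, on plain dicts: 'logical' is a control
    # key, all-zero weight vectors are dropped, and a trailing '2' sends
    # the entry to the second dict under the truncated key.
    first, second, c_spaces = {}, {}, []
    for k, v in c_space_dict.items():
        if k == 'PCA' or (k != 'logical' and np.absolute(v).sum() > 0):
            if k[-1] != '2':
                first[k] = v
                c_spaces.append(k)
            else:
                second[k[:-1]] = v
                c_spaces.append(k[:-1])
    return first, second, c_spaces

csc = {'lab': np.array([1, 0, 0]), 'hsv2': np.array([0, 0, 1]),
       'luv': np.zeros(3), 'logical': 'And'}
print(split_plain_dict(csc))
# ({'lab': array([1, 0, 0])}, {'hsv': array([0, 0, 1])}, ['lab', 'hsv'])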
@@ -435,7 +494,6 @@ def smallest_memory_array(array_object, array_type='uint') -> NDArray:
     if isinstance(array_object, np.ndarray):
         value_max = array_object.max()
     else:
-
         if len(array_object[0]) > 0:
             value_max = np.max((array_object[0].max(), array_object[1].max()))
         else:
@@ -474,17 +532,36 @@ def remove_coordinates(arr1: NDArray, arr2: NDArray) -> NDArray:
 
     Examples
     --------
-    >>> arr1 = np.
-    >>> arr2 = np.array([[
+    >>> arr1 = np.array([[1, 2], [3, 4]])
+    >>> arr2 = np.array([[3, 4]])
     >>> remove_coordinates(arr1, arr2)
-    array([[
+    array([[1, 2],
            [3, 4]])
-    """
-    if arr1.shape[1] != 2 or arr2.shape[1] != 2:
-        raise ValueError("Both arrays must have shape (n, 2)")
-    mask = ~np.isin(arr1, arr2).all(axis=1)
-    return arr1[mask]
 
+    >>> arr1 = np.array([[1, 2], [3, 4]])
+    >>> arr2 = np.array([[3, 2], [1, 4]])
+    >>> remove_coordinates(arr1, arr2)
+    array([[1, 2],
+           [3, 4]])
 
+    >>> arr1 = np.array([[1, 2], [3, 4]])
+    >>> arr2 = np.array([[3, 2], [1, 2]])
+    >>> remove_coordinates(arr1, arr2)
+    array([[3, 4]])
 
+    >>> arr1 = np.arange(200).reshape(100, 2)
+    >>> arr2 = np.array([[196, 197], [198, 199]])
+    >>> new_arr1 = remove_coordinates(arr1, arr2)
+    >>> new_arr1.shape
+    (98, 2)
+    """
+    if arr2.shape[0] == 0:
+        return arr1
+    else:
+        if arr1.shape[1] != 2 or arr2.shape[1] != 2:
+            raise ValueError("Both arrays must have shape (n, 2)")
+        c_to_keep = ~np.all(arr1 == arr2[0], axis=1)
+        for row in arr2[1:]:
+            c_to_keep *= ~np.all(arr1 == row, axis=1)
+        return arr1[c_to_keep]
 
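Unlike the removed np.isin-based version, which tested elements independently and could drop a row whose values merely appear somewhere in arr2, the new body only drops exact row matches (the second doctest above shows [3, 2] and [1, 4] removing nothing). A standalone sketch of the same mask construction, with illustrative arrays:

import numpy as np

arr1 = np.array([[1, 2], [3, 4], [5, 6]])
arr2 = np.array([[3, 4], [9, 9]])

# Start from "differs from arr2's first row", then AND in the other rows,
# as the loop in remove_coordinates does with `*=` on the boolean mask.
keep = ~np.all(arr1 == arr2[0], axis=1)
for row in arr2[1:]:
    keep &= ~np.all(arr1 == row, axis=1)

print(keep)          # [ True False  True]
print(arr1[keep])    # [[1 2], [5 6]]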