cellects 0.2.6__tar.gz → 0.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. {cellects-0.2.6 → cellects-0.3.2}/PKG-INFO +2 -2
  2. {cellects-0.2.6 → cellects-0.3.2}/README.md +1 -1
  3. {cellects-0.2.6 → cellects-0.3.2}/pyproject.toml +1 -1
  4. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/core/cellects_threads.py +46 -184
  5. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/core/motion_analysis.py +74 -45
  6. cellects-0.3.2/src/cellects/core/one_image_analysis.py +787 -0
  7. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/core/program_organizer.py +157 -98
  8. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/core/script_based_run.py +13 -23
  9. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/first_window.py +7 -4
  10. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/image_analysis_window.py +45 -35
  11. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/ui_strings.py +3 -2
  12. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/image_segmentation.py +21 -77
  13. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/morphological_operations.py +9 -13
  14. cellects-0.3.2/src/cellects/image_analysis/one_image_analysis_threads.py +360 -0
  15. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/shape_descriptors.py +1068 -1067
  16. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/utils/formulas.py +3 -1
  17. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/utils/load_display_save.py +1 -1
  18. {cellects-0.2.6 → cellects-0.3.2}/src/cellects.egg-info/PKG-INFO +2 -2
  19. {cellects-0.2.6 → cellects-0.3.2}/tests/test_formulas.py +4 -4
  20. {cellects-0.2.6 → cellects-0.3.2}/tests/test_image_segmentation.py +20 -14
  21. {cellects-0.2.6 → cellects-0.3.2}/tests/test_motion_analysis.py +10 -22
  22. {cellects-0.2.6 → cellects-0.3.2}/tests/test_network_functions.py +0 -4
  23. cellects-0.3.2/tests/test_one_image_analysis.py +401 -0
  24. {cellects-0.2.6 → cellects-0.3.2}/tests/test_program_organizer.py +9 -6
  25. {cellects-0.2.6 → cellects-0.3.2}/tests/test_shape_descriptors.py +0 -1
  26. cellects-0.2.6/src/cellects/core/one_image_analysis.py +0 -1082
  27. cellects-0.2.6/src/cellects/image_analysis/one_image_analysis_threads.py +0 -230
  28. cellects-0.2.6/tests/test_one_image_analysis.py +0 -259
  29. {cellects-0.2.6 → cellects-0.3.2}/LICENSE +0 -0
  30. {cellects-0.2.6 → cellects-0.3.2}/setup.cfg +0 -0
  31. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/__init__.py +0 -0
  32. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/__main__.py +0 -0
  33. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/config/__init__.py +0 -0
  34. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/config/all_vars_dict.py +0 -0
  35. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/core/__init__.py +0 -0
  36. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/core/cellects_paths.py +0 -0
  37. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/__init__.py +0 -0
  38. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/advanced_parameters.py +0 -0
  39. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/cellects.py +0 -0
  40. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/custom_widgets.py +0 -0
  41. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/if_several_folders_window.py +0 -0
  42. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/required_output.py +0 -0
  43. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/gui/video_analysis_window.py +0 -0
  44. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/icons/__init__.py +0 -0
  45. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/icons/cellects_icon.icns +0 -0
  46. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/icons/cellects_icon.ico +0 -0
  47. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/__init__.py +0 -0
  48. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/cell_leaving_detection.py +0 -0
  49. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/network_functions.py +0 -0
  50. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/oscillations_functions.py +0 -0
  51. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/image_analysis/progressively_add_distant_shapes.py +0 -0
  52. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/utils/__init__.py +0 -0
  53. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/utils/decorators.py +0 -0
  54. {cellects-0.2.6 → cellects-0.3.2}/src/cellects/utils/utilitarian.py +0 -0
  55. {cellects-0.2.6 → cellects-0.3.2}/src/cellects.egg-info/SOURCES.txt +0 -0
  56. {cellects-0.2.6 → cellects-0.3.2}/src/cellects.egg-info/dependency_links.txt +0 -0
  57. {cellects-0.2.6 → cellects-0.3.2}/src/cellects.egg-info/entry_points.txt +0 -0
  58. {cellects-0.2.6 → cellects-0.3.2}/src/cellects.egg-info/requires.txt +0 -0
  59. {cellects-0.2.6 → cellects-0.3.2}/src/cellects.egg-info/top_level.txt +0 -0
  60. {cellects-0.2.6 → cellects-0.3.2}/tests/test_based_run.py +0 -0
  61. {cellects-0.2.6 → cellects-0.3.2}/tests/test_cell_leaving_detection.py +0 -0
  62. {cellects-0.2.6 → cellects-0.3.2}/tests/test_load_display_save.py +0 -0
  63. {cellects-0.2.6 → cellects-0.3.2}/tests/test_morphological_operations.py +0 -0
  64. {cellects-0.2.6 → cellects-0.3.2}/tests/test_progressively_add_distant_shapes.py +0 -0
  65. {cellects-0.2.6 → cellects-0.3.2}/tests/test_utilitarian.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: cellects
- Version: 0.2.6
+ Version: 0.3.2
  Summary: Cell Expansion Computer Tracking Software.
  Author: Aurèle Boussard
  License: GNU GENERAL PUBLIC LICENSE
@@ -730,7 +730,7 @@ Requires-Dist: mkdocs-jupyter; extra == "doc"
  [![Python versions](https://img.shields.io/pypi/pyversions/cellects.svg?style=flat-square)](https://pypi.org/project/cellects/)
  [![License](https://img.shields.io/pypi/l/cellects.svg?style=flat-square)](https://github.com/Aurele-B/cellects/blob/main/LICENSE)
  [![Stars](https://img.shields.io/github/stars/Aurele-B/cellects.svg?style=flat-square)](https://github.com/Aurele-B/cellects/stargazers)
- ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/Aurele-B/Cellects/.github%2Fworkflows%2Fci.yml)
+ ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/Aurele-B/Cellects/.github%2Fworkflows%2Frelease.yml)
  ![Coverage](https://raw.githubusercontent.com/Aurele-B/cellects/gh-pages/badges/coverage.svg)

  Description
@@ -10,7 +10,7 @@
  [![Python versions](https://img.shields.io/pypi/pyversions/cellects.svg?style=flat-square)](https://pypi.org/project/cellects/)
  [![License](https://img.shields.io/pypi/l/cellects.svg?style=flat-square)](https://github.com/Aurele-B/cellects/blob/main/LICENSE)
  [![Stars](https://img.shields.io/github/stars/Aurele-B/cellects.svg?style=flat-square)](https://github.com/Aurele-B/cellects/stargazers)
- ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/Aurele-B/Cellects/.github%2Fworkflows%2Fci.yml)
+ ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/Aurele-B/Cellects/.github%2Fworkflows%2Frelease.yml)
  ![Coverage](https://raw.githubusercontent.com/Aurele-B/cellects/gh-pages/badges/coverage.svg)

  Description
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "cellects"
- version = "0.2.6"
+ version = "0.3.2"
  description = "Cell Expansion Computer Tracking Software."
  readme = "README.md"
  license = { file = "LICENSE" }
@@ -354,26 +354,18 @@ class UpdateImageThread(QtCore.QThread):
  image = self.parent().imageanalysiswindow.drawn_image.copy()
  # 3) The automatically detected video contours
  if self.parent().imageanalysiswindow.delineation_done: # add a mask of the video contour
+ if self.parent().po.vars['contour_color'] == 255:
+ arena_contour_col = (240, 232, 202)
+ else:
+ arena_contour_col = (138, 95, 18)
  # Draw the delineation mask of each arena
- for contour_i in range(len(self.parent().po.top)):
- min_cy = self.parent().po.top[contour_i]
- max_cy = self.parent().po.bot[contour_i]
- min_cx = self.parent().po.left[contour_i]
- max_cx = self.parent().po.right[contour_i]
- text = f"{contour_i + 1}"
- position = (self.parent().po.left[contour_i] + 25, self.parent().po.top[contour_i] + (self.parent().po.bot[contour_i] - self.parent().po.top[contour_i]) // 2)
- image = cv2.putText(image, # numpy array on which text is written
- text, # text
- position, # position at which writing has to start
- cv2.FONT_HERSHEY_SIMPLEX, # font family
- 1, # font size
- (138, 95, 18, 255),
- # (209, 80, 0, 255), # font color
- 2) # font stroke
+ for _i, (min_cy, max_cy, min_cx, max_cx) in enumerate(zip(self.parent().po.top, self.parent().po.bot, self.parent().po.left, self.parent().po.right)):
+ position = (min_cx + 25, min_cy + (max_cy - min_cy) // 2)
+ image = cv2.putText(image, f"{_i + 1}", position, cv2.FONT_HERSHEY_SIMPLEX, 1, arena_contour_col + (255,),2)
  if (max_cy - min_cy) < 0 or (max_cx - min_cx) < 0:
  self.parent().imageanalysiswindow.message.setText("Error: the shape number or the detection is wrong")
  image = draw_img_with_mask(image, dims, (min_cy, max_cy - 1, min_cx, max_cx - 1),
- self.parent().po.vars['arena_shape'], (138, 95, 18), True, contour_width)
+ self.parent().po.vars['arena_shape'], arena_contour_col, True, contour_width)
  else: #load
  if user_input:
  # III/ If this thread runs from user input: update the drawn_image according to the current user input
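The rewritten loop above replaces four per-index lookups with a single zip over the boundary arrays. A minimal standalone sketch of the pattern, with hypothetical bounding-box arrays standing in for po.top/bot/left/right:

import numpy as np

# Hypothetical per-arena bounding boxes (two arenas)
top, bot = np.array([10, 50]), np.array([40, 90])
left, right = np.array([5, 60]), np.array([35, 95])

# 0.2.6 style: index each array separately inside the loop
for contour_i in range(len(top)):
    box = (top[contour_i], bot[contour_i], left[contour_i], right[contour_i])

# 0.3.2 style: zip the arrays and enumerate for the arena label
for _i, (min_cy, max_cy, min_cx, max_cx) in enumerate(zip(top, bot, left, right)):
    position = (min_cx + 25, min_cy + (max_cy - min_cy) // 2)  # label anchor
    label = f"{_i + 1}"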
@@ -391,7 +383,7 @@ class UpdateImageThread(QtCore.QThread):
  mask_shape = "rectangle"
  else:
  color = (0, 0, 0)
- mask_shape = self.parent().po.all['arena_shape']
+ mask_shape = self.parent().po.vars['arena_shape']
  image = draw_img_with_mask(image, dims, minmax, mask_shape, color)
  self.parent().imageanalysiswindow.display_image.update_image(image)
  self.message_when_thread_finished.emit(True)
@@ -455,67 +447,12 @@ class FirstImageAnalysisThread(QtCore.QThread):
  pixel sizes, and updates various state attributes on the parent object.
  """
  tic = default_timer()
- biomask = None
- backmask = None
- if self.parent().imageanalysiswindow.bio_masks_number != 0:
- shape_nb, ordered_image = cv2.connectedComponents((self.parent().imageanalysiswindow.bio_mask > 0).astype(np.uint8))
- shape_nb -= 1
- biomask = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
- else:
- shape_nb = 0
- if self.parent().imageanalysiswindow.back_masks_number != 0:
- backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
- if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2 or shape_nb == self.parent().po.sample_number:
- self.message_from_thread.emit("Image segmentation, wait")
- if not self.parent().imageanalysiswindow.asking_first_im_parameters_flag and self.parent().po.all['scale_with_image_or_cells'] == 0 and self.parent().po.all["set_spot_size"]:
- self.parent().po.get_average_pixel_size()
- else:
- self.parent().po.starting_blob_hsize_in_pixels = None
- self.parent().po.all["bio_mask"] = biomask
- self.parent().po.all["back_mask"] = backmask
- self.parent().po.fast_first_image_segmentation()
- if shape_nb == self.parent().po.sample_number and self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] != self.parent().po.sample_number:
- self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] = shape_nb
- self.parent().po.first_image.shape_number = shape_nb
- self.parent().po.first_image.validated_shapes = (self.parent().imageanalysiswindow.bio_mask > 0).astype(np.uint8)
- self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['binary_image'] = self.parent().po.first_image.validated_shapes
+ if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2:
+ self.message_from_thread.emit("Image segmentation, wait...")
  else:
- self.message_from_thread.emit("Generating analysis options, wait...")
- if self.parent().po.vars["color_number"] > 2:
- kmeans_clust_nb = self.parent().po.vars["color_number"]
- if self.parent().po.basic:
- self.message_from_thread.emit("Generating analysis options, wait less than 30 minutes")
- else:
- self.message_from_thread.emit("Generating analysis options, a few minutes")
- else:
- kmeans_clust_nb = None
- if self.parent().po.basic:
- self.message_from_thread.emit("Generating analysis options, wait a few minutes")
- else:
- self.message_from_thread.emit("Generating analysis options, around 1 minute")
- if self.parent().imageanalysiswindow.asking_first_im_parameters_flag:
- self.parent().po.first_image.find_first_im_csc(sample_number=self.parent().po.sample_number,
- several_blob_per_arena=None,
- spot_shape=None, spot_size=None,
- kmeans_clust_nb=kmeans_clust_nb,
- biomask=self.parent().po.all["bio_mask"],
- backmask=self.parent().po.all["back_mask"],
- color_space_dictionaries=None,
- basic=self.parent().po.basic)
- else:
- if self.parent().po.all['scale_with_image_or_cells'] == 0:
- self.parent().po.get_average_pixel_size()
- else:
- self.parent().po.starting_blob_hsize_in_pixels = None
- self.parent().po.first_image.find_first_im_csc(sample_number=self.parent().po.sample_number,
- several_blob_per_arena=self.parent().po.vars['several_blob_per_arena'],
- spot_shape=self.parent().po.all['starting_blob_shape'],
- spot_size=self.parent().po.starting_blob_hsize_in_pixels,
- kmeans_clust_nb=kmeans_clust_nb,
- biomask=self.parent().po.all["bio_mask"],
- backmask=self.parent().po.all["back_mask"],
- color_space_dictionaries=None,
- basic=self.parent().po.basic)
+ self.message_from_thread.emit("Generating segmentation options, wait...")
+ self.parent().po.full_first_image_segmentation(not self.parent().imageanalysiswindow.asking_first_im_parameters_flag,
+ self.parent().imageanalysiswindow.bio_mask, self.parent().imageanalysiswindow.back_mask)

  logging.info(f" image analysis lasted {np.floor((default_timer() - tic) / 60).astype(int)} minutes {np.round((default_timer() - tic) % 60).astype(int)} secondes")
  self.message_when_thread_finished.emit(True)
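The mask-preparation and find_first_im_csc branching deleted above now lives behind program_organizer.full_first_image_segmentation. Its body is not shown in this diff, so the sketch below is only a plausible reconstruction of such a façade (the method name and call signature are taken from the diff; everything else is an assumption):

import numpy as np

def full_first_image_segmentation(self, use_spot_size, bio_mask, back_mask):
    # Hypothetical body: convert the GUI masks once, instead of in the thread
    biomask = np.nonzero(bio_mask) if bio_mask is not None else None
    backmask = np.nonzero(back_mask) if back_mask is not None else None
    if use_spot_size and self.all['scale_with_image_or_cells'] == 0:
        self.get_average_pixel_size()
    else:
        self.starting_blob_hsize_in_pixels = None
    # Delegate to the same search the thread used to call directly
    self.first_image.find_first_im_csc(sample_number=self.sample_number,
                                       biomask=biomask, backmask=backmask,
                                       basic=self.basic)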
@@ -582,58 +519,11 @@ class LastImageAnalysisThread(QtCore.QThread):
  message_when_thread_finished.emit(success : bool) : signal
  Signal to indicate the completion of the thread.
  """
- self.parent().po.cropping(False)
- self.parent().po.get_background_to_subtract()
- biomask = None
- backmask = None
- if self.parent().imageanalysiswindow.bio_masks_number != 0:
- biomask = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
- if self.parent().imageanalysiswindow.back_masks_number != 0:
- backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
  if self.parent().po.visualize or (len(self.parent().po.first_im.shape) == 2 and not self.parent().po.network_shaped):
  self.message_from_thread.emit("Image segmentation, wait...")
- self.parent().po.fast_last_image_segmentation(biomask=biomask, backmask=backmask)
  else:
  self.message_from_thread.emit("Generating analysis options, wait...")
- arenas_mask = None
- if self.parent().po.all['are_gravity_centers_moving'] != 1:
- cr = [self.parent().po.top, self.parent().po.bot, self.parent().po.left, self.parent().po.right]
- arenas_mask = np.zeros_like(self.parent().po.first_image.validated_shapes)
- for _i in np.arange(len(self.parent().po.vars['analyzed_individuals'])):
- if self.parent().po.vars['arena_shape'] == 'circle':
- ellipse = create_ellipse(cr[1][_i] - cr[0][_i], cr[3][_i] - cr[2][_i])
- arenas_mask[cr[0][_i]: cr[1][_i], cr[2][_i]:cr[3][_i]] = ellipse
- else:
- arenas_mask[cr[0][_i]: cr[1][_i], cr[2][_i]:cr[3][_i]] = 1
- if self.parent().po.network_shaped:
- self.parent().po.last_image.network_detection(arenas_mask, csc_dict=self.parent().po.vars["convert_for_motion"], biomask=biomask, backmask=backmask)
- else:
- if self.parent().po.vars['several_blob_per_arena']:
- concomp_nb = [self.parent().po.sample_number, self.parent().po.first_image.size // 50]
- max_shape_size = .75 * self.parent().po.first_image.size
- total_surfarea = .99 * self.parent().po.first_image.size
- else:
- concomp_nb = [self.parent().po.sample_number, self.parent().po.sample_number * 200]
- if self.parent().po.all['are_zigzag'] == "columns":
- inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.first_image.y_boundaries)))
- elif self.parent().po.all['are_zigzag'] == "rows":
- inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.first_image.x_boundaries)))
- else:
- dist1 = np.mean(np.diff(np.nonzero(self.parent().po.first_image.y_boundaries)))
- dist2 = np.mean(np.diff(np.nonzero(self.parent().po.first_image.x_boundaries)))
- inter_dist = np.max(dist1, dist2)
- if self.parent().po.all['starting_blob_shape'] == "rectangle":
- max_shape_size = np.square(2 * inter_dist)
- else:
- max_shape_size = np.pi * np.square(inter_dist)
- total_surfarea = max_shape_size * self.parent().po.sample_number
- ref_image = self.parent().po.first_image.validated_shapes
- self.parent().po.first_image.generate_subtract_background(self.parent().po.vars['convert_for_motion'], self.parent().po.vars['drift_already_corrected'])
- kmeans_clust_nb = None
- self.parent().po.last_image.find_last_im_csc(concomp_nb, total_surfarea, max_shape_size, arenas_mask,
- ref_image, self.parent().po.first_image.subtract_background,
- kmeans_clust_nb, biomask, backmask, color_space_dictionaries=None,
- basic=self.parent().po.basic)
+ self.parent().po.full_last_image_segmentation(self.parent().imageanalysiswindow.bio_mask, self.parent().imageanalysiswindow.back_mask)
  self.message_when_thread_finished.emit(True)


@@ -645,7 +535,7 @@ class CropScaleSubtractDelineateThread(QtCore.QThread):
  -------
  message_from_thread : Signal(str)
  Signal emitted when progress messages are available.
- message_when_thread_finished : Signal(bool)
+ message_when_thread_finished : Signal(dict)
  Signal emitted upon completion of the thread's task.

  Notes
@@ -653,7 +543,7 @@ class CropScaleSubtractDelineateThread(QtCore.QThread):
  This class uses `QThread` to manage the process asynchronously.
  """
  message_from_thread = QtCore.Signal(str)
- message_when_thread_finished = QtCore.Signal(str)
+ message_when_thread_finished = QtCore.Signal(dict)

  def __init__(self, parent=None):
  """
@@ -686,8 +576,8 @@ class CropScaleSubtractDelineateThread(QtCore.QThread):
  to perform necessary image processing tasks.
  """
  logging.info("Start cropping if required")
+ analysis_status = {"continue": True, "message": ""}
  self.parent().po.cropping(is_first_image=True)
- self.parent().po.cropping(is_first_image=False)
  self.parent().po.get_average_pixel_size()
  if os.path.isfile('Data to run Cellects quickly.pkl'):
  os.remove('Data to run Cellects quickly.pkl')
@@ -700,18 +590,22 @@ class CropScaleSubtractDelineateThread(QtCore.QThread):
  nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.parent().po.first_image.validated_shapes)
  y_lim = self.parent().po.first_image.y_boundaries
  if ((nb - 1) != self.parent().po.sample_number or np.any(stats[:, 4] == 1)):
- self.message_from_thread.emit("Image analysis failed to detect the right cell(s) number: restart the analysis.")
+ analysis_status["message"] = "Image analysis failed to detect the right cell(s) number: restart the analysis."
+ analysis_status['continue'] = False
+ elif y_lim is None:
+ analysis_status["message"] = "The shapes detected in the image did not allow automatic arena delineation."
+ analysis_status['continue'] = False
  elif (y_lim == - 1).sum() != (y_lim == 1).sum():
- self.message_from_thread.emit("Automatic arena delineation cannot work if one cell touches the image border.")
+ analysis_status["message"] = "Automatic arena delineation cannot work if one cell touches the image border."
  self.parent().po.first_image.y_boundaries = None
- else:
- logging.info("Start automatic video delineation")
- analysis_status = self.parent().po.delineate_each_arena()
- self.message_when_thread_finished.emit(analysis_status["message"])
- else:
+ analysis_status['continue'] = False
+ if analysis_status['continue']:
  logging.info("Start automatic video delineation")
  analysis_status = self.parent().po.delineate_each_arena()
- self.message_when_thread_finished.emit(analysis_status["message"])
+ else:
+ self.parent().po.first_image.validated_shapes = np.zeros(self.parent().po.first_image.image.shape[:2], dtype=np.uint8)
+ logging.info(analysis_status["message"])
+ self.message_when_thread_finished.emit(analysis_status)


  class SaveManualDelineationThread(QtCore.QThread):
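CropScaleSubtractDelineateThread.run above now funnels every validation outcome through one analysis_status dict and a single Signal(dict) emission, instead of emitting a string per failure branch. A reduced, runnable sketch of the pattern (the check and follow-up functions are placeholders, not Cellects API):

def right_cell_count_detected() -> bool:
    return False  # placeholder: pretend the validation failed

def delineate_each_arena() -> dict:
    return {"continue": True, "message": ""}  # placeholder follow-up step

analysis_status = {"continue": True, "message": ""}
if not right_cell_count_detected():
    analysis_status = {"continue": False,
                       "message": "Image analysis failed to detect the right cell(s) number: restart the analysis."}
if analysis_status["continue"]:
    analysis_status = delineate_each_arena()
print(analysis_status)  # in the GUI, one Signal(dict) emission carries both verdict and message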
@@ -738,21 +632,18 @@ class SaveManualDelineationThread(QtCore.QThread):
  """
  Do save the coordinates.
  """
- self.parent().po.left = np.arange(self.parent().po.sample_number)
- self.parent().po.right = np.arange(self.parent().po.sample_number)
- self.parent().po.top = np.arange(self.parent().po.sample_number)
- self.parent().po.bot = np.arange(self.parent().po.sample_number)
- for arena in np.arange(1, self.parent().po.sample_number + 1):
- y, x = np.nonzero(self.parent().imageanalysiswindow.arena_mask == arena)
- self.parent().po.left[arena - 1] = np.min(x)
- self.parent().po.right[arena - 1] = np.max(x)
- self.parent().po.top[arena - 1] = np.min(y)
- self.parent().po.bot[arena - 1] = np.max(y)
-
- logging.info("Save data to run Cellects quickly")
- self.parent().po.data_to_save['coordinates'] = True
+ self.parent().po.left = np.zeros(self.parent().po.sample_number)
+ self.parent().po.right = np.zeros(self.parent().po.sample_number)
+ self.parent().po.top = np.zeros(self.parent().po.sample_number)
+ self.parent().po.bot = np.zeros(self.parent().po.sample_number)
+ for arena_i in np.arange(self.parent().po.sample_number):
+ y, x = np.nonzero(self.parent().imageanalysiswindow.arena_mask == arena_i + 1)
+ self.parent().po.left[arena_i] = np.min(x)
+ self.parent().po.right[arena_i] = np.max(x)
+ self.parent().po.top[arena_i] = np.min(y)
+ self.parent().po.bot[arena_i] = np.max(y)
+ self.parent().po.list_coordinates()
  self.parent().po.save_data_to_run_cellects_quickly()
- self.parent().po.data_to_save['coordinates'] = False

  logging.info("Save manual video delineation")
  self.parent().po.vars['analyzed_individuals'] = np.arange(self.parent().po.sample_number) + 1
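The per-arena bounding boxes are still derived the same way, just into zero-initialized arrays with 0-based indices. A self-contained sketch of the extraction, using a toy labeled mask:

import numpy as np

# Toy arena_mask: pixels labeled 1..n per arena, 0 elsewhere
arena_mask = np.zeros((100, 100), dtype=np.uint8)
arena_mask[10:40, 5:35] = 1
arena_mask[50:90, 60:95] = 2

n = int(arena_mask.max())
left, right, top, bot = (np.zeros(n) for _ in range(4))
for arena_i in np.arange(n):
    y, x = np.nonzero(arena_mask == arena_i + 1)
    left[arena_i], right[arena_i] = np.min(x), np.max(x)
    top[arena_i], bot[arena_i] = np.min(y), np.max(y)
# left, right, top, bot now hold one bounding box per arena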
@@ -816,7 +707,6 @@ class CompleteImageAnalysisThread(QtCore.QThread):
  def run(self):
  self.parent().po.get_background_to_subtract()
  self.parent().po.get_origins_and_backgrounds_lists()
- self.parent().po.data_to_save['coordinates'] = True
  self.parent().po.data_to_save['exif'] = True
  self.parent().po.save_data_to_run_cellects_quickly()
  self.parent().po.all['bio_mask'] = None
@@ -867,10 +757,8 @@ class PrepareVideoAnalysisThread(QtCore.QThread):
  self.parent().po.find_if_lighter_background()
  logging.info("The current (or the first) folder is ready to run")
  self.parent().po.first_exp_ready_to_run = True
- self.parent().po.data_to_save['coordinates'] = True
  self.parent().po.data_to_save['exif'] = True
  self.parent().po.save_data_to_run_cellects_quickly()
- self.parent().po.data_to_save['coordinates'] = False
  self.parent().po.data_to_save['exif'] = False


@@ -1096,7 +984,7 @@ class OneArenaThread(QtCore.QThread):
  """
  arena = self.parent().po.all['arena']
  i = np.nonzero(self.parent().po.vars['analyzed_individuals'] == arena)[0][0]
- true_frame_width = self.parent().po.vars['origin_list'][i].shape[1]
+ true_frame_width = self.parent().po.right[i] - self.parent().po.left[i]# self.parent().po.vars['origin_list'][i].shape[1]
  if self.parent().po.all['overwrite_unaltered_videos'] and os.path.isfile(f'ind_{arena}.npy'):
  os.remove(f'ind_{arena}.npy')
  background = None
@@ -1173,16 +1061,7 @@ class OneArenaThread(QtCore.QThread):
  self.parent().po.converted_video = deepcopy(self.parent().po.motion.converted_video)
  if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
  self.parent().po.converted_video2 = deepcopy(self.parent().po.motion.converted_video2)
- self.parent().po.motion.get_origin_shape()
-
- if self.parent().po.motion.dims[0] >= 40:
- step = self.parent().po.motion.dims[0] // 20
- else:
- step = 1
- if self.parent().po.motion.start >= (self.parent().po.motion.dims[0] - step - 1):
- self.parent().po.motion.start = None
- else:
- self.parent().po.motion.get_covering_duration(step)
+ self.parent().po.motion.assess_motion_detection()
  self.when_loading_finished.emit(save_loaded_video)

  if self.parent().po.motion.visu is None:
@@ -1323,8 +1202,7 @@ class OneArenaThread(QtCore.QThread):

  while self._isRunning and analysis_i.t < analysis_i.binary.shape[0]:
  analysis_i.update_shape(False)
- contours = np.nonzero(
- cv2.morphologyEx(analysis_i.binary[analysis_i.t - 1, :, :], cv2.MORPH_GRADIENT, cross_33))
+ contours = np.nonzero(get_contours(analysis_i.binary[analysis_i.t - 1, :, :]))
  current_image = deepcopy(self.parent().po.motion.visu[analysis_i.t - 1, :, :, :])
  current_image[contours[0], contours[1], :] = self.parent().po.vars['contour_color']
  self.image_from_thread.emit(
@@ -1355,7 +1233,6 @@ class OneArenaThread(QtCore.QThread):
  self.when_detection_finished.emit("Post processing done, read to see the result")


-
  class VideoReaderThread(QtCore.QThread):
  """
  Thread for reading a video in the GUI.
@@ -1427,7 +1304,7 @@ class VideoReaderThread(QtCore.QThread):
  video_mask = np.cumsum(video_mask.astype(np.uint32), axis=0)
  video_mask[video_mask > 0] = 1
  video_mask = video_mask.astype(np.uint8)
- logging.info(f"sum: {video_mask.sum()}")
+ frame_delay = (8 + np.log10(self.parent().po.motion.dims[0])) / self.parent().po.motion.dims[0]
  for t in np.arange(self.parent().po.motion.dims[0]):
  mask = cv2.morphologyEx(video_mask[t, ...], cv2.MORPH_GRADIENT, cross_33)
  mask = np.stack((mask, mask, mask), axis=2)
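The new frame_delay makes total playback time nearly constant: the loop sleeps dims[0] times, so the whole video plays in 8 + log10(dims[0]) seconds, whereas the old fixed 1/50 s sleep scaled linearly with frame count. Concretely:

import numpy as np

def frame_delay(n_frames: int) -> float:
    # Total playback = n_frames * delay = 8 + log10(n_frames) seconds
    return (8 + np.log10(n_frames)) / n_frames

frame_delay(100)     # 0.1 s per frame   -> ~10 s total
frame_delay(10_000)  # 0.0012 s per frame -> ~12 s total
# The old fixed 1/50 s sleep would have taken 200 s for 10,000 frames.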
@@ -1435,7 +1312,7 @@ class VideoReaderThread(QtCore.QThread):
  current_image[mask > 0] = self.parent().po.vars['contour_color']
  self.message_from_thread.emit(
  {"current_image": current_image, "message": f"Reading in progress... Image number: {t}"}) #, "time": timings[t]
- time.sleep(1 / 50)
+ time.sleep(frame_delay)
  self.message_from_thread.emit({"current_image": current_image, "message": ""})#, "time": timings[t]


@@ -1552,26 +1429,11 @@ class WriteVideoThread(QtCore.QThread):

  already_greyscale : bool
  Flag indicating if the video is already in greyscale format.
- This parameter must be set as a variable named 'already_greyscale' in the instance
- variables of the parent object.
-
- Returns
- -------
- None

  Raises
  ------
  FileNotFoundError
  When the path to write the video is not specified.
-
- Examples
- --------
- >>> self.parent().po.vars['already_greyscale'] = False
- >>> self.run()
- >>> # Expects to write a visualization video as 'ind_arena.npy'
- >>> self.parent().po.vars['already_greyscale'] = True
- >>> self.run()
- >>> # Expects to write a converted video as 'ind_arena.npy'
  """
  arena = self.parent().po.all['arena']
  if not self.parent().po.vars['already_greyscale']:
@@ -151,28 +151,14 @@ class MotionAnalysis:

  self.start = None
  if detect_shape:
- self.start = None
- # Here to conditional layers allow to detect if an expansion/exploration occured
- self.get_origin_shape()
- # The first, user-defined is the 'first_move_threshold' and the second is the detection of the
- # substantial image: if any of them is not detected, the program considers there is not exp.
- if self.dims[0] >= 40:
- step = self.dims[0] // 20
- else:
- step = 1
- if self.dims[0] == 1 or self.start >= (self.dims[0] - step - 1):
- self.start = None
- else:
- self.get_covering_duration(step)
- if self.start is not None:
- self.detection()
- self.initialize_post_processing()
- self.t = self.start
- while self.t < self.dims[0]: #200:
- self.update_shape(show_seg)
+ self.assess_motion_detection()
+ if self.start is not None:
+ self.detection()
+ self.initialize_post_processing()
+ self.t = self.start
+ while self.t < self.dims[0]: #200:
+ self.update_shape(show_seg)
  #
- if self.start is None:
- self.binary = np.repeat(np.expand_dims(self.origin, 0), self.converted_video.shape[0], axis=0)

  if analyse_shape:
  self.get_descriptors_from_binary()
@@ -204,8 +190,25 @@ class MotionAnalysis:

  """
  logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Load images and videos")
- self.origin = self.vars['origin_list'][i] # self.vars['origins_list'][i]
- true_frame_width = self.origin.shape[1]
+ if 'bb_coord' in self.vars:
+ crop_top, crop_bot, crop_left, crop_right, top, bot, left, right = self.vars['bb_coord']
+ elif videos_already_in_ram is not None:
+ if isinstance(videos_already_in_ram, list):
+ crop_bot, crop_right = videos_already_in_ram[0].shape[1], videos_already_in_ram[0].shape[2]
+ else:
+ crop_bot, crop_right = videos_already_in_ram.shape[1], videos_already_in_ram.shape[2]
+ crop_top, crop_left, top, bot, left, right = 0, 0, [0], [crop_bot], [0], [crop_right]
+ if isinstance(self.vars['origin_list'][i], Tuple):
+ self.origin_idx = self.vars['origin_list'][i]
+ frame_height = bot[i] - top[i]
+ true_frame_width = right[i] - left[i]
+ self.origin = np.zeros((frame_height, true_frame_width), dtype=np.uint8)
+ self.origin[self.origin_idx[0], self.origin_idx[1]] = 1
+ else:
+ self.origin = self.vars['origin_list'][i]
+ frame_height = self.origin.shape[0]
+ true_frame_width = self.origin.shape[1]
+
  vid_name = None
  if self.vars['video_list'] is not None:
  vid_name = self.vars['video_list'][i]
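origin_list entries may now be np.nonzero index tuples rather than full binary masks, which is much lighter to store; the mask is rebuilt from the indices and the arena's frame size. A minimal sketch of the round trip (dimensions are made up):

import numpy as np

full_mask = np.zeros((480, 640), dtype=np.uint8)
full_mask[200:220, 300:330] = 1

origin_idx = np.nonzero(full_mask)   # compact (rows, cols) tuple to store

origin = np.zeros(full_mask.shape, dtype=np.uint8)
origin[origin_idx[0], origin_idx[1]] = 1   # reconstruction
assert np.array_equal(origin, full_mask)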
@@ -219,6 +222,18 @@ class MotionAnalysis:
  self.vars['convert_for_motion'], videos_already_in_ram, true_frame_width, vid_name,
  self.background, self.background2)
  self.visu, self.converted_video, self.converted_video2 = vids
+ # When the video(s) already exists (not just written as .pny), they need to be sliced:
+ if self.visu is not None:
+ if self.visu.shape[1] != frame_height or self.visu.shape[2] != true_frame_width:
+ self.visu = self.visu[:, crop_top:crop_bot, crop_left:crop_right, ...]
+ self.visu = self.visu[:, top[i]:bot[i], left[i]:right[i], ...]
+ if self.converted_video is not None:
+ self.converted_video = self.converted_video[:, crop_top:crop_bot, crop_left:crop_right]
+ self.converted_video = self.converted_video[:, top[i]:bot[i], left[i]:right[i]]
+ if self.converted_video2 is not None:
+ self.converted_video2 = self.converted_video2[:, crop_top:crop_bot, crop_left:crop_right]
+ self.converted_video2 = self.converted_video2[:, top[i]:bot[i], left[i]:right[i]]
+
  if self.converted_video is None:
  logging.info(
  f"Arena n°{self.one_descriptor_per_arena['arena']}. Convert the RGB visu video into a greyscale image using the color space combination: {self.vars['convert_for_motion']}")
@@ -228,6 +243,26 @@ class MotionAnalysis:
  self.vars['filter_spec'])
  self.converted_video, self.converted_video2 = vids

+ def assess_motion_detection(self):
+ """
+ Assess if a motion can be detected using the current parameters.
+
+ Validate the specimen(s) detected in the first frame and evaluate roughly how growth occurs during the video.
+ """
+ # Here to conditional layers allow to detect if an expansion/exploration occured
+ self.get_origin_shape()
+ # The first, user-defined is the 'first_move_threshold' and the second is the detection of the
+ # substantial image: if any of them is not detected, the program considers there is no motion.
+ if self.dims[0] >= 40:
+ step = self.dims[0] // 20
+ else:
+ step = 1
+ if self.dims[0] == 1 or self.start >= (self.dims[0] - step - 1):
+ self.start = None
+ self.binary = np.repeat(np.expand_dims(self.origin, 0), self.converted_video.shape[0], axis=0)
+ else:
+ self.get_covering_duration(step)
+
  def get_origin_shape(self):
  """
  Determine the origin shape and initialize variables based on the state of the current analysis.
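The step logic in the new assess_motion_detection samples roughly 20 probe frames whatever the video length; with concrete numbers:

def sampling_step(n_frames: int) -> int:
    # dims[0] // 20 for long videos, every frame otherwise
    return n_frames // 20 if n_frames >= 40 else 1

sampling_step(600)  # 30 -> growth is probed every 30th frame
sampling_step(39)   # 1  -> short videos are probed frame by frame
# If start >= n_frames - step - 1 there is no room left to observe growth:
# start becomes None and binary is just the origin repeated over time.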
@@ -259,25 +294,19 @@ class MotionAnalysis:
  self.drift_mask_coord[:, 2] == 0) and np.all(self.drift_mask_coord[:, 3] == self.dims[2] - 1):
  logging.error(f"Drift correction has been wrongly detected. Images do not contain zero-valued pixels")
  self.vars['drift_already_corrected'] = False
- if self.vars['origin_state'] == "constant":
- self.start = 1
- if self.vars['lighter_background']:
- # Initialize the covering_intensity matrix as a reference for pixel fading
- self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = 200
- else:
- self.start = 0
+ self.start = 1
+ if self.vars['origin_state'] == "invisible":
+ self.start += self.vars['first_detection_frame']
  analysisi = self.frame_by_frame_segmentation(self.start, self.origin)
- while np.logical_and(np.sum(analysisi.binary_image) < self.vars['first_move_threshold'], self.start < self.dims[0]):
- self.start += 1
- analysisi = self.frame_by_frame_segmentation(self.start, self.origin)
-
  # Use connected components to find which shape is the nearest from the image center.
  if self.vars['several_blob_per_arena']:
  self.origin = analysisi.binary_image
  else:
- nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(analysisi.binary_image,
- connectivity=8)
- if self.vars['appearance_detection_method'] == 'most_central':
+ if self.vars['appearance_detection_method'] == 'largest':
+ self.origin = keep_one_connected_component(analysisi.binary_image)
+ elif self.vars['appearance_detection_method'] == 'most_central':
+ nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(analysisi.binary_image,
+ connectivity=8)
  center = np.array((self.dims[2] // 2, self.dims[1] // 2))
  stats = np.zeros(nb_components - 1)
  for shape_i in np.arange(1, nb_components):
@@ -285,10 +314,11 @@ class MotionAnalysis:
  # The shape having the minimal euclidean distance from the center will be the original shape
  self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
  self.origin[output == (np.argmin(stats) + 1)] = 1
- elif self.vars['appearance_detection_method'] == 'largest':
- self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
- self.origin[output == np.argmax(stats[1:, 4])] = 1
  self.origin_idx = np.nonzero(self.origin)
+ if self.vars['origin_state'] == "constant":
+ if self.vars['lighter_background']:
+ # Initialize the covering_intensity matrix as a reference for pixel fading
+ self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = 200
  self.substantial_growth = np.min((1.2 * self.origin.sum(), self.origin.sum() + 250))

  def get_covering_duration(self, step: int):
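For the 'most_central' branch above, the connected component whose centroid lies nearest the image centre becomes the origin. An equivalent, vectorized standalone sketch (the toy binary image is hypothetical):

import cv2
import numpy as np

binary = np.zeros((100, 100), dtype=np.uint8)
binary[45:55, 40:60] = 1   # blob near the centre -> should be kept
binary[5:15, 5:15] = 1     # corner blob -> discarded

nb, output, stats, centroids = cv2.connectedComponentsWithStats(binary, connectivity=8)
center = np.array((binary.shape[1] // 2, binary.shape[0] // 2))  # (x, y)
dists = np.linalg.norm(centroids[1:] - center, axis=1)  # skip background row 0
origin = (output == (np.argmin(dists) + 1)).astype(np.uint8)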
@@ -851,7 +881,7 @@ class MotionAnalysis:
  self.pixel_ring_depth = 3
  if self.pixel_ring_depth % 2 == 0:
  self.pixel_ring_depth = self.pixel_ring_depth + 1
- self.erodila_disk = create_ellipse(self.pixel_ring_depth, self.pixel_ring_depth).astype(np.uint8)
+ self.erodila_disk = create_ellipse(self.pixel_ring_depth, self.pixel_ring_depth, min_size=3).astype(np.uint8)
  self.max_distance = self.pixel_ring_depth * self.vars['detection_range_factor']

  def initialize_post_processing(self):
@@ -918,7 +948,7 @@ class MotionAnalysis:
  self.near_periphery = np.zeros(self.dims[1:])
  if self.vars['arena_shape'] == 'circle':
  periphery_width = self.vars['periphery_width'] * 2
- elliperiphery = create_ellipse(self.dims[1] - periphery_width, self.dims[2] - periphery_width)
+ elliperiphery = create_ellipse(self.dims[1] - periphery_width, self.dims[2] - periphery_width, min_size=3)
  half_width = periphery_width // 2
  if periphery_width % 2 == 0:
  self.near_periphery[half_width:-half_width, half_width:-half_width] = elliperiphery
@@ -1166,7 +1196,7 @@ class MotionAnalysis:
  `PercentAndTimeTracker` for progress tracking, and other image processing techniques such as connected components analysis.

  """
- ##
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Computing and saving specimen(s) coordinates and required descriptors")
  if release_memory:
  self.substantial_image = None
  self.covering_intensity = None
@@ -1529,7 +1559,6 @@ Extract and analyze graphs from a binary representation of network dynamics, pro…

  if np.any(self.one_row_per_frame['time'] > 0):
  position = (5, self.dims[1] - 5)
- print(self.vars['time_step_is_arbitrary'])
  if self.vars['time_step_is_arbitrary']:
  time_unit = ""
  else:
@@ -1597,7 +1626,7 @@ Extract and analyze graphs from a binary representation of network dynamics, pro…
  """
  Manages the saving and updating of CSV files based on data extracted from analyzed
  one arena. Specifically handles three CSV files: "one_row_per_arena.csv",
- "one_row_per_frame.csv", and "one_row_per_oscillating_cluster.csv".
+ "one_row_per_frame.csv".
  Each file is updated or created based on the presence of existing data.
  The method ensures that each CSV file contains the relevant information for
  the given arena, frame, and oscillator cluster data.