cellects 0.1.3__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__main__.py +65 -25
- cellects/config/all_vars_dict.py +18 -17
- cellects/core/cellects_threads.py +1034 -396
- cellects/core/motion_analysis.py +1664 -2010
- cellects/core/one_image_analysis.py +1082 -1061
- cellects/core/program_organizer.py +1687 -1316
- cellects/core/script_based_run.py +80 -76
- cellects/gui/advanced_parameters.py +365 -326
- cellects/gui/cellects.py +102 -91
- cellects/gui/custom_widgets.py +4 -3
- cellects/gui/first_window.py +226 -104
- cellects/gui/if_several_folders_window.py +117 -68
- cellects/gui/image_analysis_window.py +841 -450
- cellects/gui/required_output.py +100 -56
- cellects/gui/ui_strings.py +840 -0
- cellects/gui/video_analysis_window.py +317 -135
- cellects/image_analysis/cell_leaving_detection.py +64 -4
- cellects/image_analysis/image_segmentation.py +451 -22
- cellects/image_analysis/morphological_operations.py +2166 -1635
- cellects/image_analysis/network_functions.py +616 -253
- cellects/image_analysis/one_image_analysis_threads.py +94 -153
- cellects/image_analysis/oscillations_functions.py +131 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
- cellects/image_analysis/shape_descriptors.py +517 -466
- cellects/utils/formulas.py +169 -6
- cellects/utils/load_display_save.py +362 -105
- cellects/utils/utilitarian.py +86 -9
- cellects-0.2.7.dist-info/LICENSE +675 -0
- cellects-0.2.7.dist-info/METADATA +829 -0
- cellects-0.2.7.dist-info/RECORD +44 -0
- cellects/core/one_video_per_blob.py +0 -540
- cellects/image_analysis/cluster_flux_study.py +0 -102
- cellects-0.1.3.dist-info/LICENSE.odt +0 -0
- cellects-0.1.3.dist-info/METADATA +0 -176
- cellects-0.1.3.dist-info/RECORD +0 -44
- {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/WHEEL +0 -0
- {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/entry_points.txt +0 -0
- {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/top_level.txt +0 -0
cellects/core/motion_analysis.py
CHANGED
|
@@ -1,2010 +1,1664 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
1.
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
The
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
The
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
self.
|
|
128
|
-
self.
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
self.
|
|
135
|
-
|
|
136
|
-
self.
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
self.
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
self.
|
|
145
|
-
|
|
146
|
-
self.
|
|
147
|
-
self.
|
|
148
|
-
|
|
149
|
-
self.
|
|
150
|
-
self.
|
|
151
|
-
|
|
152
|
-
self.
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
if
|
|
178
|
-
self.
|
|
179
|
-
|
|
180
|
-
self.
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Load images and videos")
|
|
207
|
-
self.origin = self.vars['origin_list'][i]# self.vars['origins_list'][i]
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
self.
|
|
258
|
-
if self.
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
self.
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
self.
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
else:
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
#
|
|
427
|
-
#
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
if compute_all_possibilities:
|
|
482
|
-
logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Compute all options to detect cell motion and growth. Maximal growth per frame: {self.vars['maximal_growth_factor']}")
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
if self.vars['
|
|
492
|
-
logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect with luminosity
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
self.
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
if
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
if
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
else:
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
self.
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
if
|
|
900
|
-
|
|
901
|
-
self.
|
|
902
|
-
self.
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
if self.vars['
|
|
912
|
-
#
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
self.
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
if
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
self.covering_intensity =
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
self.
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
|
|
1071
|
-
|
|
1072
|
-
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
self.
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
|
|
1118
|
-
|
|
1119
|
-
|
|
1120
|
-
|
|
1121
|
-
|
|
1122
|
-
|
|
1123
|
-
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
|
|
1161
|
-
|
|
1162
|
-
|
|
1163
|
-
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
isisotropic
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
|
|
1256
|
-
|
|
1257
|
-
|
|
1258
|
-
|
|
1259
|
-
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
|
|
1264
|
-
|
|
1265
|
-
|
|
1266
|
-
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
self.
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
|
|
1357
|
-
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
|
|
1379
|
-
|
|
1380
|
-
|
|
1381
|
-
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
if self.
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1394
|
-
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
|
|
1398
|
-
|
|
1399
|
-
|
|
1400
|
-
|
|
1401
|
-
|
|
1402
|
-
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
|
|
1419
|
-
|
|
1420
|
-
|
|
1421
|
-
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1498
|
-
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1504
|
-
|
|
1505
|
-
|
|
1506
|
-
|
|
1507
|
-
|
|
1508
|
-
|
|
1509
|
-
|
|
1510
|
-
|
|
1511
|
-
|
|
1512
|
-
|
|
1513
|
-
|
|
1514
|
-
|
|
1515
|
-
|
|
1516
|
-
|
|
1517
|
-
|
|
1518
|
-
|
|
1519
|
-
|
|
1520
|
-
|
|
1521
|
-
|
|
1522
|
-
|
|
1523
|
-
|
|
1524
|
-
|
|
1525
|
-
|
|
1526
|
-
|
|
1527
|
-
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1531
|
-
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
|
|
1535
|
-
|
|
1536
|
-
|
|
1537
|
-
|
|
1538
|
-
|
|
1539
|
-
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
|
|
1543
|
-
|
|
1544
|
-
|
|
1545
|
-
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1618
|
-
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
|
|
1622
|
-
|
|
1623
|
-
|
|
1624
|
-
|
|
1625
|
-
|
|
1626
|
-
|
|
1627
|
-
|
|
1628
|
-
|
|
1629
|
-
|
|
1630
|
-
|
|
1631
|
-
|
|
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
|
|
1636
|
-
|
|
1637
|
-
|
|
1638
|
-
|
|
1639
|
-
|
|
1640
|
-
|
|
1641
|
-
|
|
1642
|
-
|
|
1643
|
-
|
|
1644
|
-
|
|
1645
|
-
|
|
1646
|
-
|
|
1647
|
-
|
|
1648
|
-
|
|
1649
|
-
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
|
|
1654
|
-
|
|
1655
|
-
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
|
|
1659
|
-
|
|
1660
|
-
|
|
1661
|
-
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
cluster_dynamic[clust_i, :] = np.array(
|
|
1666
|
-
(t * self.time_interval, cluster_names[0], flow, centroid_y, centroid_x,
|
|
1667
|
-
current_cluster_img.sum() * self.vars['average_pixel_size'],
|
|
1668
|
-
inner_network_area * self.vars['average_pixel_size'], box_count_dim, r_value,
|
|
1669
|
-
box_nb, inner_network_box_count_dim, inner_net_r_value, inner_net_box_nb),
|
|
1670
|
-
dtype=np.float64)
|
|
1671
|
-
else:
|
|
1672
|
-
cluster_dynamic[clust_i, :] = np.array((t, cluster_names[0], flow, centroid_y,
|
|
1673
|
-
centroid_x, current_cluster_img.sum(),
|
|
1674
|
-
inner_network_area, box_count_dim, r_value,
|
|
1675
|
-
box_nb, inner_network_box_count_dim,
|
|
1676
|
-
inner_net_r_value, inner_net_box_nb),
|
|
1677
|
-
dtype=np.float64)
|
|
1678
|
-
|
|
1679
|
-
updated_cluster_names = np.append(updated_cluster_names, cluster_names)
|
|
1680
|
-
vstack_h5_array(f"oscillating_clusters_temporal_dynamics.h5",
|
|
1681
|
-
cluster_dynamic, key=f"arena{self.one_descriptor_per_arena['arena']}")
|
|
1682
|
-
|
|
1683
|
-
# Reset cluster_id_matrix for the next frame
|
|
1684
|
-
cluster_id_matrix *= self.binary[t, :, :]
|
|
1685
|
-
|
|
1686
|
-
period_tracking, self.clusters_final_data = efflux_study.update_flux(t, contours, efflux,
|
|
1687
|
-
period_tracking,
|
|
1688
|
-
self.clusters_final_data)
|
|
1689
|
-
period_tracking, self.clusters_final_data = influx_study.update_flux(t, contours, influx,
|
|
1690
|
-
period_tracking,
|
|
1691
|
-
self.clusters_final_data)
|
|
1692
|
-
|
|
1693
|
-
mean_cluster_area[t] = np.mean(np.concatenate((in_stats[:, 4], ef_stats[:, 4])))
|
|
1694
|
-
if self.vars['output_in_mm']:
|
|
1695
|
-
self.clusters_final_data[:, 1] *= self.time_interval # phase
|
|
1696
|
-
self.clusters_final_data[:, 2] *= self.vars['average_pixel_size'] # size
|
|
1697
|
-
self.clusters_final_data[:, 3] *= np.sqrt(self.vars['average_pixel_size']) # distance
|
|
1698
|
-
self.one_row_per_frame['mean_cluster_area'] = mean_cluster_area * self.vars['average_pixel_size']
|
|
1699
|
-
self.one_row_per_frame['cluster_number'] = named_cluster_number
|
|
1700
|
-
|
|
1701
|
-
del oscillations_video
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
def fractal_descriptions(self):
|
|
1705
|
-
if not pd.isna(self.one_descriptor_per_arena["first_move"]) and self.vars['fractal_analysis']:
|
|
1706
|
-
logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting fractal analysis.")
|
|
1707
|
-
|
|
1708
|
-
if self.vars['network_analysis']:
|
|
1709
|
-
box_counting_dimensions = np.zeros((self.dims[0], 7), dtype=np.float64)
|
|
1710
|
-
else:
|
|
1711
|
-
box_counting_dimensions = np.zeros((self.dims[0], 3), dtype=np.float64)
|
|
1712
|
-
|
|
1713
|
-
for t in np.arange(self.dims[0]):
|
|
1714
|
-
if self.vars['network_analysis']:
|
|
1715
|
-
box_counting_dimensions[t, 0] = self.network_dynamics[t, ...].sum()
|
|
1716
|
-
zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...], side_threshold=self.vars[
|
|
1717
|
-
'fractal_box_side_threshold'], zoom_step=self.vars['fractal_zoom_step'], contours=True)
|
|
1718
|
-
box_counting_dimensions[t, 1], box_counting_dimensions[t, 2], box_counting_dimensions[
|
|
1719
|
-
t, 3] = box_counting_dimension(zoomed_binary, side_lengths)
|
|
1720
|
-
zoomed_binary, side_lengths = prepare_box_counting(self.network_dynamics[t, ...],
|
|
1721
|
-
side_threshold=self.vars[
|
|
1722
|
-
'fractal_box_side_threshold'],
|
|
1723
|
-
zoom_step=self.vars['fractal_zoom_step'],
|
|
1724
|
-
contours=False)
|
|
1725
|
-
box_counting_dimensions[t, 4], box_counting_dimensions[t, 5], box_counting_dimensions[
|
|
1726
|
-
t, 6] = box_counting_dimension(zoomed_binary, side_lengths)
|
|
1727
|
-
else:
|
|
1728
|
-
zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...],
|
|
1729
|
-
side_threshold=self.vars['fractal_box_side_threshold'],
|
|
1730
|
-
zoom_step=self.vars['fractal_zoom_step'], contours=True)
|
|
1731
|
-
box_counting_dimensions[t, :] = box_counting_dimension(zoomed_binary, side_lengths)
|
|
1732
|
-
|
|
1733
|
-
if self.vars['network_analysis']:
|
|
1734
|
-
self.one_row_per_frame["inner_network_size"] = box_counting_dimensions[:, 0]
|
|
1735
|
-
self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 1]
|
|
1736
|
-
self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
|
|
1737
|
-
self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 3]
|
|
1738
|
-
self.one_row_per_frame["inner_network_fractal_dimension"] = box_counting_dimensions[:, 4]
|
|
1739
|
-
self.one_row_per_frame["inner_network_fractal_r_value"] = box_counting_dimensions[:, 5]
|
|
1740
|
-
self.one_row_per_frame["inner_network_fractal_box_nb"] = box_counting_dimensions[:, 6]
|
|
1741
|
-
if self.vars['output_in_mm']:
|
|
1742
|
-
self.one_row_per_frame["inner_network_size"] *= self.vars['average_pixel_size']
|
|
1743
|
-
else:
|
|
1744
|
-
self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 0]
|
|
1745
|
-
self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 1]
|
|
1746
|
-
self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
|
|
1747
|
-
|
|
1748
|
-
if self.vars['network_analysis'] or self.vars['save_coord_network']:
|
|
1749
|
-
del self.network_dynamics
|
|
1750
|
-
|
|
1751
|
-
def get_descriptors_summary(self):
|
|
1752
|
-
potential_descriptors = ["area", "perimeter", "circularity", "rectangularity", "total_hole_area", "solidity",
|
|
1753
|
-
"convexity", "eccentricity", "euler_number", "standard_deviation_y",
|
|
1754
|
-
"standard_deviation_x", "skewness_y", "skewness_x", "kurtosis_y", "kurtosis_x",
|
|
1755
|
-
"major_axis_len", "minor_axis_len", "axes_orientation"]
|
|
1756
|
-
|
|
1757
|
-
self.one_descriptor_per_arena["final_area"] = self.binary[-1, :, :].sum()
|
|
1758
|
-
|
|
1759
|
-
def save_efficiency_tests(self):
|
|
1760
|
-
# Provide images allowing to assess the analysis efficiency
|
|
1761
|
-
if self.dims[0] > 1:
|
|
1762
|
-
after_one_tenth_of_time = np.ceil(self.dims[0] / 10).astype(np.uint64)
|
|
1763
|
-
else:
|
|
1764
|
-
after_one_tenth_of_time = 0
|
|
1765
|
-
|
|
1766
|
-
last_good_detection = self.dims[0] - 1
|
|
1767
|
-
if self.dims[0] > self.lost_frames:
|
|
1768
|
-
if self.vars['do_threshold_segmentation']:
|
|
1769
|
-
last_good_detection -= self.lost_frames
|
|
1770
|
-
else:
|
|
1771
|
-
last_good_detection = 0
|
|
1772
|
-
if self.visu is None:
|
|
1773
|
-
if len(self.converted_video.shape) == 3:
|
|
1774
|
-
self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
|
|
1775
|
-
axis=3)
|
|
1776
|
-
self.efficiency_test_1 = deepcopy(self.converted_video[after_one_tenth_of_time, ...])
|
|
1777
|
-
self.efficiency_test_2 = deepcopy(self.converted_video[last_good_detection, ...])
|
|
1778
|
-
else:
|
|
1779
|
-
self.efficiency_test_1 = deepcopy(self.visu[after_one_tenth_of_time, :, :, :])
|
|
1780
|
-
self.efficiency_test_2 = deepcopy(self.visu[last_good_detection, :, :, :])
|
|
1781
|
-
|
|
1782
|
-
position = (25, self.dims[1] // 2)
|
|
1783
|
-
text = str(self.one_descriptor_per_arena['arena'])
|
|
1784
|
-
eroded_binary = cv2.erode(self.binary[after_one_tenth_of_time, :, :], cross_33)
|
|
1785
|
-
contours = np.nonzero(self.binary[after_one_tenth_of_time, :, :] - eroded_binary)
|
|
1786
|
-
self.efficiency_test_1[contours[0], contours[1], :] = self.vars['contour_color']
|
|
1787
|
-
self.efficiency_test_1 = cv2.putText(self.efficiency_test_1, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
|
|
1788
|
-
(self.vars["contour_color"], self.vars["contour_color"],
|
|
1789
|
-
self.vars["contour_color"], 255), 3)
|
|
1790
|
-
|
|
1791
|
-
eroded_binary = cv2.erode(self.binary[last_good_detection, :, :], cross_33)
|
|
1792
|
-
contours = np.nonzero(self.binary[last_good_detection, :, :] - eroded_binary)
|
|
1793
|
-
self.efficiency_test_2[contours[0], contours[1], :] = self.vars['contour_color']
|
|
1794
|
-
self.efficiency_test_2 = cv2.putText(self.efficiency_test_2, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
|
|
1795
|
-
(self.vars["contour_color"], self.vars["contour_color"],
|
|
1796
|
-
self.vars["contour_color"], 255), 3)
|
|
1797
|
-
|
|
1798
|
-
def save_video(self):
|
|
1799
|
-
|
|
1800
|
-
if self.vars['save_processed_videos']:
|
|
1801
|
-
self.check_converted_video_type()
|
|
1802
|
-
if len(self.converted_video.shape) == 3:
|
|
1803
|
-
self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
|
|
1804
|
-
axis=3)
|
|
1805
|
-
for t in np.arange(self.dims[0]):
|
|
1806
|
-
|
|
1807
|
-
eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
|
|
1808
|
-
contours = np.nonzero(self.binary[t, :, :] - eroded_binary)
|
|
1809
|
-
self.converted_video[t, contours[0], contours[1], :] = self.vars['contour_color']
|
|
1810
|
-
if "iso_digi_transi" in self.one_descriptor_per_arena.keys():
|
|
1811
|
-
if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not pd.isna(self.one_descriptor_per_arena["iso_digi_transi"]):
|
|
1812
|
-
if self.one_descriptor_per_arena["is_growth_isotropic"] == 1:
|
|
1813
|
-
if t < self.one_descriptor_per_arena["iso_digi_transi"]:
|
|
1814
|
-
self.converted_video[t, contours[0], contours[1], :] = 0, 0, 255
|
|
1815
|
-
del self.binary
|
|
1816
|
-
del self.surfarea
|
|
1817
|
-
del self.borders
|
|
1818
|
-
del self.origin
|
|
1819
|
-
del self.origin_idx
|
|
1820
|
-
del self.mean_intensity_per_frame
|
|
1821
|
-
del self.erodila_disk
|
|
1822
|
-
collect()
|
|
1823
|
-
if self.visu is None:
|
|
1824
|
-
true_frame_width = self.dims[2]
|
|
1825
|
-
if len(self.vars['background_list']) == 0:
|
|
1826
|
-
self.background = None
|
|
1827
|
-
else:
|
|
1828
|
-
self.background = self.vars['background_list'][self.one_descriptor_per_arena['arena'] - 1]
|
|
1829
|
-
self.visu = video2numpy(f"ind_{self.one_descriptor_per_arena['arena']}.npy", None, self.background, true_frame_width)
|
|
1830
|
-
if len(self.visu.shape) == 3:
|
|
1831
|
-
self.visu = np.stack((self.visu, self.visu, self.visu), axis=3)
|
|
1832
|
-
self.converted_video = np.concatenate((self.visu, self.converted_video), axis=2)
|
|
1833
|
-
# self.visu = None
|
|
1834
|
-
|
|
1835
|
-
if np.any(self.one_row_per_frame['time'] > 0):
|
|
1836
|
-
position = (5, self.dims[1] - 5)
|
|
1837
|
-
for t in np.arange(self.dims[0]):
|
|
1838
|
-
image = self.converted_video[t, ...]
|
|
1839
|
-
text = str(self.one_row_per_frame['time'][t]) + " min"
|
|
1840
|
-
image = cv2.putText(image, # numpy array on which text is written
|
|
1841
|
-
text, # text
|
|
1842
|
-
position, # position at which writing has to start
|
|
1843
|
-
cv2.FONT_HERSHEY_SIMPLEX, # font family
|
|
1844
|
-
1, # font size
|
|
1845
|
-
(self.vars["contour_color"], self.vars["contour_color"], self.vars["contour_color"], 255), #(209, 80, 0, 255),
|
|
1846
|
-
2) # font stroke
|
|
1847
|
-
self.converted_video[t, ...] = image
|
|
1848
|
-
vid_name = f"ind_{self.one_descriptor_per_arena['arena']}{self.vars['videos_extension']}"
|
|
1849
|
-
write_video(self.converted_video, vid_name, is_color=True, fps=self.vars['video_fps'])
|
|
1850
|
-
# self.converted_video = None
|
|
1851
|
-
|
|
1852
|
-
def save_results(self):
|
|
1853
|
-
self.save_efficiency_tests()
|
|
1854
|
-
self.save_video()
|
|
1855
|
-
if self.vars['several_blob_per_arena']:
|
|
1856
|
-
try:
|
|
1857
|
-
with open(f"one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv", 'w') as file:
|
|
1858
|
-
self.one_row_per_frame.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1859
|
-
except PermissionError:
|
|
1860
|
-
logging.error(f"Never let one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv open when Cellects runs")
|
|
1861
|
-
|
|
1862
|
-
create_new_csv: bool = False
|
|
1863
|
-
if os.path.isfile("one_row_per_arena.csv"):
|
|
1864
|
-
try:
|
|
1865
|
-
with open(f"one_row_per_arena.csv", 'r') as file:
|
|
1866
|
-
stats = pd.read_csv(file, header=0, sep=";")
|
|
1867
|
-
except PermissionError:
|
|
1868
|
-
logging.error("Never let one_row_per_arena.csv open when Cellects runs")
|
|
1869
|
-
|
|
1870
|
-
if len(self.one_descriptor_per_arena) == len(stats.columns) - 1:
|
|
1871
|
-
try:
|
|
1872
|
-
with open(f"one_row_per_arena.csv", 'w') as file:
|
|
1873
|
-
stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), 1:] = self.one_descriptor_per_arena.values()
|
|
1874
|
-
# if len(self.vars['analyzed_individuals']) == 1:
|
|
1875
|
-
# stats = pd.DataFrame(self.one_descriptor_per_arena, index=[0])
|
|
1876
|
-
# else:
|
|
1877
|
-
# stats = pd.DataFrame.from_dict(self.one_descriptor_per_arena)
|
|
1878
|
-
# stats.to_csv("stats.csv", sep=';', index=False, lineterminator='\n')
|
|
1879
|
-
stats.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1880
|
-
except PermissionError:
|
|
1881
|
-
logging.error("Never let one_row_per_arena.csv open when Cellects runs")
|
|
1882
|
-
else:
|
|
1883
|
-
create_new_csv = True
|
|
1884
|
-
else:
|
|
1885
|
-
create_new_csv = True
|
|
1886
|
-
if create_new_csv:
|
|
1887
|
-
with open(f"one_row_per_arena.csv", 'w') as file:
|
|
1888
|
-
stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
|
|
1889
|
-
columns=list(self.one_descriptor_per_arena.keys()))
|
|
1890
|
-
stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = np.array(list(self.one_descriptor_per_arena.values()), dtype=np.uint32)
|
|
1891
|
-
stats.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1892
|
-
if not self.vars['keep_unaltered_videos'] and os.path.isfile(f"ind_{self.one_descriptor_per_arena['arena']}.npy"):
|
|
1893
|
-
os.remove(f"ind_{self.one_descriptor_per_arena['arena']}.npy")
|
|
1894
|
-
|
|
1895
|
-
def change_results_of_one_arena(self):
|
|
1896
|
-
self.save_video()
|
|
1897
|
-
# I/ Update/Create one_row_per_arena.csv
|
|
1898
|
-
create_new_csv: bool = False
|
|
1899
|
-
if os.path.isfile("one_row_per_arena.csv"):
|
|
1900
|
-
try:
|
|
1901
|
-
with open(f"one_row_per_arena.csv", 'r') as file:
|
|
1902
|
-
stats = pd.read_csv(file, header=0, sep=";")
|
|
1903
|
-
for stat_name, stat_value in self.one_descriptor_per_arena.items():
|
|
1904
|
-
if stat_name in stats.columns:
|
|
1905
|
-
stats.loc[(self.one_descriptor_per_arena['arena'] - 1), stat_name] = np.uint32(self.one_descriptor_per_arena[stat_name])
|
|
1906
|
-
with open(f"one_row_per_arena.csv", 'w') as file:
|
|
1907
|
-
stats.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1908
|
-
except PermissionError:
|
|
1909
|
-
logging.error("Never let one_row_per_arena.csv open when Cellects runs")
|
|
1910
|
-
except Exception as e:
|
|
1911
|
-
logging.error(f"{e}")
|
|
1912
|
-
create_new_csv = True
|
|
1913
|
-
# if len(self.one_descriptor_per_arena) == len(stats.columns):
|
|
1914
|
-
# try:
|
|
1915
|
-
# with open(f"one_row_per_arena.csv", 'w') as file:
|
|
1916
|
-
# stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = self.one_descriptor_per_arena.values()
|
|
1917
|
-
# # stats.to_csv("stats.csv", sep=';', index=False, lineterminator='\n')
|
|
1918
|
-
# stats.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1919
|
-
# except PermissionError:
|
|
1920
|
-
# logging.error("Never let one_row_per_arena.csv open when Cellects runs")
|
|
1921
|
-
# else:
|
|
1922
|
-
# create_new_csv = True
|
|
1923
|
-
else:
|
|
1924
|
-
create_new_csv = True
|
|
1925
|
-
if create_new_csv:
|
|
1926
|
-
logging.info("Create a new one_row_per_arena.csv file")
|
|
1927
|
-
try:
|
|
1928
|
-
with open(f"one_row_per_arena.csv", 'w') as file:
|
|
1929
|
-
stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
|
|
1930
|
-
columns=list(self.one_descriptor_per_arena.keys()))
|
|
1931
|
-
stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = np.array(list(self.one_descriptor_per_arena.values()), dtype=np.uint32)
|
|
1932
|
-
stats.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1933
|
-
except PermissionError:
|
|
1934
|
-
logging.error("Never let one_row_per_arena.csv open when Cellects runs")
|
|
1935
|
-
|
|
1936
|
-
# II/ Update/Create one_row_per_frame.csv
|
|
1937
|
-
create_new_csv = False
|
|
1938
|
-
if os.path.isfile("one_row_per_frame.csv"):
|
|
1939
|
-
try:
|
|
1940
|
-
with open(f"one_row_per_frame.csv", 'r') as file:
|
|
1941
|
-
descriptors = pd.read_csv(file, header=0, sep=";")
|
|
1942
|
-
for stat_name, stat_value in self.one_row_per_frame.items():
|
|
1943
|
-
if stat_name in descriptors.columns:
|
|
1944
|
-
descriptors.loc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0] - 1), stat_name] = self.one_row_per_frame.loc[:, stat_name].values[:]
|
|
1945
|
-
with open(f"one_row_per_frame.csv", 'w') as file:
|
|
1946
|
-
descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1947
|
-
# with open(f"one_row_per_frame.csv", 'w') as file:
|
|
1948
|
-
# for descriptor in descriptors.keys():
|
|
1949
|
-
# descriptors.loc[
|
|
1950
|
-
# ((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0]),
|
|
1951
|
-
# descriptor] = self.one_row_per_frame[descriptor]
|
|
1952
|
-
# descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1953
|
-
|
|
1954
|
-
|
|
1955
|
-
|
|
1956
|
-
# if len(self.one_row_per_frame.columns) == len(descriptors.columns):
|
|
1957
|
-
# with open(f"one_row_per_frame.csv", 'w') as file:
|
|
1958
|
-
# # NEW
|
|
1959
|
-
# for descriptor in descriptors.keys():
|
|
1960
|
-
# descriptors.loc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0]), descriptor] = self.one_row_per_frame[descriptor]
|
|
1961
|
-
# # Old
|
|
1962
|
-
# # descriptors.iloc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0]), :] = self.one_row_per_frame
|
|
1963
|
-
# descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1964
|
-
# else:
|
|
1965
|
-
# create_new_csv = True
|
|
1966
|
-
except PermissionError:
|
|
1967
|
-
logging.error("Never let one_row_per_frame.csv open when Cellects runs")
|
|
1968
|
-
except Exception as e:
|
|
1969
|
-
logging.error(f"{e}")
|
|
1970
|
-
create_new_csv = True
|
|
1971
|
-
else:
|
|
1972
|
-
create_new_csv = True
|
|
1973
|
-
if create_new_csv:
|
|
1974
|
-
logging.info("Create a new one_row_per_frame.csv file")
|
|
1975
|
-
try:
|
|
1976
|
-
with open(f"one_row_per_frame.csv", 'w') as file:
|
|
1977
|
-
descriptors = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']) * self.dims[0], len(self.one_row_per_frame.columns))),
|
|
1978
|
-
columns=list(self.one_row_per_frame.keys()))
|
|
1979
|
-
descriptors.iloc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0]), :] = self.one_row_per_frame
|
|
1980
|
-
descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1981
|
-
except PermissionError:
|
|
1982
|
-
logging.error("Never let one_row_per_frame.csv open when Cellects runs")
|
|
1983
|
-
|
|
1984
|
-
# III/ Update/Create one_row_per_oscillating_cluster.csv
|
|
1985
|
-
if not pd.isna(self.one_descriptor_per_arena["first_move"]) and self.vars['oscilacyto_analysis']:
|
|
1986
|
-
oscil_i = pd.DataFrame(
|
|
1987
|
-
np.c_[np.repeat(self.one_descriptor_per_arena['arena'], self.clusters_final_data.shape[0]), self.clusters_final_data],
|
|
1988
|
-
columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
|
|
1989
|
-
if os.path.isfile("one_row_per_oscillating_cluster.csv"):
|
|
1990
|
-
try:
|
|
1991
|
-
with open(f"one_row_per_oscillating_cluster.csv", 'r') as file:
|
|
1992
|
-
one_row_per_oscillating_cluster = pd.read_csv(file, header=0, sep=";")
|
|
1993
|
-
with open(f"one_row_per_oscillating_cluster.csv", 'w') as file:
|
|
1994
|
-
one_row_per_oscillating_cluster_before = one_row_per_oscillating_cluster[one_row_per_oscillating_cluster['arena'] < self.one_descriptor_per_arena['arena']]
|
|
1995
|
-
one_row_per_oscillating_cluster_after = one_row_per_oscillating_cluster[one_row_per_oscillating_cluster['arena'] > self.one_descriptor_per_arena['arena']]
|
|
1996
|
-
one_row_per_oscillating_cluster = pd.concat((one_row_per_oscillating_cluster_before, oscil_i, one_row_per_oscillating_cluster_after))
|
|
1997
|
-
one_row_per_oscillating_cluster.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1998
|
-
|
|
1999
|
-
# one_row_per_oscillating_cluster = one_row_per_oscillating_cluster[one_row_per_oscillating_cluster['arena'] != self.one_descriptor_per_arena['arena']]
|
|
2000
|
-
# one_row_per_oscillating_cluster = pd.concat((one_row_per_oscillating_cluster, oscil_i))
|
|
2001
|
-
# one_row_per_oscillating_cluster.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
2002
|
-
except PermissionError:
|
|
2003
|
-
logging.error("Never let one_row_per_oscillating_cluster.csv open when Cellects runs")
|
|
2004
|
-
else:
|
|
2005
|
-
try:
|
|
2006
|
-
with open(f"one_row_per_oscillating_cluster.csv", 'w') as file:
|
|
2007
|
-
oscil_i.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
2008
|
-
except PermissionError:
|
|
2009
|
-
logging.error("Never let one_row_per_oscillating_cluster.csv open when Cellects runs")
|
|
2010
|
-
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Module for analyzing motion, growth patterns, and structural properties of biological specimens in video data.
|
|
3
|
+
|
|
4
|
+
This module provides comprehensive tools to analyze videos of biological samples (e.g., cell colonies) by:
|
|
5
|
+
1. Loading and converting RGB videos to grayscale using configurable color space combinations
|
|
6
|
+
2. Performing multi-strategy segmentation (frame-by-frame, intensity thresholding, derivative-based detection)
|
|
7
|
+
3. Applying post-processing steps including error correction algorithms for shape continuity
|
|
8
|
+
4. Computing morphological descriptors over time (area, perimeter, fractal dimension, etc.)
|
|
9
|
+
5. Detecting network structures and oscillatory behavior in dynamic biological systems
|
|
10
|
+
|
|
11
|
+
Classes
|
|
12
|
+
-------
|
|
13
|
+
MotionAnalysis : Processes video data to analyze specimen motion, growth patterns, and structural properties.
|
|
14
|
+
Provides methods for loading videos, performing segmentation using multiple algorithms,
|
|
15
|
+
post-processing results with error correction, extracting morphological descriptors,
|
|
16
|
+
detecting network structures, analyzing oscillations, and saving processed outputs.
|
|
17
|
+
|
|
18
|
+
Functions
|
|
19
|
+
---------
|
|
20
|
+
load_images_and_videos : Loads and converts video files to appropriate format for analysis.
|
|
21
|
+
get_converted_video : Converts RGB video to grayscale based on specified color space parameters.
|
|
22
|
+
detection : Performs multi-strategy segmentation of the specimen across all frames.
|
|
23
|
+
update_shape : Updates segmented shape with post-processing steps like noise filtering and hole filling.
|
|
24
|
+
save_results : Saves processed data, efficiency tests, and annotated videos.
|
|
25
|
+
|
|
26
|
+
Notes
|
|
27
|
+
-----
|
|
28
|
+
The features of this module include:
|
|
29
|
+
- Processes large video datasets with memory optimization strategies including typed arrays (NumPy)
|
|
30
|
+
and progressive processing techniques.
|
|
31
|
+
- The module supports both single-specimen and multi-specimen analysis through configurable parameters.
|
|
32
|
+
- Segmentation strategies include intensity-based thresholding, gradient detection, and combinations thereof.
|
|
33
|
+
- Post-processing includes morphological operations to refine segmented regions and error correction for specific use cases (e.g., Physarum polycephalum).
|
|
34
|
+
- Biological network detection and graph extraction is available to represent network structures as vertex-edge tables.
|
|
35
|
+
- Biological oscillatory pattern detection
|
|
36
|
+
- Fractal dimension calculation
|
|
37
|
+
"""
|
|
38
|
+
|
|
39
|
+
import weakref
|
|
40
|
+
from gc import collect
|
|
41
|
+
import numpy as np
|
|
42
|
+
from numba.typed import Dict as TDict
|
|
43
|
+
from psutil import virtual_memory
|
|
44
|
+
from cellects.core.one_image_analysis import OneImageAnalysis
|
|
45
|
+
from cellects.image_analysis.cell_leaving_detection import cell_leaving_detection
|
|
46
|
+
from cellects.image_analysis.oscillations_functions import detect_oscillations_dynamics
|
|
47
|
+
from cellects.image_analysis.image_segmentation import segment_with_lum_value, convert_subtract_and_filter_video
|
|
48
|
+
from cellects.image_analysis.morphological_operations import (find_major_incline, create_ellipse, draw_me_a_sun,
|
|
49
|
+
inverted_distance_transform, dynamically_expand_to_fill_holes,
|
|
50
|
+
box_counting_dimension, prepare_box_counting, cc)
|
|
51
|
+
from cellects.image_analysis.network_functions import *
|
|
52
|
+
from cellects.image_analysis.progressively_add_distant_shapes import ProgressivelyAddDistantShapes
|
|
53
|
+
from cellects.image_analysis.shape_descriptors import compute_one_descriptor_per_frame, compute_one_descriptor_per_colony, scale_descriptors, ShapeDescriptors, from_shape_descriptors_class
|
|
54
|
+
from cellects.utils.utilitarian import smallest_memory_array
|
|
55
|
+
from cellects.utils.formulas import detect_first_move
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class MotionAnalysis:
|
|
59
|
+
|
|
60
|
+
def __init__(self, l: list):
|
|
61
|
+
|
|
62
|
+
"""
|
|
63
|
+
Analyzes motion in a given arena using video data.
|
|
64
|
+
|
|
65
|
+
This class processes video frames to analyze motion within a specified area,
|
|
66
|
+
detecting shapes, covering durations, and generating descriptors for further
|
|
67
|
+
analysis.
|
|
68
|
+
|
|
69
|
+
Args:
|
|
70
|
+
l (list): A list containing various parameters and flags necessary for the motion
|
|
71
|
+
analysis.
|
|
72
|
+
|
|
73
|
+
Args:
|
|
74
|
+
l[0] (int): Arena index.
|
|
75
|
+
l[1] (str): Arena identifier or name, stored in one_descriptor_per_arena['arena'].
|
|
76
|
+
l[2] (dict): Variables required for the analysis, stored in vars.
|
|
77
|
+
l[3] (bool): Flag to detect shape.
|
|
78
|
+
l[4] (bool): Flag to analyze shape.
|
|
79
|
+
l[5] (bool): Flag to show segmentation.
|
|
80
|
+
l[6] (None or list): Videos already in RAM.
|
|
81
|
+
|
|
82
|
+
Attributes:
|
|
83
|
+
vars (dict): Variables required for the analysis.
|
|
84
|
+
visu (None): Placeholder for visualization data.
|
|
85
|
+
binary (None): Placeholder for binary segmentation data.
|
|
86
|
+
origin_idx (None): Placeholder for the index of the first frame.
|
|
87
|
+
smoothing_flag (bool): Flag to indicate if smoothing should be applied.
|
|
88
|
+
dims (tuple): Dimensions of the converted video.
|
|
89
|
+
segmentation (ndarray): Array to store segmentation data.
|
|
90
|
+
covering_intensity (ndarray): Intensity values for covering analysis.
|
|
91
|
+
mean_intensity_per_frame (ndarray): Mean intensity per frame.
|
|
92
|
+
borders (object): Borders of the arena.
|
|
93
|
+
pixel_ring_depth (int): Depth of the pixel ring for analysis, default is 9.
|
|
94
|
+
step (int): Step size for processing, default is 10.
|
|
95
|
+
lost_frames (int): Number of lost frames to account for, default is 10.
|
|
96
|
+
start (None or int): Starting frame index for the analysis.
|
|
97
|
+
|
|
98
|
+
Methods:
|
|
99
|
+
load_images_and_videos(videos_already_in_ram, arena_idx): Loads images and videos
|
|
100
|
+
for the specified arena index.
|
|
101
|
+
update_ring_width(): Updates the width of the pixel ring for analysis.
|
|
102
|
+
get_origin_shape(): Detects the origin shape in the video frames.
|
|
103
|
+
get_covering_duration(step): Calculates the covering duration based on a step size.
|
|
104
|
+
detection(): Performs motion detection within the arena.
|
|
105
|
+
initialize_post_processing(): Initializes post-processing steps.
|
|
106
|
+
update_shape(show_seg): Updates the shape based on segmentation and visualization flags.
|
|
107
|
+
get_descriptors_from_binary(): Extracts descriptors from binary data.
|
|
108
|
+
detect_growth_transitions(): Detects growth transitions in the data.
|
|
109
|
+
networks_analysis(show_seg): Detected networks within the arena based on segmentation
|
|
110
|
+
visualization.
|
|
111
|
+
study_cytoscillations(show_seg): Studies cytoscillations within the arena with
|
|
112
|
+
segmentation visualization.
|
|
113
|
+
fractal_descriptions(): Generates fractal descriptions of the analyzed data.
|
|
114
|
+
get_descriptors_summary(): Summarizes the descriptors obtained from the analysis.
|
|
115
|
+
save_results(): Saves the results of the analysis.
|
|
116
|
+
|
|
117
|
+
"""
|
|
118
|
+
self.one_descriptor_per_arena = {}
|
|
119
|
+
self.one_descriptor_per_arena['arena'] = l[1]
|
|
120
|
+
vars = l[2]
|
|
121
|
+
detect_shape = l[3]
|
|
122
|
+
analyse_shape = l[4]
|
|
123
|
+
show_seg = l[5]
|
|
124
|
+
videos_already_in_ram = l[6]
|
|
125
|
+
self.visu = None
|
|
126
|
+
self.binary = None
|
|
127
|
+
self.origin_idx = None
|
|
128
|
+
self.smoothing_flag: bool = False
|
|
129
|
+
self.drift_mask_coord = None
|
|
130
|
+
self.coord_network = None
|
|
131
|
+
logging.info(f"Start the motion analysis of the arena n°{self.one_descriptor_per_arena['arena']}")
|
|
132
|
+
|
|
133
|
+
self.vars = vars
|
|
134
|
+
if not 'contour_color' in self.vars:
|
|
135
|
+
self.vars['contour_color']: np.uint8 = 0
|
|
136
|
+
if not 'background_list' in self.vars:
|
|
137
|
+
self.vars['background_list'] = []
|
|
138
|
+
self.load_images_and_videos(videos_already_in_ram, l[0])
|
|
139
|
+
|
|
140
|
+
self.dims = self.converted_video.shape
|
|
141
|
+
self.segmented = np.zeros(self.dims, dtype=np.uint8)
|
|
142
|
+
|
|
143
|
+
self.covering_intensity = np.zeros(self.dims[1:], dtype=np.float64)
|
|
144
|
+
self.mean_intensity_per_frame = np.mean(self.converted_video, (1, 2))
|
|
145
|
+
|
|
146
|
+
self.borders = image_borders(self.dims[1:], shape=self.vars['arena_shape'])
|
|
147
|
+
self.pixel_ring_depth = 9
|
|
148
|
+
self.step: int = 10
|
|
149
|
+
self.lost_frames = 10
|
|
150
|
+
self.update_ring_width()
|
|
151
|
+
|
|
152
|
+
self.start = None
|
|
153
|
+
if detect_shape:
|
|
154
|
+
self.start = None
|
|
155
|
+
# Here to conditional layers allow to detect if an expansion/exploration occured
|
|
156
|
+
self.get_origin_shape()
|
|
157
|
+
# The first, user-defined is the 'first_move_threshold' and the second is the detection of the
|
|
158
|
+
# substantial image: if any of them is not detected, the program considers there is not exp.
|
|
159
|
+
if self.dims[0] >= 40:
|
|
160
|
+
step = self.dims[0] // 20
|
|
161
|
+
else:
|
|
162
|
+
step = 1
|
|
163
|
+
if self.dims[0] == 1 or self.start >= (self.dims[0] - step - 1):
|
|
164
|
+
self.start = None
|
|
165
|
+
else:
|
|
166
|
+
self.get_covering_duration(step)
|
|
167
|
+
if self.start is not None:
|
|
168
|
+
self.detection()
|
|
169
|
+
self.initialize_post_processing()
|
|
170
|
+
self.t = self.start
|
|
171
|
+
while self.t < self.dims[0]: #200:
|
|
172
|
+
self.update_shape(show_seg)
|
|
173
|
+
#
|
|
174
|
+
if self.start is None:
|
|
175
|
+
self.binary = np.repeat(np.expand_dims(self.origin, 0), self.converted_video.shape[0], axis=0)
|
|
176
|
+
|
|
177
|
+
if analyse_shape:
|
|
178
|
+
self.get_descriptors_from_binary()
|
|
179
|
+
self.detect_growth_transitions()
|
|
180
|
+
self.networks_analysis(show_seg)
|
|
181
|
+
self.study_cytoscillations(show_seg)
|
|
182
|
+
self.fractal_descriptions()
|
|
183
|
+
if videos_already_in_ram is None:
|
|
184
|
+
self.save_results()
|
|
185
|
+
|
|
186
|
+
def load_images_and_videos(self, videos_already_in_ram, i: int):
|
|
187
|
+
"""
|
|
188
|
+
|
|
189
|
+
Load images and videos from disk or RAM.
|
|
190
|
+
|
|
191
|
+
Parameters
|
|
192
|
+
----------
|
|
193
|
+
videos_already_in_ram : numpy.ndarray or None
|
|
194
|
+
Video data that is already loaded into RAM. If `None`, videos will be
|
|
195
|
+
loaded from disk.
|
|
196
|
+
i : int
|
|
197
|
+
Index used to select the origin and background data.
|
|
198
|
+
|
|
199
|
+
Notes
|
|
200
|
+
-----
|
|
201
|
+
This method logs information about the arena number and loads necessary data
|
|
202
|
+
from disk or RAM based on whether videos are already in memory. It sets various
|
|
203
|
+
attributes like `self.origin`, `self.background`, and `self.converted_video`.
|
|
204
|
+
|
|
205
|
+
"""
|
|
206
|
+
logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Load images and videos")
|
|
207
|
+
self.origin = self.vars['origin_list'][i] # self.vars['origins_list'][i]
|
|
208
|
+
true_frame_width = self.origin.shape[1]
|
|
209
|
+
vid_name = None
|
|
210
|
+
if self.vars['video_list'] is not None:
|
|
211
|
+
vid_name = self.vars['video_list'][i]
|
|
212
|
+
self.background = None
|
|
213
|
+
if len(self.vars['background_list']) > 0:
|
|
214
|
+
self.background = self.vars['background_list'][i]
|
|
215
|
+
self.background2 = None
|
|
216
|
+
if 'background_list2' in self.vars and len(self.vars['background_list2']) > 0:
|
|
217
|
+
self.background2 = self.vars['background_list2'][i]
|
|
218
|
+
vids = read_one_arena(self.one_descriptor_per_arena['arena'], self.vars['already_greyscale'],
|
|
219
|
+
self.vars['convert_for_motion'], videos_already_in_ram, true_frame_width, vid_name,
|
|
220
|
+
self.background, self.background2)
|
|
221
|
+
self.visu, self.converted_video, self.converted_video2 = vids
|
|
222
|
+
if self.converted_video is None:
|
|
223
|
+
logging.info(
|
|
224
|
+
f"Arena n°{self.one_descriptor_per_arena['arena']}. Convert the RGB visu video into a greyscale image using the color space combination: {self.vars['convert_for_motion']}")
|
|
225
|
+
vids = convert_subtract_and_filter_video(self.visu, self.vars['convert_for_motion'],
|
|
226
|
+
self.background, self.background2,
|
|
227
|
+
self.vars['lose_accuracy_to_save_memory'],
|
|
228
|
+
self.vars['filter_spec'])
|
|
229
|
+
self.converted_video, self.converted_video2 = vids
|
|
230
|
+
|
|
231
|
+
def get_origin_shape(self):
|
|
232
|
+
"""
|
|
233
|
+
Determine the origin shape and initialize variables based on the state of the current analysis.
|
|
234
|
+
|
|
235
|
+
This method analyzes the initial frame or frames to determine the origin shape
|
|
236
|
+
of an object in a video, initializing necessary variables and matrices for
|
|
237
|
+
further processing.
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
Attributes Modified:
|
|
241
|
+
start: (int) Indicates the starting frame index.
|
|
242
|
+
origin_idx: (np.ndarray) The indices of non-zero values in the origin matrix.
|
|
243
|
+
covering_intensity: (np.ndarray) Matrix used for pixel fading intensity.
|
|
244
|
+
substantial_growth: (int) Represents a significant growth measure based on the origin.
|
|
245
|
+
|
|
246
|
+
Notes:
|
|
247
|
+
- The method behavior varies if 'origin_state' is set to "constant" or not.
|
|
248
|
+
- If the background is lighter, 'covering_intensity' matrix is initialized.
|
|
249
|
+
- Uses connected components to determine which shape is closest to the center
|
|
250
|
+
or largest, based on 'appearance_detection_method'.
|
|
251
|
+
"""
|
|
252
|
+
logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Make sure of origin shape")
|
|
253
|
+
if self.vars['drift_already_corrected']:
|
|
254
|
+
self.drift_mask_coord = np.zeros((self.dims[0], 4), dtype=np.uint32)
|
|
255
|
+
for frame_i in np.arange(self.dims[0]): # 100):#
|
|
256
|
+
true_pixels = np.nonzero(self.converted_video[frame_i, ...])
|
|
257
|
+
self.drift_mask_coord[frame_i, :] = np.min(true_pixels[0]), np.max(true_pixels[0]) + 1, np.min(true_pixels[1]), np.max(true_pixels[1]) + 1
|
|
258
|
+
if np.all(self.drift_mask_coord[:, 0] == 0) and np.all(self.drift_mask_coord[:, 1] == self.dims[1] - 1) and np.all(
|
|
259
|
+
self.drift_mask_coord[:, 2] == 0) and np.all(self.drift_mask_coord[:, 3] == self.dims[2] - 1):
|
|
260
|
+
logging.error(f"Drift correction has been wrongly detected. Images do not contain zero-valued pixels")
|
|
261
|
+
self.vars['drift_already_corrected'] = False
|
|
262
|
+
if self.vars['origin_state'] == "constant":
|
|
263
|
+
self.start = 1
|
|
264
|
+
if self.vars['lighter_background']:
|
|
265
|
+
# Initialize the covering_intensity matrix as a reference for pixel fading
|
|
266
|
+
self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = 200
|
|
267
|
+
else:
|
|
268
|
+
self.start = 0
|
|
269
|
+
analysisi = self.frame_by_frame_segmentation(self.start, self.origin)
|
|
270
|
+
while np.logical_and(np.sum(analysisi.binary_image) < self.vars['first_move_threshold'], self.start < self.dims[0]):
|
|
271
|
+
self.start += 1
|
|
272
|
+
analysisi = self.frame_by_frame_segmentation(self.start, self.origin)
|
|
273
|
+
|
|
274
|
+
# Use connected components to find which shape is the nearest from the image center.
|
|
275
|
+
if self.vars['several_blob_per_arena']:
|
|
276
|
+
self.origin = analysisi.binary_image
|
|
277
|
+
else:
|
|
278
|
+
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(analysisi.binary_image,
|
|
279
|
+
connectivity=8)
|
|
280
|
+
if self.vars['appearance_detection_method'] == 'most_central':
|
|
281
|
+
center = np.array((self.dims[2] // 2, self.dims[1] // 2))
|
|
282
|
+
stats = np.zeros(nb_components - 1)
|
|
283
|
+
for shape_i in np.arange(1, nb_components):
|
|
284
|
+
stats[shape_i - 1] = eudist(center, centroids[shape_i, :])
|
|
285
|
+
# The shape having the minimal euclidean distance from the center will be the original shape
|
|
286
|
+
self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
|
|
287
|
+
self.origin[output == (np.argmin(stats) + 1)] = 1
|
|
288
|
+
elif self.vars['appearance_detection_method'] == 'largest':
|
|
289
|
+
self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
|
|
290
|
+
self.origin[output == np.argmax(stats[1:, 4])] = 1
|
|
291
|
+
self.origin_idx = np.nonzero(self.origin)
|
|
292
|
+
self.substantial_growth = np.min((1.2 * self.origin.sum(), self.origin.sum() + 250))
|
|
293
|
+
|
|
294
|
+
def get_covering_duration(self, step: int):
    """
    Determine the number of frames necessary for a pixel to get covered.

    Scans the video forward from ``self.start`` in increments of ``step``
    until the segmented area exceeds ``self.substantial_growth`` three
    times, builds a "growth" difference image between the start and the
    reached frame, then measures pixel by pixel how long the intensity
    transition takes.  Half the mean covering duration becomes ``self.step``.

    Parameters
    ----------
    step : int
        The frame increment used while scanning for substantial growth.

    Notes
    -----
    Modifies ``self.substantial_time``, ``self.substantial_image`` and
    ``self.step``; when no substantial motion is detected at all,
    ``self.start`` is set to None so callers can tell detection failed.
    """
    logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Find a frame with a significant growth/motion and determine the number of frames necessary for a pixel to get covered")
    ## Find the time at which growth reached a substantial growth.
    self.substantial_time = self.start
    # To avoid noisy images to have deleterious effects, make sure that the area reaches the threshold thrice.
    occurrence = 0
    allowed_window = None
    if self.vars['drift_already_corrected']:
        # Restrict segmentation to the window that stayed valid across all frames after drift correction.
        allowed_window = self.drift_mask_coord[:, 0].max(), self.drift_mask_coord[:, 1].min(), self.drift_mask_coord[:, 2].max(), self.drift_mask_coord[:, 3].min()
    prev_bin_im = self.origin
    while np.logical_and(occurrence < 3, self.substantial_time < (self.dims[0] - step - 1)):
        self.substantial_time += step
        growth_vision = self.frame_by_frame_segmentation(self.substantial_time, prev_bin_im)
        prev_bin_im = growth_vision.binary_image * self.borders
        surfarea = np.sum(prev_bin_im)
        # Keep the original shape inside the running mask so detection never loses the starting blob.
        prev_bin_im = np.logical_or(prev_bin_im, self.origin).astype(np.uint8)
        if surfarea > self.substantial_growth:
            occurrence += 1

    # get a rough idea of the area covered during this time
    if (self.substantial_time - self.start) > 20:
        # Long interval: average 10 frames at each end to reduce noise before differencing.
        if self.vars['lighter_background']:
            growth = (np.sum(self.converted_video[self.start:(self.start + 10), :, :], 0) / 10) - (np.sum(self.converted_video[(self.substantial_time - 10):self.substantial_time, :, :], 0) / 10)
        else:
            growth = (np.sum(self.converted_video[(self.substantial_time - 10):self.substantial_time, :, :], 0) / 10) - (
                np.sum(self.converted_video[self.start:(self.start + 10), :, :], 0) / 10)
    else:
        # Short interval: difference the two single frames directly.
        if self.vars['lighter_background']:
            growth = self.converted_video[self.start, ...] - self.converted_video[self.substantial_time, ...]
        else:
            growth = self.converted_video[self.substantial_time, ...] - self.converted_video[self.start, ...]
    intensity_extent = np.ptp(self.converted_video[self.start:self.substantial_time, :, :], axis=0)
    # Discard pixels that moved the wrong way (negative growth) or whose intensity barely changed.
    growth[np.logical_or(growth < 0, intensity_extent < np.median(intensity_extent))] = 0
    growth = bracket_to_uint8_image_contrast(growth)
    growth *= self.borders
    growth_vision = OneImageAnalysis(growth)
    growth_vision.segmentation(allowed_window=allowed_window)
    if self.vars['several_blob_per_arena']:
        _, _, stats, _ = cv2.connectedComponentsWithStats(self.origin)
        # Erode only when at least one original component is large enough (> 50 px) to survive it.
        do_erode = np.any(stats[1:, 4] > 50)
    else:
        do_erode = self.origin.sum() > 50
    if do_erode:
        self.substantial_image = cv2.erode(growth_vision.binary_image, cross_33, iterations=2)
    else:
        self.substantial_image = growth_vision.binary_image

    if np.any(self.substantial_image):
        # Use the most stable pixel of the arena as an estimate of the natural (sensor) noise amplitude.
        natural_noise = np.nonzero(intensity_extent == np.min(intensity_extent))
        natural_noise = self.converted_video[self.start:self.substantial_time, natural_noise[0][0], natural_noise[1][0]]
        natural_noise = moving_average(natural_noise, 5)
        natural_noise = np.ptp(natural_noise)
        subst_idx = np.nonzero(self.substantial_image)
        cover_lengths = np.zeros(len(subst_idx[0]), dtype=np.uint32)
        for index in np.arange(len(subst_idx[0])):
            vector = self.converted_video[self.start:self.substantial_time, subst_idx[0][index], subst_idx[1][index]]
            left, right = find_major_incline(vector, natural_noise)
            # If find_major_incline did find a major incline: (otherwise it put 0 to left and 1 to right)
            if not np.logical_and(left == 0, right == 1):
                cover_lengths[index] = len(vector[left:-right])
        # If this analysis fails put a deterministic step
        if len(cover_lengths[cover_lengths > 0]) > 0:
            # Half the mean covering duration (+1 so the step stays >= 1).
            self.step = (np.round(np.mean(cover_lengths[cover_lengths > 0])).astype(int) // 2) + 1
            logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Pre-processing detection: the time for a pixel to get covered is set to {self.step}")
        else:
            # NOTE(review): self.step keeps its previously assigned value here — presumably set
            # upstream to 1 or 10 as the log message states; confirm against the initializer.
            logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Pre-processing detection: could not automatically find the time for a pixel to get covered. Default value is 1 for video length < 40 and 10 otherwise")

        # Make sure to avoid a step overestimation
        if self.step > self.dims[0] // 20:
            self.step: int = self.dims[0] // 20

        if self.step == 0:
            self.step: int = 1
    # When the first_move_threshold is not stringent enough the program may detect a movement due to noise
    # In that case, the substantial_image is empty and there is no reason to proceed further
    else:
        self.start = None
|
|
393
|
+
|
|
394
|
+
def detection(self, compute_all_possibilities: bool=False):
    """
    Perform frame-by-frame or luminosity-based segmentation on video data to detect cell motion and growth.

    This function processes video frames using either frame-by-frame segmentation or luminosity-based
    segmentation algorithms to detect cell motion and growth. It handles drift correction, adjusts parameters
    based on configuration settings, and applies logical operations to combine results from different segmentation
    methods.

    Parameters
    ----------
    compute_all_possibilities : bool, optional
        Flag to determine if all segmentation possibilities should be computed, by default False

    Returns
    -------
    None

    Notes
    -----
    This function modifies the instance variables `self.segmented`, `self.converted_video`,
    and potentially `self.luminosity_segmentation` and `self.gradient_segmentation`.
    Depending on the configuration settings, it performs various segmentation algorithms and updates
    the instance variables accordingly.
    """
    # Frame 0 serves as the reference frame, so a valid analysis never starts before frame 1.
    if self.start is None:
        self.start = 1
    else:
        self.start = np.max((self.start, 1))
    # Slope-based segmentation cannot produce results for the last `step` frames;
    # cap the loss at a quarter of the video.
    self.lost_frames = np.min((self.step, self.dims[0] // 4))
    # I/ Image by image segmentation algorithms
    # If images contain a drift correction (zeros at borders of the image,
    # Replace these 0 by normal background values before segmenting
    if self.vars['frame_by_frame_segmentation'] or compute_all_possibilities:
        logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect cell motion and growth using the frame by frame segmentation algorithm")
        self.segmented = np.zeros(self.dims, dtype=np.uint8)
        for t in np.arange(self.dims[0]):#20):#
            # At t == 0, segmented[t - 1] is the (still zeroed) last frame;
            # frame_by_frame_segmentation substitutes self.origin in that case.
            analysisi = self.frame_by_frame_segmentation(t, self.segmented[t - 1, ...])
            self.segmented[t, ...] = analysisi.binary_image

            if self.vars['lose_accuracy_to_save_memory']:
                self.converted_video[t, ...] = bracket_to_uint8_image_contrast(analysisi.image)
            else:
                self.converted_video[t, ...] = analysisi.image
            if self.vars['convert_for_motion']['logical'] != 'None':
                if self.vars['lose_accuracy_to_save_memory']:
                    self.converted_video2[t, ...] = bracket_to_uint8_image_contrast(analysisi.image2)
                else:
                    self.converted_video2[t, ...] = analysisi.image2

    # NOTE(review): the luminosity/gradient paths below assume a two-color conversion;
    # confirm color_number is always 2 when frame_by_frame_segmentation is disabled.
    if self.vars['color_number'] == 2:
        luminosity_segmentation, l_threshold_over_time = self.lum_value_segmentation(self.converted_video, do_threshold_segmentation=self.vars['do_threshold_segmentation'] or compute_all_possibilities)
        self.converted_video = self.smooth_pixel_slopes(self.converted_video)
        gradient_segmentation = None
        if self.vars['do_slope_segmentation'] or compute_all_possibilities:
            gradient_segmentation = self.lum_slope_segmentation(self.converted_video)
            if gradient_segmentation is not None:
                # The last `lost_frames` frames carry no valid slope: repeat the last valid frame.
                gradient_segmentation[-self.lost_frames:, ...] = np.repeat(gradient_segmentation[-self.lost_frames, :, :][np.newaxis, :, :], self.lost_frames, axis=0)
        if self.vars['convert_for_motion']['logical'] != 'None':
            if self.vars['do_threshold_segmentation'] or compute_all_possibilities:
                luminosity_segmentation2, l_threshold_over_time2 = self.lum_value_segmentation(self.converted_video2, do_threshold_segmentation=True)
                if luminosity_segmentation is None:
                    luminosity_segmentation = luminosity_segmentation2
                # NOTE(review): when the first channel failed (None), the block below combines
                # luminosity_segmentation2 with itself — 'Xor' would then zero it out; confirm intended.
                if luminosity_segmentation is not None:
                    if self.vars['convert_for_motion']['logical'] == 'Or':
                        luminosity_segmentation = np.logical_or(luminosity_segmentation, luminosity_segmentation2)
                    elif self.vars['convert_for_motion']['logical'] == 'And':
                        luminosity_segmentation = np.logical_and(luminosity_segmentation, luminosity_segmentation2)
                    elif self.vars['convert_for_motion']['logical'] == 'Xor':
                        luminosity_segmentation = np.logical_xor(luminosity_segmentation, luminosity_segmentation2)
            self.converted_video2 = self.smooth_pixel_slopes(self.converted_video2)
            if self.vars['do_slope_segmentation'] or compute_all_possibilities:
                gradient_segmentation2 = self.lum_slope_segmentation(self.converted_video2)
                if gradient_segmentation2 is not None:
                    gradient_segmentation2[-self.lost_frames:, ...] = np.repeat(gradient_segmentation2[-self.lost_frames, :, :][np.newaxis, :, :], self.lost_frames, axis=0)
                if gradient_segmentation is None:
                    gradient_segmentation = gradient_segmentation2
                if gradient_segmentation is not None:
                    if self.vars['convert_for_motion']['logical'] == 'Or':
                        gradient_segmentation = np.logical_or(gradient_segmentation, gradient_segmentation2)
                    elif self.vars['convert_for_motion']['logical'] == 'And':
                        gradient_segmentation = np.logical_and(gradient_segmentation, gradient_segmentation2)
                    elif self.vars['convert_for_motion']['logical'] == 'Xor':
                        gradient_segmentation = np.logical_xor(gradient_segmentation, gradient_segmentation2)

        if compute_all_possibilities:
            logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Compute all options to detect cell motion and growth. Maximal growth per frame: {self.vars['maximal_growth_factor']}")
            # Store coordinates (np.nonzero) rather than whole boolean videos to save memory.
            if luminosity_segmentation is not None:
                self.luminosity_segmentation = np.nonzero(luminosity_segmentation)
            if gradient_segmentation is not None:
                self.gradient_segmentation = np.nonzero(gradient_segmentation)
            if luminosity_segmentation is not None and gradient_segmentation is not None:
                self.logical_and = np.nonzero(np.logical_and(luminosity_segmentation, gradient_segmentation))
                self.logical_or = np.nonzero(np.logical_or(luminosity_segmentation, gradient_segmentation))
        elif not self.vars['frame_by_frame_segmentation']:
            if self.vars['do_threshold_segmentation'] and not self.vars['do_slope_segmentation']:
                logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect with luminosity threshold segmentation algorithm")
                self.segmented = luminosity_segmentation
            if self.vars['do_slope_segmentation']:
                logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect with luminosity slope segmentation algorithm")
                self.segmented = gradient_segmentation
            if np.logical_and(self.vars['do_threshold_segmentation'], self.vars['do_slope_segmentation']):
                if self.vars['true_if_use_light_AND_slope_else_OR']:
                    logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detection resuts from threshold AND slope segmentation algorithms")
                    if luminosity_segmentation is not None and gradient_segmentation is not None:
                        self.segmented = np.logical_and(luminosity_segmentation, gradient_segmentation)
                else:
                    logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detection resuts from threshold OR slope segmentation algorithms")
                    if luminosity_segmentation is not None and gradient_segmentation is not None:
                        self.segmented = np.logical_or(luminosity_segmentation, gradient_segmentation)
            # Logical combinations yield booleans; downstream code expects uint8.
            self.segmented = self.segmented.astype(np.uint8)
|
|
507
|
+
|
|
508
|
+
|
|
509
|
+
def frame_by_frame_segmentation(self, t: int, previous_binary_image: NDArray=None):
    """
    Segment one video frame independently of the rest of the video.

    Parameters
    ----------
    t : int
        The time index of the frame to process.
    previous_binary_image : NDArray, optional
        The binary image from the previous frame; when omitted (or at
        ``t == 0``) the original shape ``self.origin`` is used instead.

    Returns
    -------
    OneImageAnalysis
        An object containing the analysis of the current frame.
    """
    frame_u8 = bracket_to_uint8_image_contrast(self.converted_video[t, :, :])
    # 1. When drift was corrected upstream, only segment the area that stayed
    # inside the frame over the whole local window (t ± step/2).
    window = None
    if self.vars['drift_already_corrected']:
        half = np.ceil(self.step / 2).astype(int)
        lo = np.max((t - half, 0))
        hi = np.min((t + half, self.dims[0]))
        coords = self.drift_mask_coord[lo:hi, :]
        window = (np.max(coords[:, 0]), np.min(coords[:, 1]),
                  np.max(coords[:, 2]), np.min(coords[:, 3]))

    analysis = OneImageAnalysis(frame_u8)
    if self.vars['convert_for_motion']['logical'] != 'None':
        analysis.image2 = bracket_to_uint8_image_contrast(self.converted_video2[t, :, :])

    use_origin = previous_binary_image is None or t == 0
    analysis.previous_binary_image = self.origin if use_origin else previous_binary_image

    analysis.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'],
                          bio_label=self.vars["bio_label"], bio_label2=self.vars["bio_label2"],
                          rolling_window_segmentation=self.vars['rolling_window_segmentation'],
                          lighter_background=self.vars['lighter_background'],
                          allowed_window=window, filter_spec=self.vars['filter_spec'])  # filtering already done when creating converted_video

    return analysis
|
|
556
|
+
|
|
557
|
+
def lum_value_segmentation(self, converted_video: NDArray, do_threshold_segmentation: bool) -> Tuple[NDArray, NDArray]:
    """
    Perform segmentation based on luminosity values from a video.

    The extreme luminosity reached on the pixels of ``self.substantial_image``
    during the pre-detected growth interval defines a candidate intensity
    threshold.  Easing factors are scanned from the most permissive (0.8) to
    the most stringent (-0.6); a factor is validated when the resulting
    segmentation never covers (nor changes by) more than twice
    ``maximal_growth_factor`` of the arena per frame.

    Parameters
    ----------
    converted_video : NDArray
        The input video data in a NumPy array format.
    do_threshold_segmentation : bool
        When True, the final threshold is applied to the whole video;
        otherwise only the first ``lost_frames + 1`` frames are segmented.

    Returns
    -------
    Tuple[NDArray, NDArray]
        ``(luminosity_segmentation, l_threshold_over_time)``; both are None
        when no usable covering value could be extracted.
    """
    shape_motion_failed: bool = False
    if self.vars['lighter_background']:
        covering_l_values = np.min(converted_video[:self.substantial_time, :, :],
                                   0) * self.substantial_image
    else:
        covering_l_values = np.max(converted_video[:self.substantial_time, :, :],
                                   0) * self.substantial_image
    # Avoid errors by checking whether the covering values are nonzero
    covering_l_values = covering_l_values[covering_l_values != 0]
    if len(covering_l_values) == 0:
        shape_motion_failed = True

    luminosity_segmentation = None
    l_threshold_over_time = None
    if not shape_motion_failed:
        value_segmentation_thresholds = np.arange(0.8, -0.7, -0.1)
        validated_thresholds = np.zeros(value_segmentation_thresholds.shape, dtype=bool)
        counter = 0
        max_motion_per_frame = (self.dims[1] * self.dims[2]) * self.vars['maximal_growth_factor'] * 2
        if self.vars['lighter_background']:
            basic_bckgrnd_values = np.quantile(converted_video[:(self.lost_frames + 1), ...], 0.9, axis=(1, 2))
        else:
            basic_bckgrnd_values = np.quantile(converted_video[:(self.lost_frames + 1), ...], 0.1, axis=(1, 2))
        # Try different easing values and keep the ones that do not
        # segment more than x percent of the image.
        # Bound by the array length (was a hard-coded `<= 14`, which would silently
        # skip candidates if the threshold range ever changed; matches lum_slope_segmentation).
        while counter < value_segmentation_thresholds.shape[0]:
            value_threshold = value_segmentation_thresholds[counter]
            if self.vars['lighter_background']:
                l_threshold = (1 + value_threshold) * np.max(covering_l_values)
            else:
                l_threshold = (1 - value_threshold) * np.min(covering_l_values)
            starting_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video[:(self.lost_frames + 1), ...],
                                                                                 basic_bckgrnd_values, l_threshold,
                                                                                 self.vars['lighter_background'])

            changing_pixel_number = np.sum(np.absolute(np.diff(starting_segmentation.astype(np.int8), 1, 0)), (1, 2))
            validation = np.max(np.sum(starting_segmentation, (1, 2))) < max_motion_per_frame and (
                    np.max(changing_pixel_number) < max_motion_per_frame)
            validated_thresholds[counter] = validation
            # Candidates are scanned from permissive to stringent: once at least one
            # has been validated, the first failure ends the scan.
            if np.any(validated_thresholds):
                if not validation:
                    break
            counter += 1
        # If any threshold is accepted, use their average to proceed the final thresholding
        valid_number = validated_thresholds.sum()
        if valid_number > 0:
            if valid_number > 2:
                index_to_keep = 2
            else:
                index_to_keep = valid_number - 1
            value_threshold = value_segmentation_thresholds[
                np.uint8(np.floor(np.mean(np.nonzero(validated_thresholds)[0][index_to_keep])))]
        else:
            value_threshold = 0

        if self.vars['lighter_background']:
            l_threshold = (1 + value_threshold) * np.max(covering_l_values)
        else:
            l_threshold = (1 - value_threshold) * np.min(covering_l_values)
        if do_threshold_segmentation:
            # Final segmentation over the whole video, with per-frame background quantiles.
            if self.vars['lighter_background']:
                basic_bckgrnd_values = np.quantile(converted_video, 0.9, axis=(1, 2))
            else:
                basic_bckgrnd_values = np.quantile(converted_video, 0.1, axis=(1, 2))
            luminosity_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video, basic_bckgrnd_values,
                                                                                   l_threshold, self.vars['lighter_background'])
        else:
            luminosity_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video[:(self.lost_frames + 1), ...],
                                                                                   basic_bckgrnd_values, l_threshold,
                                                                                   self.vars['lighter_background'])
    return luminosity_segmentation, l_threshold_over_time
|
|
653
|
+
|
|
654
|
+
def smooth_pixel_slopes(self, converted_video: NDArray) -> NDArray:
    """
    Apply smoothing to pixel slopes in a video by convolving with a moving average kernel.

    Each pixel's intensity profile over time is normalized by the per-frame
    mean intensity, padded at both ends ('edge' mode) and convolved with a
    flat kernel of width ``self.step``; the pass is repeated until
    ``self.vars['repeat_video_smoothing']`` smoothings have been applied.

    Parameters
    ----------
    converted_video : NDArray
        The input video array (time, y, x) to be smoothed.

    Returns
    -------
    NDArray
        Smoothed video of shape ``self.dims``; float32 when
        ``lose_accuracy_to_save_memory`` is set, float64 otherwise.
        On MemoryError the input array is returned unchanged.

    Raises
    ------
    MemoryError
        Caught internally; logged and the unsmoothed input is returned.
    """
    try:
        # The two historical branches differed only in output precision: float64
        # values assigned into a float32 array are down-cast on assignment anyway.
        out_dtype = np.float32 if self.vars['lose_accuracy_to_save_memory'] else np.float64
        smoothed_video = np.zeros(self.dims, dtype=out_dtype)
        smooth_kernel = np.ones(self.step, dtype=np.float64) / self.step
        # Symmetric 'edge' padding keeps the convolved profile the same length as the input.
        pad_width = (self.step // 2, self.step - 1 - self.step // 2)
        repeats = self.vars['repeat_video_smoothing']
        for i in np.arange(converted_video.shape[1]):
            for j in np.arange(converted_video.shape[2]):
                profile = converted_video[:, i, j] / self.mean_intensity_per_frame
                # Local renamed from `moving_average` to avoid shadowing the
                # module-level helper of that name used elsewhere in this file.
                smoothed = np.convolve(np.pad(profile, pad_width, mode='edge'), smooth_kernel, mode='valid')
                if repeats > 1:
                    for _ in np.arange(1, repeats):
                        smoothed = np.convolve(np.pad(smoothed, pad_width, mode='edge'), smooth_kernel, mode='valid')
                smoothed_video[:, i, j] = smoothed
        return smoothed_video

    except MemoryError:
        logging.error("Not enough RAM available to smooth pixel curves. Detection may fail.")
        smoothed_video = converted_video
        return smoothed_video
|
|
720
|
+
|
|
721
|
+
def lum_slope_segmentation(self, converted_video: NDArray) -> NDArray:
    """
    Segment the video from the temporal slope (gradient) of pixel luminosity.

    The extreme slopes reached on the pixels of ``self.substantial_image``
    define a candidate gradient threshold; easing factors are scanned from
    permissive (0.8) to stringent (-0.6) and a factor is validated when the
    resulting segmentation never covers (nor changes by) more than
    ``maximal_growth_factor`` of the arena per frame.

    Parameters
    ----------
    converted_video : NDArray
        The input video array for segmentation processing.

    Returns
    -------
    NDArray
        Segmented gradient array of the video (uint8), or None when no
        usable covering slope could be extracted.

    Notes
    -----
    May consume significant memory; the computation degrades to a
    pixel-by-pixel gradient when available RAM is insufficient.
    """
    shape_motion_failed: bool = False
    # 2) Contrast increase
    oridx = np.nonzero(self.origin)
    notoridx = np.nonzero(1 - self.origin)
    # Squaring luminosities emphasizes contrast, but only helps when the original
    # shape is not already far darker than the background.
    do_increase_contrast = np.mean(converted_video[0, oridx[0], oridx[1]]) * 10 > np.mean(
        converted_video[0, notoridx[0], notoridx[1]])
    necessary_memory = self.dims[0] * self.dims[1] * self.dims[2] * 64 * 2 * 1.16415e-10
    available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
    if self.vars['lose_accuracy_to_save_memory']:
        derive = converted_video.astype(np.float32)
    else:
        derive = converted_video.astype(np.float64)
    if necessary_memory > available_memory:
        # Drop the local reference to the raw video to lower peak memory usage.
        converted_video = None

    if do_increase_contrast:
        derive = np.square(derive)

    # 3) Get the gradient
    necessary_memory = derive.size * 64 * 4 * 1.16415e-10
    available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
    if necessary_memory > available_memory:
        # Not enough RAM for a whole-array gradient: compute it pixel by pixel in place.
        for cy in np.arange(self.dims[1]):
            for cx in np.arange(self.dims[2]):
                if self.vars['lose_accuracy_to_save_memory']:
                    derive[:, cy, cx] = np.gradient(derive[:, cy, cx], self.step).astype(np.float32)
                else:
                    derive[:, cy, cx] = np.gradient(derive[:, cy, cx], self.step)
    else:
        if self.vars['lose_accuracy_to_save_memory']:
            derive = np.gradient(derive, self.step, axis=0).astype(np.float32)
        else:
            derive = np.gradient(derive, self.step, axis=0)

    # 4) Segment
    if self.vars['lighter_background']:
        covering_slopes = np.min(derive[:self.substantial_time, :, :], 0) * self.substantial_image
    else:
        covering_slopes = np.max(derive[:self.substantial_time, :, :], 0) * self.substantial_image
    covering_slopes = covering_slopes[covering_slopes != 0]
    if len(covering_slopes) == 0:
        shape_motion_failed = True

    gradient_segmentation = None
    if not shape_motion_failed:
        gradient_segmentation = np.zeros(self.dims, np.uint8)
        value_segmentation_thresholds = np.arange(0.8, -0.7, -0.1)
        validated_thresholds = np.zeros(value_segmentation_thresholds.shape, dtype=bool)
        counter = 0
        max_motion_per_frame = (self.dims[1] * self.dims[2]) * self.vars['maximal_growth_factor']
        # Try different easing values and keep the ones that do not
        # segment more than x percent of the image
        while counter < value_segmentation_thresholds.shape[0]:
            ease_slope_segmentation = value_segmentation_thresholds[counter]
            if self.vars['lighter_background']:
                gradient_threshold = (1 + ease_slope_segmentation) * np.max(covering_slopes)
                sample = np.less(derive[:self.substantial_time], gradient_threshold)
            else:
                gradient_threshold = (1 - ease_slope_segmentation) * np.min(covering_slopes)
                sample = np.greater(derive[:self.substantial_time], gradient_threshold)
            changing_pixel_number = np.sum(np.absolute(np.diff(sample.astype(np.int8), 1, 0)), (1, 2))
            validation = np.max(np.sum(sample, (1, 2))) < max_motion_per_frame and (
                    np.max(changing_pixel_number) < max_motion_per_frame)
            validated_thresholds[counter] = validation
            # Once at least one factor has been validated, the first failure ends the scan.
            if np.any(validated_thresholds):
                if not validation:
                    break
            counter += 1
        # If any threshold is accepted, use their average to proceed the final thresholding
        valid_number = validated_thresholds.sum()
        if valid_number > 0:
            if valid_number > 2:
                index_to_keep = 2
            else:
                index_to_keep = valid_number - 1
            ease_slope_segmentation = value_segmentation_thresholds[
                np.uint8(np.floor(np.mean(np.nonzero(validated_thresholds)[0][index_to_keep])))]

            if self.vars['lighter_background']:
                # FIX: use the same (1 + factor) easing as the validation loop above (and as
                # lum_value_segmentation does); the previous (1 - factor) applied a threshold
                # that was never the one validated.
                gradient_threshold = (1 + ease_slope_segmentation) * np.max(covering_slopes)
                gradient_segmentation[:-self.lost_frames, :, :] = np.less(derive, gradient_threshold)[
                                                                  self.lost_frames:, :, :]
            else:
                gradient_threshold = (1 - ease_slope_segmentation) * np.min(covering_slopes)
                gradient_segmentation[:-self.lost_frames, :, :] = np.greater(derive, gradient_threshold)[
                                                                  self.lost_frames:, :, :]
        else:
            # No factor validated: fall back to a per-frame extremum-based threshold.
            if self.vars['lighter_background']:
                gradient_segmentation[:-self.lost_frames, :, :] = (derive < (np.min(derive, (1, 2)) * 1.1)[:, None, None])[self.lost_frames:, :, :]
            else:
                gradient_segmentation[:-self.lost_frames, :, :] = (derive > (np.max(derive, (1, 2)) * 0.1)[:, None, None])[self.lost_frames:, :, :]
    return gradient_segmentation
|
|
840
|
+
|
|
841
|
+
def update_ring_width(self):
    """
    Update the `pixel_ring_depth` and create an erodila disk.

    Forces the pixel ring depth to be an odd number of at least 3, then
    rebuilds the elliptical erosion/dilation structuring element and the
    maximal distance used to connect distant shapes.
    """
    # Clamp to the minimum of 3, then bump even values to the next odd number.
    self.pixel_ring_depth = max(self.pixel_ring_depth, 3)
    if self.pixel_ring_depth % 2 == 0:
        self.pixel_ring_depth += 1
    self.erodila_disk = create_ellipse(self.pixel_ring_depth, self.pixel_ring_depth).astype(np.uint8)
    self.max_distance = self.pixel_ring_depth * self.vars['detection_range_factor']
|
|
856
|
+
|
|
857
|
+
    def initialize_post_processing(self):
        """
        Initialize the state used by the frame-by-frame post-processing loop.

        Builds the `binary` time stack, seeds it with the original shape
        according to `origin_state`, pre-filters the first `segmented` frames,
        allocates the per-frame surface-area array, computes the gravity field,
        and optionally prepares the sun-ray hole correction and the
        near-periphery mask used to cap peripheral growth.
        """
        ## Initialization
        logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting Post_processing. Fading detection: {self.vars['do_fading']}: {self.vars['fading']}, Subtract background: {self.vars['subtract_background']}, Correct errors around initial shape: {self.vars['correct_errors_around_initial']}, Connect distant shapes: {self.vars['detection_range_factor'] > 0}, How to select appearing cell(s): {self.vars['appearance_detection_method']}")
        self.binary = np.zeros(self.dims[:3], dtype=np.uint8)
        # Guard against stale cached videos whose frame size no longer matches.
        if self.origin.shape[0] != self.binary[self.start - 1, :, :].shape[0] or self.origin.shape[1] != self.binary[self.start - 1, :, :].shape[1]:
            logging.error("Unaltered videos deprecated, they have been created with different settings.\nDelete .npy videos and Data to run Cellects quickly.pkl and re-run")

        if self.vars['origin_state'] == "invisible":
            # The specimen is not visible at start: seed only the last pre-start
            # frame and take covering intensities from the first analyzed frame.
            self.binary[self.start - 1, :, :] = deepcopy(self.origin)
            self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = self.converted_video[self.start, self.origin_idx[0], self.origin_idx[1]]
        else:
            if self.vars['origin_state'] == "fluctuating":
                # Use the per-pixel median over the pre-start frames as reference.
                self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = np.median(self.converted_video[:self.start, self.origin_idx[0], self.origin_idx[1]], axis=0)

            self.binary[:self.start, :, :] = np.repeat(np.expand_dims(self.origin, 0), self.start, axis=0)
        if self.start < self.step:
            frames_to_assess = self.step
            self.segmented[self.start - 1, ...] = self.binary[self.start - 1, :, :]
            for t in np.arange(self.start, self.lost_frames):
                # Only keep pixels that are always detected
                always_found = np.sum(self.segmented[t:(t + frames_to_assess), ...], 0)
                always_found = always_found == frames_to_assess
                # Remove too small shapes
                without_small, stats, centro = cc(always_found.astype(np.uint8))
                large_enough = np.nonzero(stats[1:, 4] > ((self.vars['first_move_threshold'] + 1) // 2))[0]
                if len(large_enough) > 0:
                    # NOTE(review): np.isin is applied to the boolean mask
                    # `always_found`, not to the label image `without_small`;
                    # presumably the labels were intended — confirm upstream.
                    always_found *= np.isin(always_found, large_enough + 1)
                    always_found = np.logical_or(always_found, self.segmented[t - 1, ...])
                    self.segmented[t, ...] *= always_found
                else:
                    self.segmented[t, ...] = 0
                # Detection can only accumulate: keep everything seen before t.
                self.segmented[t, ...] = np.logical_or(self.segmented[t - 1, ...], self.segmented[t, ...])
        self.mean_distance_per_frame = None
        self.surfarea = np.zeros(self.dims[0], dtype =np.uint64)
        self.surfarea[:self.start] = np.sum(self.binary[:self.start, :, :], (1, 2))
        # Distance field decreasing away from the initial shape.
        self.gravity_field = inverted_distance_transform(self.binary[(self.start - 1), :, :],
                                                         np.sqrt(np.sum(self.binary[(self.start - 1), :, :])))
        if self.vars['correct_errors_around_initial']:
            # Rays radiating from the initial shape, used later to detect holes.
            self.rays, self.sun = draw_me_a_sun(self.binary[(self.start - 1), :, :], ray_length_coef=1)  # plt.imshow(sun)
            self.holes = np.zeros(self.dims[1:], dtype=np.uint8)
            self.pixel_ring_depth += 2
            self.update_ring_width()

        if self.vars['prevent_fast_growth_near_periphery']:
            self.near_periphery = np.zeros(self.dims[1:])
            if self.vars['arena_shape'] == 'circle':
                # Build a ring mask: everything outside a shrunken ellipse.
                periphery_width = self.vars['periphery_width'] * 2
                elliperiphery = create_ellipse(self.dims[1] - periphery_width, self.dims[2] - periphery_width)
                half_width = periphery_width // 2
                if periphery_width % 2 == 0:
                    self.near_periphery[half_width:-half_width, half_width:-half_width] = elliperiphery
                else:
                    self.near_periphery[half_width:-half_width - 1, half_width:-half_width - 1] = elliperiphery
                self.near_periphery = 1 - self.near_periphery
            else:
                # Rectangular arena: mark a band along each border.
                self.near_periphery[:self.vars['periphery_width'], :] = 1
                self.near_periphery[-self.vars['periphery_width']:, :] = 1
                self.near_periphery[:, :self.vars['periphery_width']] = 1
                self.near_periphery[:, -self.vars['periphery_width']:] = 1
            # Store as index tuples for fast fancy indexing in update_shape.
            self.near_periphery = np.nonzero(self.near_periphery)
|
|
934
|
+
|
|
935
|
+
    def update_shape(self, show_seg: bool):
        """
        Update the detected shape for the current frame `self.t`.

        Combines recent segmentation frames into a set of candidate pixels,
        rejects noisy frames, connects distant shapes, optionally applies
        fading (cell-leaving) detection and hole/periphery corrections, then
        stores the result in `self.binary[self.t]` and advances `self.t`.

        Parameters
        ----------
        show_seg : bool
            If True, display the segmented frame with cv2.imshow.
        """
        # Get from gradients, a 2D matrix of potentially covered pixels
        # I/ dilate the shape made with covered pixels to assess for covering

        # I/ 1) Only keep pixels that have been detected at least two times in the three previous frames
        if self.dims[0] < 100:
            # Short videos: not enough frames to vote, use the raw frame.
            new_potentials = self.segmented[self.t, :, :]
        else:
            if self.t > 1:
                new_potentials = np.sum(self.segmented[(self.t - 2): (self.t + 1), :, :], 0, dtype=np.uint8)
            else:
                new_potentials = np.sum(self.segmented[: (self.t + 1), :, :], 0, dtype=np.uint8)
            # Keep only pixels detected in at least two of the summed frames.
            new_potentials[new_potentials == 1] = 0
            new_potentials[new_potentials > 1] = 1

        # I/ 2) If an image displays more new potential pixels than 50% of image pixels,
        # one of these images is considered noisy and we try taking only one.
        frame_counter = -1
        maximal_size = 0.5 * new_potentials.size
        if (self.vars["do_threshold_segmentation"] or self.vars["frame_by_frame_segmentation"]) and self.t > np.max((self.start + self.step, 6)):
            # Cap growth at a factor of the largest area seen so far (or the arena borders).
            maximal_size = np.min((np.max(self.binary[:self.t].sum((1, 2))) * (1 + self.vars['maximal_growth_factor']), self.borders.sum()))
        while np.logical_and(np.sum(new_potentials) > maximal_size,
                             frame_counter <= 5):  # np.logical_and(np.sum(new_potentials > 0) > 5 * np.sum(dila_ring), frame_counter <= 5):
            frame_counter += 1
            if frame_counter > self.t:
                break
            else:
                if frame_counter < 5:
                    # Fall back to a single (earlier) frame.
                    new_potentials = self.segmented[self.t - frame_counter, :, :]
                else:
                    # If taking only one image is not enough, use the inverse of the fadinged matrix as new_potentials
                    # Given it haven't been processed by any slope calculation, it should be less noisy
                    new_potentials = np.sum(self.segmented[(self.t - 5): (self.t + 1), :, :], 0, dtype=np.uint8)
                    new_potentials[new_potentials < 6] = 0
                    new_potentials[new_potentials == 6] = 1

        new_shape = deepcopy(self.binary[self.t - 1, :, :])
        # Close then open to remove speckle noise, and clip to the arena borders.
        new_potentials = cv2.morphologyEx(new_potentials, cv2.MORPH_CLOSE, cross_33)
        new_potentials = cv2.morphologyEx(new_potentials, cv2.MORPH_OPEN, cross_33) * self.borders
        new_shape = np.logical_or(new_shape, new_potentials).astype(np.uint8)
        # Add distant shapes within a radius, score every added pixels according to their distance
        if not self.vars['several_blob_per_arena']:
            if new_shape.sum() == 0:
                new_shape = deepcopy(new_potentials)
            else:
                pads = ProgressivelyAddDistantShapes(new_potentials, new_shape, self.max_distance)
                r = weakref.ref(pads)
                # If max_distance is non nul look for distant shapes
                pads.consider_shapes_sizes(self.vars['min_size_for_connection'],
                                           self.vars['max_size_for_connection'])
                pads.connect_shapes(only_keep_connected_shapes=True, rank_connecting_pixels=True)

                new_shape = deepcopy(pads.expanded_shape)
                new_shape[new_shape > 1] = 1
                if np.logical_and(self.t > self.step, self.t < self.dims[0]):
                    # Values > 5 mark ranked connecting pixels added by pads.
                    if np.any(pads.expanded_shape > 5):
                        # Add distant shapes back in time at the covering speed of neighbors
                        self.binary[self.t][np.nonzero(new_shape)] = 1
                        self.binary[(self.step):(self.t + 1), :, :] = \
                            pads.modify_past_analysis(self.binary[(self.step):(self.t + 1), :, :],
                                                      self.segmented[(self.step):(self.t + 1), :, :])
                        new_shape = deepcopy(self.binary[self.t, :, :])
                pads = None

        # Fill holes
        new_shape = cv2.morphologyEx(new_shape, cv2.MORPH_CLOSE, cross_33)

        if self.vars['do_fading'] and (self.t > self.step + self.lost_frames):
            # Shape Erosion
            # I/ After a substantial growth, erode the shape made with covered pixels to assess for fading
            # Use the newly covered pixels to calculate their mean covering intensity
            new_idx = np.nonzero(np.logical_xor(new_shape, self.binary[self.t - 1, :, :]))
            start_intensity_monitoring = self.t - self.lost_frames - self.step
            end_intensity_monitoring = self.t - self.lost_frames
            self.covering_intensity[new_idx[0], new_idx[1]] = np.median(self.converted_video[start_intensity_monitoring:end_intensity_monitoring, new_idx[0], new_idx[1]], axis=0)
            previous_binary = self.binary[self.t - 1, :, :]
            greyscale_image = self.converted_video[self.t - self.lost_frames, :, :]
            protect_from_fading = None
            if self.vars['origin_state'] == 'constant':
                # A constant origin must never be marked as faded.
                protect_from_fading = self.origin
            new_shape, self.covering_intensity = cell_leaving_detection(new_shape, self.covering_intensity, previous_binary, greyscale_image, self.vars['fading'], self.vars['lighter_background'], self.vars['several_blob_per_arena'], self.erodila_disk, protect_from_fading)

            # Zero the reference intensity outside the (possibly eroded) shape.
            self.covering_intensity *= new_shape
        self.binary[self.t, :, :] = new_shape * self.borders
        self.surfarea[self.t] = np.sum(self.binary[self.t, :, :])

        # Calculate the mean distance covered per frame and correct for a ring of not really fading pixels
        if self.mean_distance_per_frame is None:
            if self.vars['correct_errors_around_initial'] and not self.vars['several_blob_per_arena']:
                # Check every 20 frames, only during the substantial-growth window.
                if np.logical_and((self.t % 20) == 0,
                                  np.logical_and(self.surfarea[self.t] > self.substantial_growth,
                                                 self.surfarea[self.t] < self.substantial_growth * 2)):
                    shape = self.binary[self.t, :, :] * self.sun
                    back = (1 - self.binary[self.t, :, :]) * self.sun
                    for ray in self.rays:
                        # For each sun's ray, see how they cross the shape/back and
                        # store the gravity_field value of these pixels (distance to the original shape).
                        ray_through_shape = (shape == ray) * self.gravity_field
                        ray_through_back = (back == ray) * self.gravity_field
                        if np.any(ray_through_shape):
                            if np.any(ray_through_back):
                                # If at least one back pixel is nearer to the original shape than a shape pixel,
                                # there is a hole to fill.
                                if np.any(ray_through_back > np.min(ray_through_shape[ray_through_shape > 0])):
                                    # Check if the nearest pixels are shape, if so, supress them until the nearest pixel
                                    # becomes back
                                    while np.max(ray_through_back) <= np.max(ray_through_shape):
                                        ray_through_shape[ray_through_shape == np.max(ray_through_shape)] = 0
                                    # Now, all back pixels that are nearer than the closest shape pixel should get filled
                                    # To do so, replace back pixels further than the nearest shape pixel by 0
                                    ray_through_back[ray_through_back < np.max(ray_through_shape)] = 0
                                    self.holes[np.nonzero(ray_through_back)] = 1
                        else:
                            # Ray never crosses the shape: drop it from future scans.
                            self.rays = np.concatenate((self.rays[:(ray - 2)], self.rays[(ray - 1):]))
                    ray_through_shape = None
                    ray_through_back = None
            if np.any(self.surfarea[:self.t] > self.substantial_growth * 2):

                if self.vars['correct_errors_around_initial'] and not self.vars['several_blob_per_arena']:
                    # Apply the hole correction
                    self.holes = cv2.morphologyEx(self.holes, cv2.MORPH_CLOSE, cross_33, iterations=10)
                    # If some holes are not covered by now
                    if np.any(self.holes * (1 - self.binary[self.t, :, :])):
                        self.binary[:(self.t + 1), :, :], holes_time_end, distance_against_time = \
                            dynamically_expand_to_fill_holes(self.binary[:(self.t + 1), :, :], self.holes)
                        if holes_time_end is not None:
                            self.binary[holes_time_end:(self.t + 1), :, :] += self.binary[holes_time_end, :, :]
                            self.binary[holes_time_end:(self.t + 1), :, :][
                                self.binary[holes_time_end:(self.t + 1), :, :] > 1] = 1
                        self.surfarea[:(self.t + 1)] = np.sum(self.binary[:(self.t + 1), :, :], (1, 2))

                    else:
                        distance_against_time = [1, 2]
                else:
                    distance_against_time = [1, 2]
                distance_against_time = np.diff(distance_against_time)
                if len(distance_against_time) > 0:
                    self.mean_distance_per_frame = np.mean(- distance_against_time)
                else:
                    self.mean_distance_per_frame = 1

        if self.vars['prevent_fast_growth_near_periphery']:
            # growth_near_periphery = np.diff(self.binary[self.t-1:self.t+1, :, :] * self.near_periphery, axis=0)
            growth_near_periphery = np.diff(self.binary[self.t-1:self.t+1, self.near_periphery[0], self.near_periphery[1]], axis=0)
            if (growth_near_periphery == 1).sum() > self.vars['max_periphery_growth']:
                # self.binary[self.t, self.near_periphery[0], self.near_periphery[1]] = self.binary[self.t - 1, self.near_periphery[0], self.near_periphery[1]]
                # Revert only the peripheral connected components that grew too much.
                periphery_to_remove = np.zeros(self.dims[1:], dtype=np.uint8)
                periphery_to_remove[self.near_periphery[0], self.near_periphery[1]] = self.binary[self.t, self.near_periphery[0], self.near_periphery[1]]
                shapes, stats, centers = cc(periphery_to_remove)
                periphery_to_remove = np.nonzero(np.isin(shapes, np.nonzero(stats[:, 4] > self.vars['max_periphery_growth'])[0][1:]))
                self.binary[self.t, periphery_to_remove[0], periphery_to_remove[1]] = self.binary[self.t - 1, periphery_to_remove[0], periphery_to_remove[1]]
                if not self.vars['several_blob_per_arena']:
                    # Keep only the main (label 1) connected component.
                    shapes, stats, centers = cc(self.binary[self.t, ...])
                    shapes[shapes != 1] = 0
                    self.binary[self.t, ...] = shapes

        # Display
        if show_seg:
            if self.visu is not None:
                im_to_display = deepcopy(self.visu[self.t, ...])
                contours = np.nonzero(cv2.morphologyEx(self.binary[self.t, :, :], cv2.MORPH_GRADIENT, cross_33))
                if self.vars['lighter_background']:
                    im_to_display[contours[0], contours[1]] = 0
                else:
                    im_to_display[contours[0], contours[1]] = 255
            else:
                im_to_display = self.binary[self.t, :, :] * 255
            imtoshow = cv2.resize(im_to_display, (540, 540))
            cv2.imshow("shape_motion", imtoshow)
            cv2.waitKey(1)
        self.t += 1
|
|
1124
|
+
|
|
1125
|
+
    def get_descriptors_from_binary(self, release_memory: bool=True):
        """
        Compute per-frame shape descriptors from the binary video.

        Derives the surface area, the frame timings (from EXIF when available,
        otherwise frame indices), detects the first movement, optionally
        computes solidity separately, then builds `one_row_per_frame` through
        either the single-specimen or the per-colony descriptor pipeline.
        Finally records the final area and scales descriptors to mm if requested.

        Parameters
        ----------
        release_memory : bool, optional
            If True (default), drop large intermediate arrays and run the
            garbage collector before computing descriptors.
        """
        ##
        if release_memory:
            # These intermediates are no longer needed once `binary` is final.
            self.substantial_image = None
            self.covering_intensity = None
            self.segmented = None
            self.gravity_field = None
            self.sun = None
            self.rays = None
            self.holes = None
            collect()
        self.surfarea = self.binary.sum((1, 2))
        timings = self.vars['exif']
        if len(timings) < self.dims[0]:
            # Not enough EXIF timestamps: fall back to frame indices.
            timings = np.arange(self.dims[0])
        if np.any(timings > 0):
            self.time_interval = np.mean(np.diff(timings))
        else:
            self.time_interval = 1.
        timings = timings[:self.dims[0]]

        # Detect first motion
        self.one_descriptor_per_arena['first_move'] = detect_first_move(self.surfarea, self.vars['first_move_threshold'])

        # Solidity is needed by the growth-transition analysis even when the
        # user did not request it as an output descriptor.
        self.compute_solidity_separately: bool = self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not self.vars['descriptors']['solidity']
        if self.compute_solidity_separately:
            self.solidity = np.zeros(self.dims[0], dtype=np.float64)
        if not self.vars['several_blob_per_arena']:
            # solidity must be added if detect growth transition is computed
            if self.compute_solidity_separately:
                for t in np.arange(self.dims[0]):
                    solidity = ShapeDescriptors(self.binary[t, :, :], ["solidity"])
                    self.solidity[t] = solidity.descriptors["solidity"]
            self.one_row_per_frame = compute_one_descriptor_per_frame(self.binary,
                                                                      self.one_descriptor_per_arena['arena'], timings,
                                                                      self.vars['descriptors'],
                                                                      self.vars['output_in_mm'],
                                                                      self.vars['average_pixel_size'],
                                                                      self.vars['do_fading'],
                                                                      self.vars['save_coord_specimen'])
        else:
            self.one_row_per_frame = compute_one_descriptor_per_colony(self.binary,
                                                                       self.one_descriptor_per_arena['arena'], timings,
                                                                       self.vars['descriptors'],
                                                                       self.vars['output_in_mm'],
                                                                       self.vars['average_pixel_size'],
                                                                       self.vars['do_fading'],
                                                                       self.vars['first_move_threshold'],
                                                                       self.vars['save_coord_specimen'])
        self.one_descriptor_per_arena["final_area"] = self.binary[-1, :, :].sum()
        if self.vars['output_in_mm']:
            self.one_descriptor_per_arena = scale_descriptors(self.one_descriptor_per_arena, self.vars['average_pixel_size'])
|
|
1220
|
+
|
|
1221
|
+
    def detect_growth_transitions(self):
        """
        Detect whether early growth is isotropic and when it breaks.

        Only runs when `iso_digi_analysis` is enabled and the arena holds a
        single specimen. Growth is called isotropic when, during the growth
        beginning, the specimen covers at least 90% of a dilation of its
        shape at first movement. The transition out of isotropy is dated by
        the first frame whose solidity drops below 90% of the pre-movement
        reference. Results are written into `one_descriptor_per_arena`
        ('is_growth_isotropic', 'iso_digi_transi').
        """
        if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena']:
            self.one_descriptor_per_arena['iso_digi_transi'] = pd.NA
            if not pd.isna(self.one_descriptor_per_arena['first_move']):
                logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting growth transition analysis.")

                # II) Once a pseudopod is deployed, look for a disk/ around the original shape
                growth_begining = self.surfarea < ((self.surfarea[0] * 1.2) + ((self.dims[1] / 4) * (self.dims[2] / 4)))
                dilated_origin = cv2.dilate(self.binary[self.one_descriptor_per_arena['first_move'], :, :], kernel=cross_33, iterations=10, borderType=cv2.BORDER_CONSTANT, borderValue=0)
                # Per-frame overlap between the shape and the dilated origin,
                # masked to the growth-beginning frames.
                isisotropic = np.sum(self.binary[:, :, :] * dilated_origin, (1, 2))
                isisotropic *= growth_begining
                # Ask if the dilated origin area is 90% covered during the growth beginning
                isisotropic = isisotropic > 0.9 * dilated_origin.sum()
                if np.any(isisotropic):
                    self.one_descriptor_per_arena['is_growth_isotropic'] = 1
                    # Determine a solidity reference to look for a potential breaking of the isotropic growth
                    if self.compute_solidity_separately:
                        solidity_reference = np.mean(self.solidity[:self.one_descriptor_per_arena['first_move']])
                        different_solidity = self.solidity < (0.9 * solidity_reference)
                        del self.solidity
                    else:
                        solidity_reference = np.mean(
                            self.one_row_per_frame.iloc[:(self.one_descriptor_per_arena['first_move']), :]["solidity"])
                        different_solidity = self.one_row_per_frame["solidity"].values < (0.9 * solidity_reference)
                    # Make sure that isotropic breaking not occur before isotropic growth
                    if np.any(different_solidity):
                        self.one_descriptor_per_arena["iso_digi_transi"] = np.nonzero(different_solidity)[0][0] * self.time_interval
                else:
                    self.one_descriptor_per_arena['is_growth_isotropic'] = 0
            else:
                # No first movement detected: isotropy is undefined.
                self.one_descriptor_per_arena['is_growth_isotropic'] = pd.NA
|
|
1263
|
+
|
|
1264
|
+
|
|
1265
|
+
def check_converted_video_type(self):
|
|
1266
|
+
"""
|
|
1267
|
+
Check if the converted video type is uint8 and normalize it if necessary.
|
|
1268
|
+
"""
|
|
1269
|
+
if self.converted_video.dtype != "uint8":
|
|
1270
|
+
self.converted_video = bracket_to_uint8_image_contrast(self.converted_video)
|
|
1271
|
+
|
|
1272
|
+
def networks_analysis(self, show_seg: bool=False):
|
|
1273
|
+
"""
|
|
1274
|
+
Perform network detection within a given arena.
|
|
1275
|
+
|
|
1276
|
+
This function carries out the task of detecting networks in an arena
|
|
1277
|
+
based on several parameters and variables. It involves checking video
|
|
1278
|
+
type, performing network detection over time, potentially detecting
|
|
1279
|
+
pseudopods, and smoothing segmentation. The results can be visualized or saved.
|
|
1280
|
+
Extract and analyze graphs from a binary representation of network dynamics, producing vertex
|
|
1281
|
+
and edge tables that represent the graph structure over time.
|
|
1282
|
+
|
|
1283
|
+
Args:
|
|
1284
|
+
None
|
|
1285
|
+
|
|
1286
|
+
Attributes:
|
|
1287
|
+
vars (dict): Dictionary of variables that control the graph extraction process.
|
|
1288
|
+
- 'save_graph': Boolean indicating if graph extraction should be performed.
|
|
1289
|
+
- 'save_coord_network': Boolean indicating if the coordinate network should be saved.
|
|
1290
|
+
|
|
1291
|
+
one_descriptor_per_arena (dict): Dictionary containing descriptors for each arena.
|
|
1292
|
+
|
|
1293
|
+
dims (tuple): Tuple containing dimension information.
|
|
1294
|
+
- [0]: Integer representing the number of time steps.
|
|
1295
|
+
- [1]: Integer representing the y-dimension size.
|
|
1296
|
+
- [2]: Integer representing the x-dimension size.
|
|
1297
|
+
|
|
1298
|
+
origin (np.ndarray): Binary image representing the origin of the network.
|
|
1299
|
+
|
|
1300
|
+
binary (np.ndarray): Binary representation of network dynamics over time.
|
|
1301
|
+
Shape: (time_steps, y_dimension, x_dimension).
|
|
1302
|
+
|
|
1303
|
+
converted_video (np.ndarray): Converted video data.
|
|
1304
|
+
Shape: (y_dimension, x_dimension, time_steps).
|
|
1305
|
+
|
|
1306
|
+
network_dynamics (np.ndarray): Network dynamics representation.
|
|
1307
|
+
Shape: (time_steps, y_dimension, x_dimension).
|
|
1308
|
+
|
|
1309
|
+
Notes:
|
|
1310
|
+
- This method performs graph extraction and saves the vertex and edge tables to CSV files.
|
|
1311
|
+
- The CSV files are named according to the arena, time steps, and dimensions.
|
|
1312
|
+
|
|
1313
|
+
Args:
|
|
1314
|
+
show_seg: bool = False
|
|
1315
|
+
A flag that determines whether to display the segmentation visually.
|
|
1316
|
+
"""
|
|
1317
|
+
coord_pseudopods = None
|
|
1318
|
+
if not self.vars['several_blob_per_arena'] and self.vars['save_coord_network']:
|
|
1319
|
+
self.check_converted_video_type()
|
|
1320
|
+
|
|
1321
|
+
if self.vars['origin_state'] == "constant":
|
|
1322
|
+
self.coord_network, coord_pseudopods = detect_network_dynamics(self.converted_video, self.binary,
|
|
1323
|
+
self.one_descriptor_per_arena['arena'], 0,
|
|
1324
|
+
self.visu, self.origin, True, True,
|
|
1325
|
+
self.vars['save_coord_network'], show_seg)
|
|
1326
|
+
else:
|
|
1327
|
+
self.coord_network, coord_pseudopods = detect_network_dynamics(self.converted_video, self.binary,
|
|
1328
|
+
self.one_descriptor_per_arena['arena'], 0,
|
|
1329
|
+
self.visu, None, True, True,
|
|
1330
|
+
self.vars['save_coord_network'], show_seg)
|
|
1331
|
+
|
|
1332
|
+
if not self.vars['several_blob_per_arena'] and self.vars['save_graph']:
|
|
1333
|
+
if self.coord_network is None:
|
|
1334
|
+
self.coord_network = np.array(np.nonzero(self.binary))
|
|
1335
|
+
if self.vars['origin_state'] == "constant":
|
|
1336
|
+
extract_graph_dynamics(self.converted_video, self.coord_network, self.one_descriptor_per_arena['arena'],
|
|
1337
|
+
0, self.origin, coord_pseudopods)
|
|
1338
|
+
else:
|
|
1339
|
+
extract_graph_dynamics(self.converted_video, self.coord_network, self.one_descriptor_per_arena['arena'],
|
|
1340
|
+
0, None, coord_pseudopods)
|
|
1341
|
+
|
|
1342
|
+
def study_cytoscillations(self, show_seg: bool=False):
|
|
1343
|
+
"""
|
|
1344
|
+
|
|
1345
|
+
Study the cytoskeletal oscillations within a video frame by frame.
|
|
1346
|
+
|
|
1347
|
+
This method performs an analysis of cytoskeletal oscillations in the video,
|
|
1348
|
+
identifying regions of influx and efflux based on pixel connectivity.
|
|
1349
|
+
It also handles memory allocation for the oscillations video, computes
|
|
1350
|
+
connected components, and optionally displays the segmented regions.
|
|
1351
|
+
|
|
1352
|
+
Args:
|
|
1353
|
+
show_seg (bool): If True, display the segmentation results.
|
|
1354
|
+
"""
|
|
1355
|
+
if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
|
|
1356
|
+
oscillations_video = detect_oscillations_dynamics(self.converted_video, self.binary,
|
|
1357
|
+
self.one_descriptor_per_arena['arena'], self.start,
|
|
1358
|
+
self.vars['expected_oscillation_period'],
|
|
1359
|
+
self.time_interval,
|
|
1360
|
+
self.vars['minimal_oscillating_cluster_size'],
|
|
1361
|
+
self.vars['min_ram_free'],
|
|
1362
|
+
self.vars['lose_accuracy_to_save_memory'],
|
|
1363
|
+
self.vars['save_coord_thickening_slimming'])
|
|
1364
|
+
del oscillations_video
|
|
1365
|
+
|
|
1366
|
+
|
|
1367
|
+
def fractal_descriptions(self):
|
|
1368
|
+
"""
|
|
1369
|
+
|
|
1370
|
+
Method for analyzing fractal patterns in binary data.
|
|
1371
|
+
|
|
1372
|
+
Fractal analysis is performed on the binary representation of the data,
|
|
1373
|
+
optionally considering network dynamics if specified. The results
|
|
1374
|
+
include fractal dimensions, R-values, and box counts for the data.
|
|
1375
|
+
|
|
1376
|
+
If network analysis is enabled, additional fractal dimensions,
|
|
1377
|
+
R-values, and box counts are calculated for the inner network.
|
|
1378
|
+
If 'output_in_mm' is True, then values in mm can be obtained.
|
|
1379
|
+
|
|
1380
|
+
"""
|
|
1381
|
+
if self.vars['fractal_analysis']:
|
|
1382
|
+
logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting fractal analysis.")
|
|
1383
|
+
|
|
1384
|
+
if self.vars['save_coord_network']:
|
|
1385
|
+
box_counting_dimensions = np.zeros((self.dims[0], 7), dtype=np.float64)
|
|
1386
|
+
else:
|
|
1387
|
+
box_counting_dimensions = np.zeros((self.dims[0], 3), dtype=np.float64)
|
|
1388
|
+
|
|
1389
|
+
for t in np.arange(self.dims[0]):
|
|
1390
|
+
if self.vars['save_coord_network']:
|
|
1391
|
+
current_network = np.zeros(self.dims[1:], dtype=np.uint8)
|
|
1392
|
+
net_t = self.coord_network[1:, self.coord_network[0, :] == t]
|
|
1393
|
+
current_network[net_t[0], net_t[1]] = 1
|
|
1394
|
+
box_counting_dimensions[t, 0] = current_network.sum()
|
|
1395
|
+
zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...], min_mesh_side=self.vars[
|
|
1396
|
+
'fractal_box_side_threshold'], zoom_step=self.vars['fractal_zoom_step'], contours=True)
|
|
1397
|
+
box_counting_dimensions[t, 1], box_counting_dimensions[t, 2], box_counting_dimensions[
|
|
1398
|
+
t, 3] = box_counting_dimension(zoomed_binary, side_lengths)
|
|
1399
|
+
zoomed_binary, side_lengths = prepare_box_counting(current_network,
|
|
1400
|
+
min_mesh_side=self.vars[
|
|
1401
|
+
'fractal_box_side_threshold'],
|
|
1402
|
+
zoom_step=self.vars['fractal_zoom_step'],
|
|
1403
|
+
contours=False)
|
|
1404
|
+
box_counting_dimensions[t, 4], box_counting_dimensions[t, 5], box_counting_dimensions[
|
|
1405
|
+
t, 6] = box_counting_dimension(zoomed_binary, side_lengths)
|
|
1406
|
+
else:
|
|
1407
|
+
zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...],
|
|
1408
|
+
min_mesh_side=self.vars['fractal_box_side_threshold'],
|
|
1409
|
+
zoom_step=self.vars['fractal_zoom_step'], contours=True)
|
|
1410
|
+
box_counting_dimensions[t, :] = box_counting_dimension(zoomed_binary, side_lengths)
|
|
1411
|
+
|
|
1412
|
+
if self.vars['save_coord_network']:
|
|
1413
|
+
self.one_row_per_frame["inner_network_size"] = box_counting_dimensions[:, 0]
|
|
1414
|
+
self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 1]
|
|
1415
|
+
self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
|
|
1416
|
+
self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 3]
|
|
1417
|
+
self.one_row_per_frame["inner_network_fractal_dimension"] = box_counting_dimensions[:, 4]
|
|
1418
|
+
self.one_row_per_frame["inner_network_fractal_r_value"] = box_counting_dimensions[:, 5]
|
|
1419
|
+
self.one_row_per_frame["inner_network_fractal_box_nb"] = box_counting_dimensions[:, 6]
|
|
1420
|
+
if self.vars['output_in_mm']:
|
|
1421
|
+
self.one_row_per_frame["inner_network_size"] *= self.vars['average_pixel_size']
|
|
1422
|
+
else:
|
|
1423
|
+
self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 0]
|
|
1424
|
+
self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 1]
|
|
1425
|
+
self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
|
|
1426
|
+
|
|
1427
|
+
if self.vars['save_coord_network']:
|
|
1428
|
+
del self.coord_network
|
|
1429
|
+
|
|
1430
|
+
def save_efficiency_tests(self):
    """
    Provide images allowing to assess the analysis efficiency.

    Builds two annotated snapshots stored on the instance:
    - ``self.efficiency_test_1``: the frame taken after one tenth of the
      video, with the detected contours overlaid.
    - ``self.efficiency_test_2``: the last well-detected frame, with the
      contours of the binary mask overlaid.
    Both images get the arena label drawn on their left side.
    """
    # Provide images allowing to assess the analysis efficiency
    # Pick a frame index ~10% into the video (0 for single-frame videos).
    if self.dims[0] > 1:
        after_one_tenth_of_time = np.ceil(self.dims[0] / 10).astype(np.uint64)
    else:
        after_one_tenth_of_time = 0

    # Last frame whose detection is trusted: back off by the number of
    # lost frames when threshold segmentation was used; if the video is
    # shorter than the lost-frame count, fall back to the first frame.
    last_good_detection = self.dims[0] - 1
    if self.dims[0] > self.lost_frames:
        if self.vars['do_threshold_segmentation']:
            last_good_detection -= self.lost_frames
    else:
        last_good_detection = 0
    if self.visu is None:
        # No display video available: use the converted video, promoting
        # it to 3 channels first if it is grayscale.
        if len(self.converted_video.shape) == 3:
            self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
                                            axis=3)
        self.efficiency_test_1 = deepcopy(self.converted_video[after_one_tenth_of_time, ...])
        self.efficiency_test_2 = deepcopy(self.converted_video[last_good_detection, ...])
    else:
        self.efficiency_test_1 = deepcopy(self.visu[after_one_tenth_of_time, :, :, :])
        self.efficiency_test_2 = deepcopy(self.visu[last_good_detection, :, :, :])

    # Draw the arena number on the left, vertically centered.
    position = (25, self.dims[1] // 2)
    text = str(self.one_descriptor_per_arena['arena'])
    # First snapshot: contours from the project helper get_contours.
    contours = np.nonzero(get_contours(self.binary[after_one_tenth_of_time, :, :]))
    self.efficiency_test_1[contours[0], contours[1], :] = self.vars['contour_color']
    self.efficiency_test_1 = cv2.putText(self.efficiency_test_1, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
                                         (self.vars["contour_color"], self.vars["contour_color"],
                                          self.vars["contour_color"], 255), 3)

    # Second snapshot: contours computed as mask minus its 3x3-cross
    # erosion (a 1-pixel-wide outline).
    # NOTE(review): this differs from the get_contours path above —
    # presumably equivalent outlines; confirm against get_contours.
    eroded_binary = cv2.erode(self.binary[last_good_detection, :, :], cross_33)
    contours = np.nonzero(self.binary[last_good_detection, :, :] - eroded_binary)
    self.efficiency_test_2[contours[0], contours[1], :] = self.vars['contour_color']
    self.efficiency_test_2 = cv2.putText(self.efficiency_test_2, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
                                         (self.vars["contour_color"], self.vars["contour_color"],
                                          self.vars["contour_color"], 255), 3)
|
1477
|
+
def save_video(self):
    """
    Save the processed video with contours and other annotations.

    When ``vars['save_processed_videos']`` is set, this method:
    - overlays the 1-pixel contour of the binary mask on every frame,
      colored with ``vars['contour_color']`` (red, in BGR, before the
      isotropic/digitated growth transition when that analysis applies);
    - frees several large per-arena arrays (``self.binary`` & co.);
    - places the unaltered video (reloaded from ``ind_<arena>.npy`` when
      available) on the left of the processed one;
    - stamps the elapsed time on each frame when time data exists;
    - writes the result to ``ind_<arena><videos_extension>``.

    Side effects: deletes ``self.binary``, ``self.surfarea``,
    ``self.borders``, ``self.origin``, ``self.origin_idx``,
    ``self.mean_intensity_per_frame`` and ``self.erodila_disk``.
    """
    if self.vars['save_processed_videos']:
        self.check_converted_video_type()
        # Promote a grayscale video to 3 channels so contours can be colored.
        if len(self.converted_video.shape) == 3:
            self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
                                            axis=3)
        for t in np.arange(self.dims[0]):
            # Contour = binary mask minus its 3x3-cross erosion.
            eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
            contours = np.nonzero(self.binary[t, :, :] - eroded_binary)
            self.converted_video[t, contours[0], contours[1], :] = self.vars['contour_color']
            if "iso_digi_transi" in self.one_descriptor_per_arena.keys():
                if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not pd.isna(self.one_descriptor_per_arena["iso_digi_transi"]):
                    if self.one_descriptor_per_arena['is_growth_isotropic'] == 1:
                        # Before the isotropic->digitated transition frame,
                        # draw the contour in red (BGR) instead.
                        if t < self.one_descriptor_per_arena["iso_digi_transi"]:
                            self.converted_video[t, contours[0], contours[1], :] = 0, 0, 255
        # Free large intermediates before concatenating two videos in memory.
        del self.binary
        del self.surfarea
        del self.borders
        del self.origin
        del self.origin_idx
        del self.mean_intensity_per_frame
        del self.erodila_disk
        collect()
        if self.visu is None:
            true_frame_width = self.dims[2]
            if len(self.vars['background_list']) == 0:
                self.background = None
            else:
                self.background = self.vars['background_list'][self.one_descriptor_per_arena['arena'] - 1]
            if os.path.isfile(f"ind_{self.one_descriptor_per_arena['arena']}.npy"):
                # Reload the unaltered video to display it next to the
                # processed one.
                self.visu = video2numpy(f"ind_{self.one_descriptor_per_arena['arena']}.npy",
                                        None, true_frame_width=true_frame_width)
            else:
                self.visu = self.converted_video
        if len(self.visu.shape) == 3:
            self.visu = np.stack((self.visu, self.visu, self.visu), axis=3)
        # Unaltered video on the left, processed video on the right.
        self.converted_video = np.concatenate((self.visu, self.converted_video), axis=2)

        if np.any(self.one_row_per_frame['time'] > 0):
            position = (5, self.dims[1] - 5)
            # FIX: replaced a stray debug print() with a logging call.
            logging.debug("time_step_is_arbitrary: %s", self.vars['time_step_is_arbitrary'])
            if self.vars['time_step_is_arbitrary']:
                time_unit = ""
            else:
                time_unit = " min"
            for t in np.arange(self.dims[0]):
                image = self.converted_video[t, ...]
                text = str(self.one_row_per_frame['time'][t]) + time_unit
                image = cv2.putText(image,  # numpy array on which text is written
                                    text,  # text
                                    position,  # position at which writing has to start
                                    cv2.FONT_HERSHEY_SIMPLEX,  # font family
                                    1,  # font size
                                    (self.vars["contour_color"], self.vars["contour_color"], self.vars["contour_color"], 255),  #(209, 80, 0, 255),
                                    2)  # font stroke
                self.converted_video[t, ...] = image
        vid_name = f"ind_{self.one_descriptor_per_arena['arena']}{self.vars['videos_extension']}"
        write_video(self.converted_video, vid_name, is_color=True, fps=self.vars['video_fps'])
|
1551
|
+
def save_results(self):
    """
    Save the results of testing and video processing.

    Saves the efficiency-test images and the processed video, then writes
    the CSV outputs:
    - ``one_row_per_frame_arena<arena>.csv`` when several blobs share an
      arena;
    - ``one_row_per_arena.csv``, updated in place when an existing file
      has a compatible column count, recreated otherwise.
    Finally removes the temporary ``ind_<arena>.npy`` file unless
    ``vars['keep_unaltered_videos']`` is set.

    All CSV writes log an error (instead of crashing) when the file is
    held open by another program.
    """
    self.save_efficiency_tests()
    self.save_video()
    if self.vars['several_blob_per_arena']:
        try:
            with open(f"one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv", 'w') as file:
                self.one_row_per_frame.to_csv(file, sep=';', index=False, lineterminator='\n')
        except PermissionError:
            logging.error(f"Never let one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv open when Cellects runs")

    create_new_csv: bool = False
    if os.path.isfile("one_row_per_arena.csv"):
        # FIX: stats was left unbound when the read failed with
        # PermissionError, raising a NameError below; fall back to
        # recreating the file instead.
        stats = None
        try:
            with open("one_row_per_arena.csv", 'r') as file:
                stats = pd.read_csv(file, header=0, sep=";")
        except PermissionError:
            logging.error("Never let one_row_per_arena.csv open when Cellects runs")

        # NOTE(review): the update path assumes one extra leading column
        # compared to the descriptor dict (hence the -1 and the 1: slice);
        # kept as-is — confirm against the file layout.
        if stats is not None and len(self.one_descriptor_per_arena) == len(stats.columns) - 1:
            try:
                with open("one_row_per_arena.csv", 'w') as file:
                    stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), 1:] = self.one_descriptor_per_arena.values()
                    stats.to_csv(file, sep=';', index=False, lineterminator='\n')
            except PermissionError:
                logging.error("Never let one_row_per_arena.csv open when Cellects runs")
        else:
            create_new_csv = True
    else:
        create_new_csv = True
    if create_new_csv:
        # FIX: this write was unprotected, unlike the sibling method
        # change_results_of_one_arena; a locked file crashed the run.
        try:
            with open("one_row_per_arena.csv", 'w') as file:
                stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
                                     columns=list(self.one_descriptor_per_arena.keys()))
                stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = self.one_descriptor_per_arena.values()
                stats.to_csv(file, sep=';', index=False, lineterminator='\n')
        except PermissionError:
            logging.error("Never let one_row_per_arena.csv open when Cellects runs")
    if not self.vars['keep_unaltered_videos'] and os.path.isfile(f"ind_{self.one_descriptor_per_arena['arena']}.npy"):
        os.remove(f"ind_{self.one_descriptor_per_arena['arena']}.npy")
|
1596
|
+
def change_results_of_one_arena(self, save_video: bool = True):
|
|
1597
|
+
"""
|
|
1598
|
+
Manages the saving and updating of CSV files based on data extracted from analyzed
|
|
1599
|
+
one arena. Specifically handles three CSV files: "one_row_per_arena.csv",
|
|
1600
|
+
"one_row_per_frame.csv", and "one_row_per_oscillating_cluster.csv".
|
|
1601
|
+
Each file is updated or created based on the presence of existing data.
|
|
1602
|
+
The method ensures that each CSV file contains the relevant information for
|
|
1603
|
+
the given arena, frame, and oscillator cluster data.
|
|
1604
|
+
"""
|
|
1605
|
+
if save_video:
|
|
1606
|
+
self.save_video()
|
|
1607
|
+
# I/ Update/Create one_row_per_arena.csv
|
|
1608
|
+
create_new_csv: bool = False
|
|
1609
|
+
if os.path.isfile("one_row_per_arena.csv"):
|
|
1610
|
+
try:
|
|
1611
|
+
with open(f"one_row_per_arena.csv", 'r') as file:
|
|
1612
|
+
stats = pd.read_csv(file, header=0, sep=";")
|
|
1613
|
+
for stat_name, stat_value in self.one_descriptor_per_arena.items():
|
|
1614
|
+
if stat_name in stats.columns:
|
|
1615
|
+
stats.loc[(self.one_descriptor_per_arena['arena'] - 1), stat_name] = np.uint32(self.one_descriptor_per_arena[stat_name])
|
|
1616
|
+
with open(f"one_row_per_arena.csv", 'w') as file:
|
|
1617
|
+
stats.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1618
|
+
except PermissionError:
|
|
1619
|
+
logging.error("Never let one_row_per_arena.csv open when Cellects runs")
|
|
1620
|
+
except Exception as e:
|
|
1621
|
+
logging.error(f"{e}")
|
|
1622
|
+
create_new_csv = True
|
|
1623
|
+
else:
|
|
1624
|
+
create_new_csv = True
|
|
1625
|
+
if create_new_csv:
|
|
1626
|
+
logging.info("Create a new one_row_per_arena.csv file")
|
|
1627
|
+
try:
|
|
1628
|
+
with open(f"one_row_per_arena.csv", 'w') as file:
|
|
1629
|
+
stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
|
|
1630
|
+
columns=list(self.one_descriptor_per_arena.keys()))
|
|
1631
|
+
stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = self.one_descriptor_per_arena.values() # np.array(list(self.one_descriptor_per_arena.values()), dtype=np.uint32)
|
|
1632
|
+
stats.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1633
|
+
except PermissionError:
|
|
1634
|
+
logging.error("Never let one_row_per_arena.csv open when Cellects runs")
|
|
1635
|
+
|
|
1636
|
+
# II/ Update/Create one_row_per_frame.csv
|
|
1637
|
+
create_new_csv = False
|
|
1638
|
+
if os.path.isfile("one_row_per_frame.csv"):
|
|
1639
|
+
try:
|
|
1640
|
+
with open(f"one_row_per_frame.csv", 'r') as file:
|
|
1641
|
+
descriptors = pd.read_csv(file, header=0, sep=";")
|
|
1642
|
+
for stat_name, stat_value in self.one_row_per_frame.items():
|
|
1643
|
+
if stat_name in descriptors.columns:
|
|
1644
|
+
descriptors.loc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0] - 1), stat_name] = self.one_row_per_frame.loc[:, stat_name].values[:]
|
|
1645
|
+
with open(f"one_row_per_frame.csv", 'w') as file:
|
|
1646
|
+
descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1647
|
+
except PermissionError:
|
|
1648
|
+
logging.error("Never let one_row_per_frame.csv open when Cellects runs")
|
|
1649
|
+
except Exception as e:
|
|
1650
|
+
logging.error(f"{e}")
|
|
1651
|
+
create_new_csv = True
|
|
1652
|
+
else:
|
|
1653
|
+
create_new_csv = True
|
|
1654
|
+
if create_new_csv:
|
|
1655
|
+
logging.info("Create a new one_row_per_frame.csv file")
|
|
1656
|
+
try:
|
|
1657
|
+
with open(f"one_row_per_frame.csv", 'w') as file:
|
|
1658
|
+
descriptors = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']) * self.dims[0], len(self.one_row_per_frame.columns))),
|
|
1659
|
+
columns=list(self.one_row_per_frame.keys()))
|
|
1660
|
+
descriptors.iloc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0]), :] = self.one_row_per_frame
|
|
1661
|
+
descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
|
|
1662
|
+
except PermissionError:
|
|
1663
|
+
logging.error("Never let one_row_per_frame.csv open when Cellects runs")
|
|
1664
|
+
|