cellects 0.1.3-py3-none-any.whl → 0.2.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__main__.py +65 -25
- cellects/config/all_vars_dict.py +18 -17
- cellects/core/cellects_threads.py +1034 -396
- cellects/core/motion_analysis.py +1664 -2010
- cellects/core/one_image_analysis.py +1082 -1061
- cellects/core/program_organizer.py +1687 -1316
- cellects/core/script_based_run.py +80 -76
- cellects/gui/advanced_parameters.py +365 -326
- cellects/gui/cellects.py +102 -91
- cellects/gui/custom_widgets.py +4 -3
- cellects/gui/first_window.py +226 -104
- cellects/gui/if_several_folders_window.py +117 -68
- cellects/gui/image_analysis_window.py +841 -450
- cellects/gui/required_output.py +100 -56
- cellects/gui/ui_strings.py +840 -0
- cellects/gui/video_analysis_window.py +317 -135
- cellects/image_analysis/cell_leaving_detection.py +64 -4
- cellects/image_analysis/image_segmentation.py +451 -22
- cellects/image_analysis/morphological_operations.py +2166 -1635
- cellects/image_analysis/network_functions.py +616 -253
- cellects/image_analysis/one_image_analysis_threads.py +94 -153
- cellects/image_analysis/oscillations_functions.py +131 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
- cellects/image_analysis/shape_descriptors.py +517 -466
- cellects/utils/formulas.py +169 -6
- cellects/utils/load_display_save.py +362 -105
- cellects/utils/utilitarian.py +86 -9
- cellects-0.2.7.dist-info/LICENSE +675 -0
- cellects-0.2.7.dist-info/METADATA +829 -0
- cellects-0.2.7.dist-info/RECORD +44 -0
- cellects/core/one_video_per_blob.py +0 -540
- cellects/image_analysis/cluster_flux_study.py +0 -102
- cellects-0.1.3.dist-info/LICENSE.odt +0 -0
- cellects-0.1.3.dist-info/METADATA +0 -176
- cellects-0.1.3.dist-info/RECORD +0 -44
- {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/WHEEL +0 -0
- {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/entry_points.txt +0 -0
- {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/top_level.txt +0 -0
@@ -1,1316 +1,1687 @@
- [1,316 lines removed: the previous version of cellects/core/program_organizer.py. Only fragments of the removed side were rendered in this view — its module docstring (matching the new one below) and truncated import and assignment statements — so it is summarized here rather than reproduced.]

The replacement module follows:
```python
#!/usr/bin/env python3
"""This file contains the class constituting the link between the graphical interface and the computations.
First, Cellects analyzes one image in order to get a color space combination maximizing the contrast between the specimens
and the background.
Second, Cellects automatically delineates each arena.
Third, Cellects writes one video for each arena.
Fourth, Cellects segments the video and applies post-processing algorithms to improve the segmentation.
Fifth, Cellects extracts variables and stores them in .csv files.
"""

import pickle
import sys
import os
import logging
from copy import deepcopy
import psutil
import cv2
from numba.typed import Dict as TDict
import pandas as pd
import numpy as np
from numpy.typing import NDArray
from psutil import virtual_memory
from pathlib import Path
import natsort
from cellects.utils.formulas import bracket_to_uint8_image_contrast
from cellects.utils.load_display_save import extract_time
from cellects.image_analysis.network_functions import detect_network_dynamics, extract_graph_dynamics
from cellects.utils.load_display_save import PickleRick, readim, is_raw_image, read_h5_array, get_h5_keys
from cellects.utils.utilitarian import insensitive_glob, vectorized_len
from cellects.core.cellects_paths import CELLECTS_DIR, ALL_VARS_PKL_FILE
from cellects.config.all_vars_dict import DefaultDicts
from cellects.image_analysis.shape_descriptors import from_shape_descriptors_class, compute_one_descriptor_per_frame, compute_one_descriptor_per_colony
from cellects.image_analysis.morphological_operations import create_ellipse, rank_from_top_to_bottom_from_left_to_right, \
    get_quick_bounding_boxes, get_bb_with_moving_centers, get_contours, keep_one_connected_component, box_counting_dimension, prepare_box_counting
from cellects.image_analysis.progressively_add_distant_shapes import ProgressivelyAddDistantShapes
from cellects.core.one_image_analysis import OneImageAnalysis
from cellects.utils.load_display_save import read_and_rotate, video2numpy
from cellects.image_analysis.morphological_operations import shape_selection, draw_img_with_mask


class ProgramOrganizer:
    """
    Organizes and manages variables, configuration settings, and processing workflows for motion analysis in a Cellects project.

    This class maintains global state and analysis-specific data structures, handles file operations,
    processes image/video inputs, and generates output tables. It provides methods to load/save configurations,
    segment images, track objects across frames, and export results with metadata.

    Attributes
    ----------
    one_arena_done : bool
        Flag indicating whether a single arena has been processed.
    reduce_image_dim : bool
        Whether image dimensions should be reduced (e.g., from color to grayscale).
    first_exp_ready_to_run : bool
        Indicates whether the initial experiment setup is complete and ready for execution.
    data_to_save : dict of {str: bool}
        Specifies which data types (first image, coordinates, EXIF) require saving.
    videos : OneVideoPerBlob or None
        Video processing container instance.
    motion : MotionAnalysis or None
        Motion tracking and analysis module.
    all : dict
        Global configuration parameters for the entire workflow.
    vars : dict
        Analysis-specific variables used by `MotionAnalysis`.
    first_im, last_im : np.ndarray or None
        First and last images of the dataset for preprocessing.
    data_list : list of str
        List of video/image file paths in the working directory.
    computed_video_options : np.ndarray of bool
        Flags indicating which video processing options have been applied.
    one_row_per_arena, one_row_per_frame : pd.DataFrame or None
        Result tables for different levels of analysis (per arena, per frame, and oscillating clusters).

    Methods
    -------
    save_variable_dict() : Save configuration dictionaries to file.
    load_variable_dict() : Load saved configuration or initialize defaults.
    look_for_data() : Discover video/image files in the working directory.
    update_folder_id(...) : Update folder-specific metadata based on file structure.
    ...
    """

    def __init__(self):
        """
        This class stores all variables required for analysis, as well as
        methods to process them.
        Global variables (i.e. those that do not concern the MotionAnalysis)
        are stored directly in self.
        Variables used in the MotionAnalysis class are stored in a dict
        called self.vars.
        """
        if os.path.isfile('PickleRick.pkl'):
            os.remove('PickleRick.pkl')
        if os.path.isfile('PickleRick0.pkl'):
            os.remove('PickleRick0.pkl')
        self.one_arena_done: bool = False
        self.reduce_image_dim: bool = False
        self.first_exp_ready_to_run: bool = False
        self.data_to_save = {'first_image': False, 'coordinates': False, 'exif': False, 'vars': False}
        self.sample_number = None
        self.top = None
        self.motion = None
        self.analysis_instance = None
        self.computed_video_options = np.zeros(5, bool)
        self.vars = {}
        self.all = {}
        self.all['folder_list'] = []
        self.vars['first_detection_frame'] = 0
        self.first_im = None
        self.last_im = None
        self.vars['background_list'] = []
        self.starting_blob_hsize_in_pixels = None
        self.vars['first_move_threshold'] = None
        self.vars['convert_for_origin'] = None
        self.vars['convert_for_motion'] = None
        self.current_combination_id = 0
        self.data_list = []
        self.one_row_per_arena = None
        self.one_row_per_frame = None
        self.not_analyzed_individuals = None
        self.visualize: bool = True

    def update_variable_dict(self):
        """
        Update the `all` and `vars` dictionaries with new data from `DefaultDicts`.

        This method updates the `all` and `vars` dictionaries of the current object with
        data from a new instance of `DefaultDicts`. It checks whether any keys or descriptors
        are missing and adds them accordingly.

        Examples
        --------
        >>> organizer = ProgramOrganizer()
        >>> organizer.update_variable_dict()
        """
        dd = DefaultDicts()
        all = len(dd.all) != len(self.all)
        vars = len(dd.vars) != len(self.vars)
        all_desc = not 'descriptors' in self.all or len(dd.all['descriptors']) != len(self.all['descriptors'])
        vars_desc = not 'descriptors' in self.vars or len(dd.vars['descriptors']) != len(self.vars['descriptors'])
        if all:
            for key, val in dd.all.items():
                if not key in self.all:
                    self.all[key] = val
        if vars:
            for key, val in dd.vars.items():
                if not key in self.vars:
                    self.vars[key] = val
        if all_desc:
            for key, val in dd.all['descriptors'].items():
                if not key in self.all['descriptors']:
                    self.all['descriptors'][key] = val
        if vars_desc:
            for key, val in dd.vars['descriptors'].items():
                if not key in self.vars['descriptors']:
                    self.vars['descriptors'][key] = val
        self._set_analyzed_individuals()
```
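The merge above is one-directional: keys present in `DefaultDicts` but absent from the loaded dictionaries are added, and values the user already set are never overwritten. A minimal standalone sketch of that rule, using plain dicts with hypothetical keys in place of `DefaultDicts`:

```python
# Hypothetical defaults standing in for DefaultDicts().vars
defaults = {'first_detection_frame': 0, 'background_list': [], 'color_number': 2}
# A configuration saved by an older version, missing 'color_number'
saved = {'first_detection_frame': 3, 'background_list': []}

if len(defaults) != len(saved):      # same length test as update_variable_dict
    for key, val in defaults.items():
        if key not in saved:         # only absent keys are filled in
            saved[key] = val

assert saved['first_detection_frame'] == 3   # user value preserved
assert saved['color_number'] == 2            # missing key filled from defaults
```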
```python
    def save_variable_dict(self):
        """
        Saves the configuration dictionaries (`self.all` and `self.vars`) to a pickle file.

        If bio_mask or back_mask are not required for all folders, they are excluded from the saved data.

        Notes
        -----
        This method is used to preserve state between Cellects sessions or restart scenarios.
        """
        logging.info("Save the parameters dictionaries in the Cellects folder")
        self.all['vars'] = self.vars
        all_vars = deepcopy(self.all)
        if not self.all['keep_cell_and_back_for_all_folders']:
            all_vars['bio_mask'] = None
            all_vars['back_mask'] = None
        pickle_rick = PickleRick(0)
        pickle_rick.write_file(all_vars, ALL_VARS_PKL_FILE)

    def load_variable_dict(self):
        """
        Loads configuration dictionaries from a pickle file if available, otherwise initializes defaults.

        Tries to load saved parameters. If the file doesn't exist or loading fails due to corruption,
        default values are used instead (logging relevant warnings).

        Raises
        ------
        FileNotFoundError
            If no valid configuration file is found and default initialization fails.

        Notes
        -----
        This method ensures robust operation by handling missing or corrupted configuration files gracefully.
        """
        if os.path.isfile(ALL_VARS_PKL_FILE):
            logging.info("Load the parameters from all_vars.pkl in the config of the Cellects folder")
            try:
                with open(ALL_VARS_PKL_FILE, 'rb') as fileopen:
                    self.all = pickle.load(fileopen)
                self.vars = self.all['vars']
                self.update_variable_dict()
                logging.info("Success to load the parameters dictionaries from the Cellects folder")
            except Exception as exc:
                logging.error(f"Initialize default parameters because error: {exc}")
                default_dicts = DefaultDicts()
                self.all = default_dicts.all
                self.vars = default_dicts.vars
        else:
            logging.info("Initialize default parameters")
            default_dicts = DefaultDicts()
            self.all = default_dicts.all
            self.vars = default_dicts.vars
        if self.all['cores'] == 1:
            self.all['cores'] = os.cpu_count() - 1
```
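Behind the `PickleRick` wrapper, persistence is ordinary pickling, as the `pickle.load` call in `load_variable_dict` shows. A minimal sketch of the same save/load-with-fallback round trip using only the standard library (file name and keys hypothetical):

```python
import os
import pickle

config = {'cores': 1, 'vars': {'first_move_threshold': None}}

# Write, as save_variable_dict does after embedding vars under 'all'
with open('all_vars_example.pkl', 'wb') as f:
    pickle.dump(config, f)

# Read back with the same fallback shape as load_variable_dict
try:
    with open('all_vars_example.pkl', 'rb') as f:
        loaded = pickle.load(f)
except Exception:
    loaded = {'cores': 1, 'vars': {}}   # stand-in for DefaultDicts defaults

os.remove('all_vars_example.pkl')
assert loaded['cores'] == 1
```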
```python
    def look_for_data(self):
        """
        Discovers all relevant video/image data in the working directory.

        Uses natural sorting to handle filenames with numeric suffixes. Validates file consistency and logs
        warnings if filename patterns are inconsistent across folders.

        Raises
        ------
        ValueError
            If no files match the specified naming convention.

        Notes
        -----
        This method assumes all data files follow a predictable pattern with numeric extensions. Use caution
        in unpredictable directory structures, where this may fail silently or produce incorrect results.

        Examples
        --------
        >>> organizer.look_for_data()
        >>> print(organizer.data_list)
        ['/path/to/video1.avi', '/path/to/video2.avi']
        """
        os.chdir(Path(self.all['global_pathway']))
        logging.info(f"Dir: {self.all['global_pathway']}")
        self.data_list = insensitive_glob(self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
        self.all['folder_list'] = []
        self.all['folder_number'] = 1
        if len(self.data_list) > 0:
            self._sort_data_list()
            self.sample_number = self.all['first_folder_sample_number']
        else:
            content = os.listdir()
            for obj in content:
                if not os.path.isfile(obj):
                    data_list = insensitive_glob(obj + "/" + self.all['radical'] + '*' + self.all['extension'])
                    if len(data_list) > 0:
                        self.all['folder_list'].append(obj)
                        self.all['folder_number'] += 1
            self.all['folder_list'] = np.sort(self.all['folder_list'])

        if isinstance(self.all['sample_number_per_folder'], int) or len(self.all['sample_number_per_folder']) == 1:
            self.all['sample_number_per_folder'] = np.repeat(self.all['sample_number_per_folder'],
                                                             self.all['folder_number'])

    def _sort_data_list(self):
        """
        Sorts the data list using natural sorting.

        This function sorts the `data_list` attribute of an instance using the natsort library,
        which is useful when filenames mix numbers and letters.
        """
        if len(self.data_list) > 0:
            lengths = vectorized_len(self.data_list)
            if len(lengths) > 1 and np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
                logging.error(f"File names present strong variations and cannot be correctly sorted.")
            wrong_images = np.nonzero(np.char.startswith(self.data_list, "Analysis efficiency, "))[0]
            for w_im in wrong_images[::-1]:
                self.data_list.pop(w_im)
            self.data_list = natsort.natsorted(self.data_list)
        if self.all['im_or_vid'] == 1:
            self.vars['video_list'] = self.data_list
        else:
            self.vars['video_list'] = None
```
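`natsort.natsorted` is what keeps numbered frames in acquisition order where plain lexicographic sorting would not:

```python
import natsort

frames = ['img10.jpg', 'img2.jpg', 'img1.jpg']
print(sorted(frames))             # ['img1.jpg', 'img10.jpg', 'img2.jpg'] — lexicographic
print(natsort.natsorted(frames))  # ['img1.jpg', 'img2.jpg', 'img10.jpg'] — natural order
```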
```python
    def update_folder_id(self, sample_number: int, folder_name: str=""):
        """
        Update the current working directory and data list based on the given sample number
        and optional folder name.

        Parameters
        ----------
        sample_number : int
            The number of samples to analyze.
        folder_name : str, optional
            The name of the folder to change to. Default is an empty string.

        Notes
        -----
        This function changes the current working directory to the specified folder name
        and updates the data list based on the file names in that directory. It also
        sorts the data list and checks for strong variations in file names.
        """
        os.chdir(Path(self.all['global_pathway']) / folder_name)
        self.data_list = insensitive_glob(
            self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
        # Sorting is necessary when some modifications (like rotation) changed the last modification date
        self._sort_data_list()
        if self.all['im_or_vid'] == 1:
            self.sample_number = sample_number
        else:
            self.vars['img_number'] = len(self.data_list)
            self.sample_number = sample_number
        if not 'analyzed_individuals' in self.vars:
            self._set_analyzed_individuals()

    def _set_analyzed_individuals(self):
        """
        Set the analyzed individuals variable in the dataset.
        """
        if self.sample_number is not None:
            self.vars['analyzed_individuals'] = np.arange(self.sample_number) + 1
            if self.not_analyzed_individuals is not None:
                self.vars['analyzed_individuals'] = np.delete(self.vars['analyzed_individuals'],
                                                              self.not_analyzed_individuals - 1)
```
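The arena bookkeeping here is 1-based: `analyzed_individuals` holds arena numbers starting at 1, while `np.delete` expects 0-based indices, hence the `- 1`. A quick check of the exclusion logic:

```python
import numpy as np

sample_number = 5
analyzed = np.arange(sample_number) + 1   # array([1, 2, 3, 4, 5])
not_analyzed = np.array([2, 5])           # arenas to exclude, 1-based
analyzed = np.delete(analyzed, not_analyzed - 1)
print(analyzed)                           # [1 3 4]
```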
```python
    def load_data_to_run_cellects_quickly(self):
        """
        Load data from a pickle file and update the current state of the object.

        Loads and validates the data needed to run Cellects, updating the
        object's state accordingly. If the necessary data are not present or
        valid, it ensures the experiment is marked as not ready to run.

        Returns
        -------
        None

        Notes
        -----
        This function relies on the presence of a pickle file 'Data to run Cellects quickly.pkl'.
        It updates the state of various attributes based on the loaded data
        and logs appropriate messages.
        """
        self.first_im = None
        current_global_pathway = self.all['global_pathway']
        folder_number = self.all['folder_number']
        if folder_number > 1:
            folder_list = deepcopy(self.all['folder_list'])
            sample_number_per_folder = deepcopy(self.all['sample_number_per_folder'])

        if os.path.isfile('Data to run Cellects quickly.pkl'):
            pickle_rick = PickleRick()
            data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
            if data_to_run_cellects_quickly is None:
                data_to_run_cellects_quickly = {}

            if ('validated_shapes' in data_to_run_cellects_quickly) and ('coordinates' in data_to_run_cellects_quickly) and ('all' in data_to_run_cellects_quickly):
                logging.info("Success to load Data to run Cellects quickly.pkl from the user chosen directory")
                self.all = data_to_run_cellects_quickly['all']
                # If you want to add a new variable, first run an updated version of all_vars_dict,
                # then put a breakpoint here and run the following + self.save_data_to_run_cellects_quickly():
                self.vars = self.all['vars']
                self.update_variable_dict()
                folder_changed = False
                if current_global_pathway != self.all['global_pathway']:
                    folder_changed = True
                    logging.info(
                        "Although the folder is ready, it is not at the same place as it was during creation, updating")
                    self.all['global_pathway'] = current_global_pathway
                    if folder_number > 1:
                        self.all['global_pathway'] = current_global_pathway
                        self.all['folder_list'] = folder_list
                        self.all['folder_number'] = folder_number
                        self.all['sample_number_per_folder'] = sample_number_per_folder

                if len(self.data_list) == 0:
                    self.look_for_data()
                if folder_changed and folder_number > 1 and len(self.all['folder_list']) > 0:
                    self.update_folder_id(self.all['sample_number_per_folder'][0], self.all['folder_list'][0])
                self.get_first_image()
                self.get_last_image()
                (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = data_to_run_cellects_quickly[
                    'coordinates']
                if self.all['automatically_crop']:
                    self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
                    logging.info("Crop first image")
                    self.first_image.automatically_crop(self.first_image.crop_coord)
                    logging.info("Crop last image")
                    self.last_image.automatically_crop(self.first_image.crop_coord)
                else:
                    self.first_image.crop_coord = None
                self.first_image.validated_shapes = data_to_run_cellects_quickly['validated_shapes']
                self.first_image.im_combinations = []
                self.current_combination_id = 0
                self.first_image.im_combinations.append({})
                self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
                self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
                self.first_image.im_combinations[self.current_combination_id]['shape_number'] = data_to_run_cellects_quickly['shape_number']

                self.first_exp_ready_to_run = True
                if self.vars['subtract_background'] and len(self.vars['background_list']) == 0:
                    self.first_exp_ready_to_run = False
            else:
                self.first_exp_ready_to_run = False
        else:
            self.first_exp_ready_to_run = False
        if self.first_exp_ready_to_run:
            logging.info("The current (or the first) folder is ready to run")
        else:
            logging.info("The current (or the first) folder is not ready to run")

    def save_data_to_run_cellects_quickly(self, new_one_if_does_not_exist: bool=True):
        """
        Create the pickled quick-start file if it does not exist, or update the existing one.

        Parameters
        ----------
        new_one_if_does_not_exist : bool, optional
            Whether to create a new data file if it does not already exist.
            Default is True.

        Notes
        -----
        This method logs various information about its operations and handles the writing of data to a pickled file.
        """
        data_to_run_cellects_quickly = None
        if os.path.isfile('Data to run Cellects quickly.pkl'):
            logging.info("Update -Data to run Cellects quickly.pkl- in the user chosen directory")
            pickle_rick = PickleRick()
            data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
            if data_to_run_cellects_quickly is None:
                os.remove('Data to run Cellects quickly.pkl')
                logging.error("Failed to load Data to run Cellects quickly.pkl before update. Remove pre existing.")
        else:
            if new_one_if_does_not_exist:
                logging.info("Create Data to run Cellects quickly.pkl in the user chosen directory")
                data_to_run_cellects_quickly = {}
        if data_to_run_cellects_quickly is not None:
            if self.data_to_save['first_image']:
                data_to_run_cellects_quickly['validated_shapes'] = self.first_image.im_combinations[self.current_combination_id]['binary_image']
                data_to_run_cellects_quickly['shape_number'] = self.first_image.im_combinations[self.current_combination_id]['shape_number']
                # data_to_run_cellects_quickly['converted_image'] = self.first_image.im_combinations[self.current_combination_id]['converted_image']
            if self.data_to_save['coordinates']:
                data_to_run_cellects_quickly['coordinates'] = self._list_coordinates()
                logging.info("When they exist, do overwrite unaltered video")
                self.all['overwrite_unaltered_videos'] = True
            if self.data_to_save['exif']:
                self.vars['exif'] = self.extract_exif()
            self.all['vars'] = self.vars
            data_to_run_cellects_quickly['all'] = self.all
            pickle_rick = PickleRick()
            pickle_rick.write_file(data_to_run_cellects_quickly, 'Data to run Cellects quickly.pkl')

    def _list_coordinates(self):
        """
        Summarize the coordinates of images and video.

        Combine the crop coordinates from the first image with additional
        coordinates for left, right, top, and bottom boundaries to form a list of
        video coordinates. If the crop coordinates are not already set, initialize
        them to cover the entire image.

        Returns
        -------
        list of int
            The crop coordinates followed by the [left, right, top, bottom] bounds.
        """
        if self.first_image.crop_coord is None:
            self.first_image.crop_coord = [0, self.first_image.image.shape[0], 0,
                                           self.first_image.image.shape[1]]
        videos_coordinates = self.first_image.crop_coord + [self.left, self.right, self.top, self.bot]
        return videos_coordinates
```
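`_list_coordinates` packs eight integers: the four crop bounds of the first image followed by the four arena bounds, and `load_data_to_run_cellects_quickly` unpacks the same eight in the same order. A sketch with hypothetical pixel values:

```python
crop_coord = [0, 1080, 0, 1920]             # [y1, y2, x1, x2] over the full image
left, right, top, bot = 10, 1910, 10, 1070  # hypothetical arena bounds

videos_coordinates = crop_coord + [left, right, top, bot]
(ccy1, ccy2, ccx1, ccx2, left, right, top, bot) = videos_coordinates
assert (ccy1, ccy2, ccx1, ccx2) == (0, 1080, 0, 1920)
```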
```python
    def get_first_image(self, first_im: NDArray=None, sample_number: int=None):
        """
        Load and process the first image or frame from a video.

        This method handles loading the first image or the first frame of a video
        depending on whether the data is an image or a video. It performs necessary
        preprocessing and initializes relevant attributes for subsequent analysis.
        """
        self.reduce_image_dim = False
        if first_im is not None:
            self.first_im = first_im
            if sample_number is not None:
                self.sample_number = sample_number
        else:
            logging.info("Load first image")
        just_read_image = self.first_im is not None
        # just_read_image = self.analysis_instance is not None
        if self.all['im_or_vid'] == 1:
            if not just_read_image:
                self.analysis_instance = video2numpy(self.data_list[0])
                self.sample_number = len(self.data_list)
                self.vars['img_number'] = self.analysis_instance.shape[0]
                self.first_im = self.analysis_instance[0, ...]
            else:
                self.first_im = self.analysis_instance[self.vars['first_detection_frame'], ...]
            self.vars['dims'] = self.analysis_instance.shape[:3]

        else:
            self.vars['img_number'] = len(self.data_list)
            self.all['raw_images'] = is_raw_image(self.data_list[0])
            self.first_im = readim(self.data_list[self.vars['first_detection_frame']], self.all['raw_images'])
            self.vars['dims'] = [self.vars['img_number'], self.first_im.shape[0], self.first_im.shape[1]]

        if len(self.first_im.shape) == 3:
            if np.all(np.equal(self.first_im[:, :, 0], self.first_im[:, :, 1])) and np.all(
                    np.equal(self.first_im[:, :, 1], self.first_im[:, :, 2])):
                self.reduce_image_dim = True
        if self.reduce_image_dim:
            self.first_im = self.first_im[:, :, 0]
        self.first_image = OneImageAnalysis(self.first_im)
        self.vars['already_greyscale'] = self.first_image.already_greyscale
        if self.vars['already_greyscale']:
            self.vars["convert_for_origin"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
            self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
        if np.mean((np.mean(self.first_image.image[2, :, ...]), np.mean(self.first_image.image[-3, :, ...]),
                    np.mean(self.first_image.image[:, 2, ...]), np.mean(self.first_image.image[:, -3, ...]))) > 127:
            self.vars['contour_color']: np.uint8 = 0
        else:
            self.vars['contour_color']: np.uint8 = 255
        if self.vars['first_detection_frame'] > 0:
            self.vars['origin_state'] = 'invisible'

    def get_last_image(self, last_im: NDArray=None):
        """
        Load the last image from a video or image list and process it based on given parameters.

        Parameters
        ----------
        last_im : NDArray, optional
            The last image to be loaded. If not provided, the last image will be loaded from the data list.
        """
        logging.info("Load last image")
        if last_im is not None:
            self.last_im = last_im
        else:
            if self.all['im_or_vid'] == 1:
                cap = cv2.VideoCapture(self.data_list[0])
                counter = 0
                while cap.isOpened() and counter < self.vars['img_number']:
                    ret, frame = cap.read()
                    if self.reduce_image_dim:
                        frame = frame[:, :, 0]
                    self.analysis_instance[-1, ...] = frame
                    counter += 1
                self.last_im = frame
                cap.release()
            else:
                is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
                self.last_im = read_and_rotate(self.data_list[-1], self.first_im, self.all['raw_images'], is_landscape)
                if self.reduce_image_dim:
                    self.last_im = self.last_im[:, :, 0]
        self.last_image = OneImageAnalysis(self.last_im)
```
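`get_first_image` treats a 3-channel image whose channels are identical as grayscale and drops the redundant dimension. The channel-equality check reduces to:

```python
import numpy as np

img = np.dstack([np.arange(12).reshape(3, 4)] * 3).astype(np.uint8)  # B == G == R
reduce_image_dim = (np.all(np.equal(img[:, :, 0], img[:, :, 1]))
                    and np.all(np.equal(img[:, :, 1], img[:, :, 2])))
if reduce_image_dim:
    img = img[:, :, 0]   # keep a single channel
print(img.shape)         # (3, 4)
```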
```python
    def extract_exif(self):
        """
        Extract EXIF data from image or video files.

        Notes
        -----
        If `extract_time_interval` is True and extraction is unsuccessful, arbitrary time steps will be used.
        Timings are normalized to minutes for consistency across different files.
        """
        self.vars['time_step_is_arbitrary'] = True
        if self.all['im_or_vid'] == 1:
            self.vars['dims'] = self.analysis_instance.shape
            timings = np.arange(self.vars['dims'][0])
        else:
            timings = np.arange(len(self.data_list))
        if sys.platform.startswith('win'):
            pathway = os.getcwd() + '\\'
        else:
            pathway = os.getcwd() + '/'
        if not 'extract_time_interval' in self.all:
            self.all['extract_time_interval'] = True
        if self.all['extract_time_interval']:
            self.vars['time_step'] = 1
            try:
                timings = extract_time(self.data_list, pathway, self.all['raw_images'])
                timings = timings - timings[0]
                timings = timings / 60
                time_step = np.diff(timings)
                if len(time_step) > 0:
                    time_step = np.mean(time_step)
                    digit_nb = 0
                    for i in str(time_step):
                        if i in {'.'}:
                            pass
                        elif i in {'0'}:
                            digit_nb += 1
                        else:
                            break
                    self.vars['time_step'] = np.round(time_step, digit_nb + 1)
                    self.vars['time_step_is_arbitrary'] = False
            except:
                pass
        else:
            timings = np.arange(0, len(self.data_list) * self.vars['time_step'], self.vars['time_step'])
            self.vars['time_step_is_arbitrary'] = False
        return timings
```
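The digit-counting loop in `extract_exif` rounds the mean time step (in minutes) to one decimal place beyond its leading zeros, so small intervals keep enough precision. For a hypothetical mean step of 0.0243 min:

```python
import numpy as np

time_step = 0.0243          # mean frame interval in minutes (hypothetical)
digit_nb = 0
for i in str(time_step):    # scans "0.0243"
    if i in {'.'}:
        pass                # the decimal point is skipped
    elif i in {'0'}:
        digit_nb += 1       # count leading zeros (here: 2)
    else:
        break               # stop at the first significant digit
print(np.round(time_step, digit_nb + 1))   # 0.024
```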
```python
    def fast_first_image_segmentation(self):
        """
        Segment the first image in a series for biological and background masks.

        Notes
        -----
        This function processes the first image in a sequence, applying biological and background masks,
        segmenting the image, and updating internal data structures accordingly. It is specific to handling
        image sequences for biological analysis.
        """
        if not "color_number" in self.vars:
            self.update_variable_dict()
        if self.vars['convert_for_origin'] is None:
            self.vars['convert_for_origin'] = {"logical": 'None', "PCA": np.ones(3, dtype=np.uint8)}
        self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
                                             self.all["bio_mask"], self.all["back_mask"], subtract_background=None,
                                             subtract_background2=None,
                                             rolling_window_segmentation=self.vars["rolling_window_segmentation"],
                                             filter_spec=self.vars["filter_spec"])
        if not self.first_image.drift_correction_already_adjusted:
            self.vars['drift_already_corrected'] = self.first_image.check_if_image_border_attest_drift_correction()
            if self.vars['drift_already_corrected']:
                logging.info("Cellects detected that the images have already been corrected for drift")
                self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
                                                     self.all["bio_mask"], self.all["back_mask"],
                                                     subtract_background=None, subtract_background2=None,
                                                     rolling_window_segmentation=self.vars["rolling_window_segmentation"],
                                                     filter_spec=self.vars["filter_spec"],
                                                     allowed_window=self.first_image.drift_mask_coord)

        shapes_features = shape_selection(self.first_image.binary_image, true_shape_number=self.sample_number,
                                          horizontal_size=self.starting_blob_hsize_in_pixels,
                                          spot_shape=self.all['starting_blob_shape'],
                                          several_blob_per_arena=self.vars['several_blob_per_arena'],
                                          bio_mask=self.all["bio_mask"], back_mask=self.all["back_mask"])
        self.first_image.validated_shapes, shape_number, stats, centroids = shapes_features
        self.first_image.shape_number = shape_number
        if self.first_image.im_combinations is None:
            self.first_image.im_combinations = []
        if len(self.first_image.im_combinations) == 0:
            self.first_image.im_combinations.append({})
        self.current_combination_id = np.min((self.current_combination_id, len(self.first_image.im_combinations) - 1))
        self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
        self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
        if self.first_image.greyscale is not None:
            greyscale = self.first_image.greyscale
        else:
            greyscale = self.first_image.image
        self.first_image.im_combinations[self.current_combination_id]['converted_image'] = bracket_to_uint8_image_contrast(greyscale)
        self.first_image.im_combinations[self.current_combination_id]['shape_number'] = shape_number
```
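The conversion settings passed to `convert_and_segment` are plain dicts. Two shapes appear in this file: the PCA fallback built above when nothing was configured, and the equal-weight BGR form set in `get_first_image` for already-grayscale inputs:

```python
import numpy as np

# Fallback when no conversion was configured: PCA over all three channels
convert_for_origin = {"logical": 'None', "PCA": np.ones(3, dtype=np.uint8)}

# Form used for already-grayscale inputs: equal BGR weights, no logical op
convert_for_motion = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
```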
def fast_last_image_segmentation(self, biomask: NDArray[np.uint8] = None, backmask: NDArray[np.uint8] = None):
|
|
666
|
+
"""
|
|
667
|
+
Segment the first or subsequent image in a series for biological and background masks.
|
|
668
|
+
|
|
669
|
+
Parameters
|
|
670
|
+
----------
|
|
671
|
+
biomask : NDArray[np.uint8], optional
|
|
672
|
+
The biological mask to be applied to the image.
|
|
673
|
+
backmask : NDArray[np.uint8], optional
|
|
674
|
+
The background mask to be applied to the image.
|
|
675
|
+
|
|
676
|
+
Returns
|
|
677
|
+
-------
|
|
678
|
+
None
|
|
679
|
+
|
|
680
|
+
Notes
|
|
681
|
+
-----
|
|
682
|
+
This function processes the first or subsequent image in a sequence, applying biological and background masks,
|
|
683
|
+
segmenting the image, and updating internal data structures accordingly. The function is specific to handling
|
|
684
|
+
image sequences for biological analysis
|
|
685
|
+
|
|
686
|
+
"""
|
|
687
|
+
if self.vars['convert_for_motion'] is None:
|
|
688
|
+
self.vars['convert_for_motion'] = {"logical": 'None', "PCA": np.ones(3, dtype=np.uint8)}
|
|
689
|
+
self.cropping(is_first_image=False)
|
|
690
|
+
self.last_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
|
|
691
|
+
biomask, backmask, self.first_image.subtract_background,
|
|
692
|
+
self.first_image.subtract_background2,
|
|
693
|
+
rolling_window_segmentation=self.vars["rolling_window_segmentation"],
|
|
694
|
+
filter_spec=self.vars["filter_spec"])
|
|
695
|
+
if self.vars['drift_already_corrected'] and not self.last_image.drift_correction_already_adjusted and not self.vars["rolling_window_segmentation"]['do']:
|
|
696
|
+
self.last_image.check_if_image_border_attest_drift_correction()
|
|
697
|
+
self.last_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
|
|
698
|
+
biomask, backmask, self.first_image.subtract_background,
|
|
699
|
+
self.first_image.subtract_background2,
|
|
700
|
+
allowed_window=self.last_image.drift_mask_coord,
|
|
701
|
+
filter_spec=self.vars["filter_spec"])
|
|
702
|
+
|
|
703
|
+
if self.last_image.im_combinations is None:
|
|
704
|
+
self.last_image.im_combinations = []
|
|
705
|
+
if len(self.last_image.im_combinations) == 0:
|
|
706
|
+
self.last_image.im_combinations.append({})
|
|
707
|
+
self.current_combination_id = np.min((self.current_combination_id, len(self.last_image.im_combinations) - 1))
|
|
708
|
+
self.last_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_motion']
|
|
709
|
+
self.last_image.im_combinations[self.current_combination_id]['binary_image'] = self.last_image.binary_image
|
|
710
|
+
if self.last_image.greyscale is not None:
|
|
711
|
+
greyscale = self.last_image.greyscale
|
|
712
|
+
else:
|
|
713
|
+
greyscale = self.last_image.image
|
|
714
|
+
self.last_image.im_combinations[self.current_combination_id]['converted_image'] = bracket_to_uint8_image_contrast(greyscale)
|
|
715
|
+
+    def cropping(self, is_first_image: bool):
+        """
+        Crops the image based on specified conditions and settings.
+
+        This method checks if drift correction has already been applied.
+        If the image is the first one and hasn't been cropped yet, it will attempt
+        to use pre-stored coordinates or compute new crop coordinates. If automatic
+        cropping is enabled, it will apply the cropping process.
+
+        Parameters
+        ----------
+        is_first_image : bool
+            Indicates whether the image being processed is the first one in the sequence.
+        """
+        if not self.vars['drift_already_corrected']:
+            if is_first_image:
+                if not self.first_image.cropped:
+                    if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
+                        pickle_rick = PickleRick()
+                        data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
+                        if data_to_run_cellects_quickly is not None:
+                            if 'coordinates' in data_to_run_cellects_quickly:
+                                logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
+                                (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
+                                    data_to_run_cellects_quickly['coordinates']
+                                self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
+                            else:
+                                self.first_image.get_crop_coordinates()
+                        else:
+                            self.first_image.get_crop_coordinates()
+
+                    else:
+                        self.first_image.get_crop_coordinates()
+                    if self.all['automatically_crop']:
+                        self.first_image.automatically_crop(self.first_image.crop_coord)
+                    else:
+                        self.first_image.crop_coord = None
+            else:
+                if not self.last_image.cropped and self.all['automatically_crop']:
+                    self.last_image.automatically_crop(self.first_image.crop_coord)
+
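A self-contained sketch of how crop coordinates of the form [ccy1, ccy2, ccx1, ccx2] map onto a numpy slice; the array and values are illustrative assumptions, not taken from the package:

    import numpy as np

    image = np.zeros((2000, 3000, 3), dtype=np.uint8)
    ccy1, ccy2, ccx1, ccx2 = 120, 1880, 250, 2750   # hypothetical stored coordinates
    cropped = image[ccy1:ccy2, ccx1:ccx2]           # rows then columns, upper bounds excluded
    assert cropped.shape == (ccy2 - ccy1, ccx2 - ccx1, 3)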
+    def get_average_pixel_size(self):
+        """
+        Calculate the average pixel size and related variables.
+
+        Logs information about calculation steps, computes the average
+        pixel size based on image or cell scaling settings,
+        and sets initial thresholds for object detection.
+
+        Notes
+        -----
+        - The average pixel size is determined by either image dimensions or blob sizes.
+        - Thresholds for automatic detection are set based on configuration settings.
+
+        """
+        logging.info("Getting average pixel size")
+        (self.first_image.shape_number,
+         self.first_image.shapes,
+         self.first_image.stats,
+         centroids) = cv2.connectedComponentsWithStats(
+            self.first_image.validated_shapes,
+            connectivity=8)
+        self.first_image.shape_number -= 1
+        if self.all['scale_with_image_or_cells'] == 0:
+            self.vars['average_pixel_size'] = np.square(
+                self.all['image_horizontal_size_in_mm'] /
+                self.first_im.shape[1])
+        else:
+            self.vars['average_pixel_size'] = np.square(
+                self.all['starting_blob_hsize_in_mm'] /
+                np.mean(self.first_image.stats[1:, 2]))
+        if self.all['set_spot_size']:
+            self.starting_blob_hsize_in_pixels = (
+                self.all['starting_blob_hsize_in_mm'] /
+                np.sqrt(self.vars['average_pixel_size']))
+        else:
+            self.starting_blob_hsize_in_pixels = None
+
+        if self.all['automatic_size_thresholding']:
+            self.vars['first_move_threshold'] = 10
+        else:
+            self.vars['first_move_threshold'] = np.round(
+                self.all['first_move_threshold_in_mm²'] /
+                self.vars['average_pixel_size']).astype(np.uint8)
+        logging.info(f"The average pixel size is: {self.vars['average_pixel_size']} mm²")
+
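A worked example of the scaling arithmetic above, with made-up values: when scaling from the image width, the pixel area is the squared ratio of physical width to pixel width, and a physical movement threshold divides out to a pixel count.

    import numpy as np

    image_horizontal_size_in_mm = 150.0
    width_in_pixels = 3000
    average_pixel_size = np.square(image_horizontal_size_in_mm / width_in_pixels)
    print(average_pixel_size)            # 0.0025 mm² per pixel, i.e. a 0.05 mm x 0.05 mm pixel
    first_move_threshold_in_mm2 = 0.25   # hypothetical threshold in mm²
    print(np.round(first_move_threshold_in_mm2 / average_pixel_size))   # 100.0 pixels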
+    def get_background_to_subtract(self):
+        """
+        Determine if background subtraction should be applied to the image.
+
+        Extended Description
+        --------------------
+        This function checks whether background subtraction should be applied.
+        It utilizes the 'subtract_background' flag and potentially converts
+        the image for motion estimation.
+
+        Parameters
+        ----------
+        self : object
+            The instance of the class containing this method.
+            Must have attributes `vars` and `first_image`.
+        """
+        if self.vars['subtract_background']:
+            self.first_image.generate_subtract_background(self.vars['convert_for_motion'], self.vars['drift_already_corrected'])
+
+    def find_if_lighter_background(self):
+        """
+        Determines whether the background is lighter or darker than the cells.
+
+        This function analyzes images to determine if their backgrounds are lighter
+        or darker relative to the cells, updating attributes accordingly for analysis and display purposes.
+
+        Notes
+        -----
+        This function modifies instance variables and does not return any value.
+        The analysis involves comparing mean pixel values in specific areas of the image.
+        """
+        logging.info("Find if the background is lighter or darker than the cells")
+        self.vars['lighter_background']: bool = True
+        self.vars['contour_color']: np.uint8 = 0
+        are_dicts_equal: bool = True
+        if self.vars['convert_for_origin'] is not None and self.vars['convert_for_motion'] is not None:
+            for key in self.vars['convert_for_origin'].keys():
+                are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_motion'] and self.vars['convert_for_origin'][key] == self.vars['convert_for_motion'][key])
+
+            for key in self.vars['convert_for_motion'].keys():
+                are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_origin'] and self.vars['convert_for_motion'][key] == self.vars['convert_for_origin'][key])
+        else:
+            self.vars['convert_for_origin'] = {"logical": 'None', "PCA": np.ones(3, dtype=np.uint8)}
+            are_dicts_equal = True
+        if are_dicts_equal:
+            if self.first_im is None:
+                self.get_first_image()
+                self.fast_first_image_segmentation()
+            self.cropping(is_first_image=True)
+            among = np.nonzero(self.first_image.validated_shapes)
+            not_among = np.nonzero(1 - self.first_image.validated_shapes)
+            # Use the converted image to tell if the background is lighter, for analysis purposes
+            if self.first_image.image[among[0], among[1]].mean() > self.first_image.image[not_among[0], not_among[1]].mean():
+                self.vars['lighter_background'] = False
+            # Use the original image to tell if the background is lighter, for display purposes
+            if self.first_image.bgr[among[0], among[1], ...].mean() > self.first_image.bgr[not_among[0], not_among[1], ...].mean():
+                self.vars['contour_color'] = 255
+        else:
+            if self.last_im is None:
+                self.get_last_image()
+                # self.cropping(is_first_image=False)
+                self.fast_last_image_segmentation()
+            if self.last_image.binary_image.sum() == 0:
+                self.fast_last_image_segmentation()
+            among = np.nonzero(self.last_image.binary_image)
+            not_among = np.nonzero(1 - self.last_image.binary_image)
+            # Use the converted image to tell if the background is lighter, for analysis purposes
+            if self.last_image.image[among[0], among[1]].mean() > self.last_image.image[not_among[0], not_among[1]].mean():
+                self.vars['lighter_background'] = False
+            # Use the original image to tell if the background is lighter, for display purposes
+            if self.last_image.bgr[among[0], among[1], ...].mean() > self.last_image.bgr[not_among[0], not_among[1], ...].mean():
+                self.vars['contour_color'] = 255
+        if self.vars['origin_state'] == "invisible":
+            binary_image = deepcopy(self.first_image.binary_image)
+            self.first_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
+                                                 None, None, subtract_background=None,
+                                                 subtract_background2=None,
+                                                 rolling_window_segmentation=self.vars['rolling_window_segmentation'],
+                                                 filter_spec=self.vars["filter_spec"])
+            covered_values = self.first_image.image[np.nonzero(binary_image)]
+            self.vars['luminosity_threshold'] = 127
+            if len(covered_values) > 0:
+                if self.vars['lighter_background']:
+                    if np.max(covered_values) < 255:
+                        self.vars['luminosity_threshold'] = np.max(covered_values) + 1
+                else:
+                    if np.min(covered_values) > 0:
+                        self.vars['luminosity_threshold'] = np.min(covered_values) - 1
+
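A self-contained sketch of the lighter-background test performed above: compare the mean intensity of pixels inside the specimen mask against the rest of the frame. The data here is synthetic.

    import numpy as np

    grey = np.full((100, 100), 200, dtype=np.uint8)   # bright background
    grey[40:60, 40:60] = 30                           # dark specimen
    mask = np.zeros((100, 100), dtype=np.uint8)
    mask[40:60, 40:60] = 1
    among = np.nonzero(mask)
    not_among = np.nonzero(1 - mask)
    lighter_background = grey[among].mean() <= grey[not_among].mean()
    print(lighter_background)   # True: the specimen is darker than the background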
+    def delineate_each_arena(self):
+        """
+        Determine the coordinates of each arena for video analysis.
+
+        The function processes video frames to identify bounding boxes around
+        specimens and determines valid arenas for analysis. In case of existing data,
+        it uses previously computed coordinates if available and valid.
+
+        Returns
+        -------
+        analysis_status : dict
+            A dictionary containing flags and messages indicating the status of
+            the analysis.
+            - 'continue' (bool): Whether to continue processing.
+            - 'message' (str): Informational or error message.
+
+        Raises
+        ------
+        None
+
+        Notes
+        -----
+        This function relies on the existence of certain attributes and variables
+        defined in the class instance.
+
+        Examples
+        --------
+        >>> self.delineate_each_arena()
+        {'continue': True, 'message': ''}
+        """
+        analysis_status = {"continue": True, "message": ""}
+        if not self.vars['several_blob_per_arena'] and (self.sample_number > 1):
+            compute_get_bb: bool = True
+            if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
+
+                pickle_rick = PickleRick()
+                data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
+                if data_to_run_cellects_quickly is not None:
+                    if 'coordinates' in data_to_run_cellects_quickly:
+                        (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
+                            data_to_run_cellects_quickly['coordinates']
+                        self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
+                        if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
+                                self.first_image.image.shape[1] == (ccx2 - ccx1)):  # maybe useless now
+                            logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
+                            compute_get_bb = False
+
+            if compute_get_bb:
+                motion_list = None
+                if self.all['are_gravity_centers_moving']:
+                    motion_list = self._segment_blob_motion(sample_size=5)
+                # if self.all['im_or_vid'] == 1:
+                self.get_bounding_boxes(are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
+                                        motion_list=motion_list, all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'])
+
+                if np.any(self.ordered_stats[:, 4] > 100 * np.median(self.ordered_stats[:, 4])):
+                    analysis_status['message'] = "A specimen is at least 100 times larger: click previous and retry by specifying 'back' areas."
+                    analysis_status['continue'] = False
+                if np.any(self.ordered_stats[:, 4] < 0.01 * np.median(self.ordered_stats[:, 4])):
+                    analysis_status['message'] = "A specimen is at least 100 times smaller: click previous and retry by specifying 'back' areas."
+                    analysis_status['continue'] = False
+                del self.ordered_stats
+                logging.info(
+                    str(self.not_analyzed_individuals) + " individuals are out of picture scope and cannot be analyzed")
+
+        else:
+            self._whole_image_bounding_boxes()
+            self.sample_number = 1
+        self._set_analyzed_individuals()
+        return analysis_status
+
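A sketch of the size sanity check performed above: connected-component areas (column 4 of an OpenCV stats array) are compared against 100x and 0.01x the median area to catch segmentation failures. The areas are synthetic.

    import numpy as np

    areas = np.array([410, 395, 388, 40500])   # hypothetical specimen areas in pixels
    median = np.median(areas)
    too_large = np.any(areas > 100 * median)
    too_small = np.any(areas < 0.01 * median)
    print(too_large, too_small)   # True False -> the analysis would stop with a message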
+    def _segment_blob_motion(self, sample_size: int) -> list:
+        """
+        Segment blob motion from the data list at specified sample sizes.
+
+        Parameters
+        ----------
+        sample_size : int
+            Number of samples to take from the data list.
+
+        Returns
+        -------
+        list
+            List containing segmented binary images at sampled frames.
+
+        Notes
+        -----
+        This function uses numpy for handling array operations and assumes the presence of certain attributes in the object, namely `data_list`, `first_image`, and `vars`.
+
+        Examples
+        --------
+        >>> motion_samples = self._segment_blob_motion(10)
+        >>> print(len(motion_samples))  # Expected output: 10
+        """
+        motion_list = list()
+        if isinstance(self.data_list, list):
+            frame_number = len(self.data_list)
+        else:
+            frame_number = self.data_list.shape[0]
+        sample_numbers = np.floor(np.linspace(0, frame_number, sample_size)).astype(int)
+        if 'lighter_background' not in self.vars.keys():
+            self.find_if_lighter_background()
+        for frame_idx in np.arange(sample_size):
+            if frame_idx == 0:
+                motion_list.insert(frame_idx, self.first_image.validated_shapes)
+            else:
+                image = self.data_list[sample_numbers[frame_idx] - 1]
+                if isinstance(image, str):
+                    is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
+                    image = read_and_rotate(image, self.first_image.bgr, self.all['raw_images'],
+                                            is_landscape, self.first_image.crop_coord)
+                    # image = readim(image)
+                In = OneImageAnalysis(image)
+                if self.vars['drift_already_corrected']:
+                    In.check_if_image_border_attest_drift_correction()
+                    # In.adjust_to_drift_correction(self.vars['convert_for_motion']['logical'])
+                In.convert_and_segment(self.vars['convert_for_motion'], self.vars['color_number'], None, None,
+                                       self.first_image.subtract_background, self.first_image.subtract_background2,
+                                       self.vars['rolling_window_segmentation'], self.vars['lighter_background'],
+                                       allowed_window=In.drift_mask_coord, filter_spec=self.vars['filter_spec'])
+                motion_list.insert(frame_idx, In.binary_image)
+        return motion_list
+
+
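A sketch of the frame sampling used above: np.linspace spreads sample_size values between 0 and frame_number; after flooring, index 0 is replaced by the first image and the later entries are shifted by -1, so the last sample is the final frame. The values are illustrative.

    import numpy as np

    frame_number, sample_size = 120, 5
    sample_numbers = np.floor(np.linspace(0, frame_number, sample_size)).astype(int)
    print(sample_numbers)             # [  0  30  60  90 120]
    print(sample_numbers[1:] - 1)     # frames actually read: [ 29  59  89 119]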
+    def get_bounding_boxes(self, are_gravity_centers_moving: bool, motion_list: list=(), all_specimens_have_same_direction: bool=True, original_shape_hsize: int=None):
+        """Get the coordinates of arenas using bounding boxes.
+
+        Parameters
+        ----------
+        are_gravity_centers_moving : bool
+            Flag indicating whether gravity centers are moving or not.
+        motion_list : list
+            List of motion information for the specimens.
+        all_specimens_have_same_direction : bool, optional
+            Flag indicating whether all specimens have the same direction,
+            by default True.
+        original_shape_hsize : int, optional
+            Horizontal size of the original shapes in pixels, by default None.
+
+        Notes
+        -----
+        This method uses various internal methods and variables to determine the bounding boxes.
+        """
+        # 7) Create required empty arrays: especially the bounding box coordinates of each video
+        self.ordered_first_image = None
+        self.shapes_to_remove = None
+        if self.first_image.crop_coord is None:
+            self.first_image.get_crop_coordinates()
+
+        logging.info("Get the coordinates of all arenas using the get_bounding_boxes method of the VideoMaker class")
+        if self.first_image.validated_shapes.any() and self.first_image.shape_number > 0:
+            self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                self.first_image.validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+            self.unchanged_ordered_fimg = deepcopy(self.ordered_first_image)
+            self.modif_validated_shapes = deepcopy(self.first_image.validated_shapes)
+            self.standard = -1
+            counter = 0
+            while np.any(np.less(self.standard, 0)) and counter < 20:
+                counter += 1
+                self.left = np.zeros(self.first_image.shape_number, dtype=np.int64)
+                self.right = np.repeat(self.modif_validated_shapes.shape[1], self.first_image.shape_number)
+                self.top = np.zeros(self.first_image.shape_number, dtype=np.int64)
+                self.bot = np.repeat(self.modif_validated_shapes.shape[0], self.first_image.shape_number)
+                if are_gravity_centers_moving:
+                    self.top, self.bot, self.left, self.right, self.ordered_first_image = get_bb_with_moving_centers(motion_list, all_specimens_have_same_direction,
+                                                                                                                     original_shape_hsize, self.first_image.validated_shapes,
+                                                                                                                     self.first_image.y_boundaries)
+                    new_ordered_first_image = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+
+                    for i in np.arange(1, self.first_image.shape_number + 1):
+                        previous_shape = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                        previous_shape[np.nonzero(self.unchanged_ordered_fimg == i)] = 1
+                        new_potentials = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                        new_potentials[np.nonzero(self.ordered_first_image == i)] = 1
+                        new_potentials[np.nonzero(self.unchanged_ordered_fimg == i)] = 0
+
+                        pads = ProgressivelyAddDistantShapes(new_potentials, previous_shape, max_distance=2)
+                        pads.consider_shapes_sizes(min_shape_size=10)
+                        pads.connect_shapes(only_keep_connected_shapes=True, rank_connecting_pixels=False)
+                        new_ordered_first_image[np.nonzero(pads.expanded_shape)] = i
+                    self.ordered_first_image = new_ordered_first_image
+                    self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                    self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
+                    self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                        self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+                    self.top, self.bot, self.left, self.right = get_quick_bounding_boxes(self.modif_validated_shapes, self.ordered_first_image, self.ordered_stats)
+                else:
+                    self.top, self.bot, self.left, self.right = get_quick_bounding_boxes(self.modif_validated_shapes, self.ordered_first_image, self.ordered_stats)
+                self._standardize_video_sizes()
+            if counter == 20:
+                self.top[self.top < 0] = 1
+                self.bot[self.bot >= self.ordered_first_image.shape[0] - 1] = self.ordered_first_image.shape[0] - 2
+                self.left[self.left < 0] = 1
+                self.right[self.right >= self.ordered_first_image.shape[1] - 1] = self.ordered_first_image.shape[1] - 2
+            del self.ordered_first_image
+            del self.unchanged_ordered_fimg
+            del self.modif_validated_shapes
+            del self.standard
+            del self.shapes_to_remove
+            self.bot += 1
+            self.right += 1
+        else:
+            self._whole_image_bounding_boxes()
+
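A minimal sketch of per-component bounding-box extraction with OpenCV, the primitive underlying the arena delineation above; the binary image is synthetic:

    import cv2
    import numpy as np

    binary = np.zeros((80, 80), dtype=np.uint8)
    binary[10:20, 10:25] = 1
    binary[50:70, 40:60] = 1
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(binary, connectivity=8)
    for i in range(1, n):                   # label 0 is the background
        x, y, w, h, area = stats[i]
        print(f"component {i}: top={y} bot={y + h} left={x} right={x + w} area={area}")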
+    def _whole_image_bounding_boxes(self):
+        self.top, self.bot, self.left, self.right = np.array([0]), np.array([self.first_image.image.shape[0]]), np.array([0]), np.array([self.first_image.image.shape[1]])
+
+    def _standardize_video_sizes(self):
+        """
+        Standardize video sizes by adjusting bounding boxes.
+
+        Extended Description
+        --------------------
+        This function adjusts the bounding boxes of detected shapes in a video frame.
+        It ensures that all bounding boxes are within the frame's boundaries and
+        standardizes their sizes to avoid issues with odd dimensions during video writing.
+
+        Returns
+        -------
+        None
+            The function modifies the following attributes of the class instance:
+
+        Attributes Modified
+        -------------------
+        standard : numpy.ndarray
+            Standardized bounding boxes.
+        shapes_to_remove : numpy.ndarray
+            Indices of shapes to be removed from the image.
+        modif_validated_shapes : numpy.ndarray
+            Modified validated shapes after removing out-of-picture areas.
+        ordered_stats : list of float
+            Updated order statistics for the shapes.
+        ordered_centroids : numpy.ndarray
+            Centroids of the ordered shapes.
+        ordered_first_image : numpy.ndarray
+            First image with updated order statistics and centroids.
+        first_image.shape_number : int
+            Updated number of shapes in the first image.
+        not_analyzed_individuals : numpy.ndarray
+            Indices of individuals not analyzed after modifications.
+
+        """
+        distance_threshold_to_consider_an_arena_out_of_the_picture = None  # in pixels, worked nicely with -50
+
+        # The modifications allowing to not make videos of setups out of view do not work for moving centers
+        y_diffs = self.bot - self.top
+        x_diffs = self.right - self.left
+        add_to_y = ((np.max(y_diffs) - y_diffs) / 2)
+        add_to_x = ((np.max(x_diffs) - x_diffs) / 2)
+        self.standard = np.zeros((len(self.top), 4), dtype=np.int64)
+        self.standard[:, 0] = self.top - np.uint8(np.floor(add_to_y))
+        self.standard[:, 1] = self.bot + np.uint8(np.ceil(add_to_y))
+        self.standard[:, 2] = self.left - np.uint8(np.floor(add_to_x))
+        self.standard[:, 3] = self.right + np.uint8(np.ceil(add_to_x))
+
+        # Monitor if one bounding box gets out of picture shape
+        out_of_pic = deepcopy(self.standard)
+        out_of_pic[:, 1] = self.ordered_first_image.shape[0] - out_of_pic[:, 1] - 1
+        out_of_pic[:, 3] = self.ordered_first_image.shape[1] - out_of_pic[:, 3] - 1
+
+        if distance_threshold_to_consider_an_arena_out_of_the_picture is None:
+            distance_threshold_to_consider_an_arena_out_of_the_picture = np.min(out_of_pic) - 1
+
+        # If it occurs at least one time, apply a correction, otherwise, continue and write videos
+        # If the overflow is strong, remove the corresponding individuals and remake bounding_box finding
+        if np.any(np.less(out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)):
+            # Remove shapes
+            self.standard = -1
+            self.shapes_to_remove = np.nonzero(np.less(out_of_pic, -20))[0]
+            for shape_i in self.shapes_to_remove:
+                self.ordered_first_image[self.ordered_first_image == (shape_i + 1)] = 0
+            self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+            self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
+            self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+
+            self.first_image.shape_number = self.first_image.shape_number - len(self.shapes_to_remove)
+            self.not_analyzed_individuals = np.unique(self.unchanged_ordered_fimg -
+                                                      (self.unchanged_ordered_fimg * self.modif_validated_shapes))[1:]
+
+        else:
+            # Reduce all box sizes if necessary and proceed
+            if np.any(np.less(out_of_pic, 0)):
+                # When the overflow is weak, remake standardization with lower "add_to_y" and "add_to_x"
+                overflow = np.nonzero(np.logical_and(np.less(out_of_pic, 0), np.greater_equal(out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)))[0]
+                # Look if overflow occurs on the y axis
+                if np.any(np.less(out_of_pic[overflow, :2], 0)):
+                    add_to_top_and_bot = np.min(out_of_pic[overflow, :2])
+                    self.standard[:, 0] = self.standard[:, 0] - add_to_top_and_bot
+                    self.standard[:, 1] = self.standard[:, 1] + add_to_top_and_bot
+                # Look if overflow occurs on the x axis
+                if np.any(np.less(out_of_pic[overflow, 2:], 0)):
+                    add_to_left_and_right = np.min(out_of_pic[overflow, 2:])
+                    self.standard[:, 2] = self.standard[:, 2] - add_to_left_and_right
+                    self.standard[:, 3] = self.standard[:, 3] + add_to_left_and_right
+            # If x or y sizes are odd, make them even:
+            # for some reason, OpenCV subtracts 1 from odd frame dimensions when writing videos
+            if (self.standard[0, 1] - self.standard[0, 0]) % 2 != 0:
+                self.standard[:, 1] -= 1
+            if (self.standard[0, 3] - self.standard[0, 2]) % 2 != 0:
+                self.standard[:, 3] -= 1
+            self.top = self.standard[:, 0]
+            self.bot = self.standard[:, 1]
+            self.left = self.standard[:, 2]
+            self.right = self.standard[:, 3]
+
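A worked example of the standardization above, with made-up boxes: every box is padded to the largest height and width, splitting the padding between a floor half and a ceil half.

    import numpy as np

    top = np.array([10, 40]); bot = np.array([60, 95])   # heights 50 and 55
    y_diffs = bot - top
    add_to_y = (np.max(y_diffs) - y_diffs) / 2           # [2.5, 0.0]
    new_top = top - np.floor(add_to_y).astype(int)       # [ 8, 40]
    new_bot = bot + np.ceil(add_to_y).astype(int)        # [63, 95]
    print(new_bot - new_top)                             # [55 55] -> equal heights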
+    def get_origins_and_backgrounds_lists(self):
+        """
+        Create origins and background lists for image processing.
+
+        Extended Description
+        --------------------
+        This method generates the origin and background lists by slicing the first image
+        and its background subtraction based on predefined boundaries. It handles cases where
+        the top, bottom, left, and right boundaries are not yet initialized.
+
+        Notes
+        -----
+        This method directly modifies the input image data. The `self.vars` dictionary is populated
+        with lists of sliced arrays from the first image and its background.
+
+        Attributes
+        ----------
+        self.vars : dict
+            Dictionary to store processed data.
+        self.first_image : ImageObject
+            The first image object containing validated shapes and background subtraction arrays.
+        """
+        logging.info("Create origins and background lists")
+        if self.top is None:
+            self._whole_image_bounding_boxes()
+        first_im = self.first_image.validated_shapes
+        self.vars['origin_list'] = []
+        self.vars['background_list'] = []
+        self.vars['background_list2'] = []
+        for rep in np.arange(len(self.vars['analyzed_individuals'])):
+            self.vars['origin_list'].append(first_im[self.top[rep]:self.bot[rep], self.left[rep]:self.right[rep]])
+        if self.vars['subtract_background']:
+            for rep in np.arange(len(self.vars['analyzed_individuals'])):
+                self.vars['background_list'].append(
+                    self.first_image.subtract_background[self.top[rep]:self.bot[rep], self.left[rep]:self.right[rep]])
+                if self.vars['convert_for_motion']['logical'] != 'None':
+                    self.vars['background_list2'].append(self.first_image.subtract_background2[self.top[rep]:
+                                                         self.bot[rep], self.left[rep]:self.right[rep]])
+
+    def complete_image_analysis(self):
+        if not self.visualize and len(self.last_image.im_combinations) > 0:
+            self.last_image.binary_image = self.last_image.im_combinations[self.current_combination_id]['binary_image']
+            self.last_image.image = self.last_image.im_combinations[self.current_combination_id]['converted_image']
+        self.instantiate_tables()
+        if len(self.vars['exif']) > 1:
+            self.vars['exif'] = self.vars['exif'][0]
+        if len(self.last_image.all_c_spaces) == 0:
+            self.last_image.all_c_spaces['bgr'] = self.last_image.bgr.copy()
+        if self.all['bio_mask'] is not None:
+            self.last_image.binary_image[self.all['bio_mask']] = 1
+        if self.all['back_mask'] is not None:
+            self.last_image.binary_image[self.all['back_mask']] = 0
+        for i, arena in enumerate(self.vars['analyzed_individuals']):
+            binary = self.last_image.binary_image[self.top[i]:self.bot[i], self.left[i]:self.right[i]]
+            efficiency_test = self.last_image.all_c_spaces['bgr'][self.top[i]:self.bot[i], self.left[i]:self.right[i], :]
+            if not self.vars['several_blob_per_arena']:
+                binary = keep_one_connected_component(binary)
+                one_row_per_frame = compute_one_descriptor_per_frame(binary[None, :, :],
+                                                                     arena,
+                                                                     self.vars['exif'],
+                                                                     self.vars['descriptors'],
+                                                                     self.vars['output_in_mm'],
+                                                                     self.vars['average_pixel_size'],
+                                                                     self.vars['do_fading'],
+                                                                     self.vars['save_coord_specimen'])
+                coord_network = None
+                coord_pseudopods = None
+                if self.vars['save_graph']:
+                    if coord_network is None:
+                        coord_network = np.array(np.nonzero(binary))
+                    extract_graph_dynamics(self.last_image.image[None, :, :], coord_network, arena,
+                                           0, None, coord_pseudopods)
+
+            else:
+                one_row_per_frame = compute_one_descriptor_per_colony(binary[None, :, :],
+                                                                      arena,
+                                                                      self.vars['exif'],
+                                                                      self.vars['descriptors'],
+                                                                      self.vars['output_in_mm'],
+                                                                      self.vars['average_pixel_size'],
+                                                                      self.vars['do_fading'],
+                                                                      self.vars['first_move_threshold'],
+                                                                      self.vars['save_coord_specimen'])
+            if self.vars['fractal_analysis']:
+                zoomed_binary, side_lengths = prepare_box_counting(binary,
+                                                                   min_mesh_side=self.vars['fractal_box_side_threshold'],
+                                                                   zoom_step=self.vars['fractal_zoom_step'],
+                                                                   contours=True)
+                box_counting_dimensions = box_counting_dimension(zoomed_binary, side_lengths)
+                one_row_per_frame["fractal_dimension"] = box_counting_dimensions[0]
+                one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[1]
+                one_row_per_frame["fractal_r_value"] = box_counting_dimensions[2]
+
+            one_descriptor_per_arena = {}
+            one_descriptor_per_arena["arena"] = arena
+            one_descriptor_per_arena["first_move"] = pd.NA
+            one_descriptor_per_arena["final_area"] = binary.sum()
+            one_descriptor_per_arena["iso_digi_transi"] = pd.NA
+            one_descriptor_per_arena["is_growth_isotropic"] = pd.NA
+            self.update_one_row_per_arena(i, one_descriptor_per_arena)
+            self.update_one_row_per_frame(i * 1, (i + 1) * 1, one_row_per_frame)
+            contours = np.nonzero(get_contours(binary))
+            efficiency_test[contours[0], contours[1], :] = np.array((94, 0, 213), dtype=np.uint8)
+            self.add_analysis_visualization_to_first_and_last_images(i, efficiency_test, None)
+        self.save_tables(with_last_image=False)
+
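A hedged sketch of box-counting fractal dimension, the quantity stored in "fractal_dimension" above: count the boxes a shape occupies at several mesh sizes and fit a line in log-log space. This is a generic implementation under stated assumptions, not the package's own prepare_box_counting/box_counting_dimension pair.

    import numpy as np

    def box_count(binary: np.ndarray, side: int) -> int:
        h, w = binary.shape
        hh, ww = h - h % side, w - w % side                 # trim to a multiple of the side
        blocks = binary[:hh, :ww].reshape(hh // side, side, ww // side, side)
        return int((blocks.sum(axis=(1, 3)) > 0).sum())     # boxes containing any pixel

    binary = np.zeros((256, 256), dtype=np.uint8)
    binary[64:192, 64:192] = 1                              # a filled square, dimension ~2
    sides = np.array([2, 4, 8, 16, 32])
    counts = np.array([box_count(binary, s) for s in sides])
    slope, _ = np.polyfit(np.log(sides), np.log(counts), 1)
    print(-slope)                                           # close to 2.0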
+    def prepare_video_writing(self, img_list: list, min_ram_free: float, in_colors: bool=False, pathway: str=""):
+        """
+        Prepare the raw video (.npy) writing process for Cellects.
+
+        Parameters
+        ----------
+        img_list : list
+            List of images to be processed.
+        min_ram_free : float
+            Minimum amount of RAM in GB that should remain free.
+        in_colors : bool, optional
+            Whether the images are in color. Default is False.
+        pathway : str, optional
+            Path to save the video files. Default is an empty string.
+
+        Returns
+        -------
+        tuple
+            A tuple containing:
+            - bunch_nb: int, number of bunches needed for video writing.
+            - video_nb_per_bunch: int, number of videos per bunch.
+            - sizes: ndarray, dimensions of each video.
+            - video_bunch: list or ndarray, initialized video arrays.
+            - vid_names: list, names of the video files.
+            - rom_memory_required: None or float, required ROM memory.
+            - analysis_status: dict, status and message of the analysis process.
+            - remaining: int, remainder videos that do not fit in a complete bunch.
+
+        Notes
+        -----
+        - The function calculates necessary memory and ensures 10% extra to avoid issues.
+        - It checks for available RAM and adjusts the number of bunches accordingly.
+        - If using color images, memory requirements are tripled.
+
+        The expected output depends on the provided images and RAM availability.
+        """
+        # 1) Create a list of video names
+        if self.not_analyzed_individuals is not None:
+            number_to_add = len(self.not_analyzed_individuals)
+        else:
+            number_to_add = 0
+        vid_names = list()
+        ind_i = 0
+        counter = 0
+        while ind_i < (self.first_image.shape_number + number_to_add):
+            ind_i += 1
+            while np.any(np.isin(self.not_analyzed_individuals, ind_i)):
+                ind_i += 1
+            vid_names.append(pathway + "ind_" + str(ind_i) + ".npy")
+            counter += 1
+        img_nb = len(img_list)
+
+        # 2) Create a table of the dimensions of each video
+        # Add 10% to the necessary memory to avoid problems
+        necessary_memory = img_nb * np.multiply((self.bot - self.top).astype(np.uint64), (self.right - self.left).astype(np.uint64)).sum() * 8 * 1.16415e-10
+        if in_colors:
+            sizes = np.column_stack(
+                (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top, self.right - self.left,
+                 np.repeat(3, self.first_image.shape_number)))
+            necessary_memory *= 3
+        else:
+            sizes = np.column_stack(
+                (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top, self.right - self.left))
+        use_list_of_vid = True
+        if np.all(sizes[0, :] == sizes):
+            use_list_of_vid = False
+        available_memory = (psutil.virtual_memory().available >> 30) - min_ram_free
+        if available_memory == 0:
+            analysis_status = {"continue": False, "message": "There is not enough RAM available"}
+            bunch_nb = 1
+        else:
+            bunch_nb = int(np.ceil(necessary_memory / available_memory))
+            if bunch_nb > 1:
+                # The program will need twice the memory to create the second bunch.
+                bunch_nb = int(np.ceil(2 * necessary_memory / available_memory))
+
+            video_nb_per_bunch = np.floor(self.first_image.shape_number / bunch_nb).astype(np.uint8)
+            analysis_status = {"continue": True, "message": ""}
+            video_bunch = None
+            try:
+                if use_list_of_vid:
+                    video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in range(video_nb_per_bunch)]
+                else:
+                    video_bunch = np.zeros(np.append(sizes[0, :], video_nb_per_bunch), dtype=np.uint8)
+            except ValueError as v_err:
+                analysis_status = {"continue": False, "message": "Probably failed to detect the right cell(s) number, do the first image analysis manually."}
+                logging.error(f"{analysis_status['message']} error is: {v_err}")
+        # Check for available ROM memory
+        if (psutil.disk_usage('/')[2] >> 30) < (necessary_memory + 2):
+            rom_memory_required = necessary_memory + 2
+        else:
+            rom_memory_required = None
+        remaining = self.first_image.shape_number % bunch_nb
+        if remaining > 0:
+            bunch_nb += 1
+        is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
+        logging.info(f"Cellects will start writing {self.first_image.shape_number} videos. Given available memory, it will do it in {bunch_nb} time(s)")
+        return bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining, use_list_of_vid, is_landscape
+
+
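A worked example of the memory estimate above, with made-up sizes: 1.16415e-10 is approximately 1 / 2**33, i.e. GiB per bit, so multiplying a uint8 pixel count by 8 and by this constant converts it to GiB.

    import numpy as np

    img_nb = 500
    heights = np.array([400, 400]); widths = np.array([600, 600])   # two hypothetical arenas
    pixels_per_frame = np.multiply(heights.astype(np.uint64), widths.astype(np.uint64)).sum()
    necessary_memory = img_nb * pixels_per_frame * 8 * 1.16415e-10  # in GiB
    print(round(necessary_memory, 3))   # ~0.224 GiB for two 400x600 videos of 500 frames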
+    def update_output_list(self):
+        """
+        Update the output list with various descriptors from the analysis results.
+
+        This method processes different types of descriptors and assigns them to
+        the `self.vars['descriptors']` dictionary. It handles special cases for
+        descriptors related to 'xy' dimensions and ensures that all relevant metrics
+        are stored in the output list.
+        """
+        self.vars['descriptors'] = {}
+        for descriptor in self.all['descriptors'].keys():
+            if descriptor == 'standard_deviation_xy':
+                self.vars['descriptors']['standard_deviation_x'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['standard_deviation_y'] = self.all['descriptors'][descriptor]
+            elif descriptor == 'skewness_xy':
+                self.vars['descriptors']['skewness_x'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['skewness_y'] = self.all['descriptors'][descriptor]
+            elif descriptor == 'kurtosis_xy':
+                self.vars['descriptors']['kurtosis_x'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['kurtosis_y'] = self.all['descriptors'][descriptor]
+            elif descriptor == 'major_axes_len_and_angle':
+                self.vars['descriptors']['major_axis_len'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['minor_axis_len'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['axes_orientation'] = self.all['descriptors'][descriptor]
+            else:
+                if np.isin(descriptor, list(from_shape_descriptors_class.keys())):
+                    self.vars['descriptors'][descriptor] = self.all['descriptors'][descriptor]
+        self.vars['descriptors']['newly_explored_area'] = self.vars['do_fading']
+
+    def update_available_core_nb(self, image_bit_number=256, video_bit_number=140):  # video_bit_number=176
+        """
+        Update available computation resources based on memory and processing constraints.
+
+        Parameters
+        ----------
+        image_bit_number : int, optional
+            Number of bits per image pixel (default is 256).
+        video_bit_number : int, optional
+            Number of bits per video frame pixel (default is 140).
+
+        Other Parameters
+        ----------------
+        lose_accuracy_to_save_memory : bool
+            Flag to reduce accuracy for memory savings.
+        convert_for_motion : dict
+            Conversion settings for motion analysis.
+        already_greyscale : bool
+            Flag indicating if the image is already greyscale.
+        save_coord_thickening_slimming : bool
+            Flag to save coordinates for thickening and slimming.
+        oscilacyto_analysis : bool
+            Flag indicating if oscilacyto analysis is enabled.
+        save_coord_network : bool
+            Flag to save coordinates for network analysis.
+
+        Returns
+        -------
+        float
+            Rounded absolute difference between available memory and necessary memory in GB.
+
+        """
+        if self.vars['lose_accuracy_to_save_memory']:
+            video_bit_number -= 56
+        if self.vars['convert_for_motion']['logical'] != 'None':
+            video_bit_number += 64
+            if self.vars['lose_accuracy_to_save_memory']:
+                video_bit_number -= 56
+        if self.vars['already_greyscale']:
+            video_bit_number -= 64
+        if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
+            video_bit_number += 16
+            image_bit_number += 128
+        if self.vars['save_coord_network']:
+            video_bit_number += 8
+            image_bit_number += 64
+
+        if isinstance(self.bot, list):
+            one_image_memory = np.multiply((self.bot[0] - self.top[0]),
+                                           (self.right[0] - self.left[0])).max().astype(np.uint64)
+        else:
+            one_image_memory = np.multiply((self.bot - self.top).astype(np.uint64),
+                                           (self.right - self.left).astype(np.uint64)).max()
+        one_video_memory = self.vars['img_number'] * one_image_memory
+        necessary_memory = (one_image_memory * image_bit_number + one_video_memory * video_bit_number) * 1.16415e-10
+        available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+        max_repeat_in_memory = (available_memory // necessary_memory).astype(np.uint16)
+        if max_repeat_in_memory > 1:
+            max_repeat_in_memory = np.max(((available_memory // (2 * necessary_memory)).astype(np.uint16), 1))
+
+        self.cores = np.min((self.all['cores'], max_repeat_in_memory))
+        if self.cores > self.sample_number:
+            self.cores = self.sample_number
+        return np.round(np.absolute(available_memory - necessary_memory), 3)
+
+
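A sketch of the core-count decision above: the number of parallel workers is capped both by how many analyses fit in free RAM (with a factor-two safety margin) and by the arena count. All values are illustrative.

    requested_cores = 8
    sample_number = 6
    available_memory = 10.0          # GiB left after the min_ram_free safety margin
    necessary_memory = 1.7           # GiB per analysis (one image + one video in RAM)
    max_repeat_in_memory = int(available_memory // necessary_memory)
    if max_repeat_in_memory > 1:     # keep a factor-two safety margin, as above
        max_repeat_in_memory = max(int(available_memory // (2 * necessary_memory)), 1)
    cores = min(requested_cores, max_repeat_in_memory, sample_number)
    print(cores)                     # 2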
+    def update_one_row_per_arena(self, i: int, table_to_add):
+        """
+        Update one row of the dataframe per arena.
+
+        Add a row to a DataFrame for each arena, based on the provided table_to_add. If no previous rows exist,
+        initialize a new DataFrame with zeros.
+
+        Parameters
+        ----------
+        i : int
+            Index of the arena to update.
+        table_to_add : dict
+            Dictionary containing values to add. Keys are column names, values are the data.
+
+        """
+        if not self.vars['several_blob_per_arena']:
+            if self.one_row_per_arena is None:
+                self.one_row_per_arena = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(table_to_add)), dtype=float),
+                                                      columns=table_to_add.keys())
+            self.one_row_per_arena.iloc[i, :] = table_to_add.values()
+
+
+    def update_one_row_per_frame(self, i: int, j: int, table_to_add):
+        """
+        Update a range of rows in `self.one_row_per_frame` DataFrame with values from
+        `table_to_add`.
+
+        Parameters
+        ----------
+        i : int
+            The starting row index to update in `self.one_row_per_frame`.
+        j : int
+            The ending row index (exclusive) to update in `self.one_row_per_frame`.
+        table_to_add : dict
+            A dictionary where keys are column labels and values are lists or arrays of
+            data to insert into `self.one_row_per_frame`.
+
+        Notes
+        -----
+        Ensures that one row per arena is being updated. If `self.one_row_per_frame` is
+        None, it initializes a DataFrame to hold the data.
+        """
+        if not self.vars['several_blob_per_arena']:
+            if self.one_row_per_frame is None:
+                self.one_row_per_frame = pd.DataFrame(index=range(len(self.vars['analyzed_individuals']) *
+                                                                  self.vars['img_number']),
+                                                      columns=table_to_add.keys())
+
+            self.one_row_per_frame.iloc[i:j, :] = table_to_add
+
+
+    def instantiate_tables(self):
+        """
+        Update output list and prepare results tables and validation images.
+
+        Extended Description
+        --------------------
+        This method performs necessary preparations for processing image sequences,
+        including updating the output list and initializing key attributes required
+        for subsequent operations.
+
+        """
+        self.update_output_list()
+        logging.info("Instantiate results tables and validation images")
+        self.fractal_box_sizes = None
+        self.one_row_per_arena = None
+        self.one_row_per_frame = None
+        if self.vars['already_greyscale']:
+            if len(self.first_image.bgr.shape) == 2:
+                self.first_image.bgr = np.stack((self.first_image.bgr, self.first_image.bgr, self.first_image.bgr), axis=2).astype(np.uint8)
+            if len(self.last_image.bgr.shape) == 2:
+                self.last_image.bgr = np.stack((self.last_image.bgr, self.last_image.bgr, self.last_image.bgr), axis=2).astype(np.uint8)
+            self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
+
+    def add_analysis_visualization_to_first_and_last_images(self, i: int, first_visualization: NDArray, last_visualization: NDArray=None):
+        """
+        Adds analysis visualizations to the first and last images of a sequence.
+
+        Parameters
+        ----------
+        i : int
+            Index of the image in the sequence.
+        first_visualization : NDArray[np.uint8]
+            The visualization to add to the first image.
+        last_visualization : NDArray[np.uint8], optional
+            The visualization to add to the last image.
+
+        Other Parameters
+        ----------------
+        vars : dict
+            Dictionary containing various parameters.
+        arena_shape : str, optional
+            The shape of the arena. Either 'circle' or other shapes.
+
+        Notes
+        -----
+        If `arena_shape` is 'circle', the visualization will be masked by an ellipse.
+
+        """
+        minmax = (self.top[i], self.bot[i], self.left[i], self.right[i])
+        self.first_image.bgr = draw_img_with_mask(self.first_image.bgr, self.first_image.bgr.shape[:2], minmax,
+                                                  self.vars['arena_shape'], first_visualization)
+        if last_visualization is not None:
+            self.last_image.bgr = draw_img_with_mask(self.last_image.bgr, self.last_image.bgr.shape[:2], minmax,
+                                                     self.vars['arena_shape'], last_visualization)
+
+        # cr = ((self.top[i], self.bot[i]),
+        #       (self.left[i], self.right[i]))
+        # if self.vars['arena_shape'] == 'circle':
+        #     ellipse = create_ellipse(cr[0][1] - cr[0][0], cr[1][1] - cr[1][0])
+        #     ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
+        #     first_visualization *= ellipse
+        #     self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
+        #     self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += first_visualization
+        #     if last_visualization is not None:
+        #         last_visualization *= ellipse
+        #         self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
+        #         self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += last_visualization
+        # else:
+        #     self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = first_visualization
+        #     if last_visualization is not None:
+        #         self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = last_visualization
+
+
+    def save_tables(self, with_last_image: bool=True):
+        """
+        Exports analysis results to CSV files and saves visualization outputs.
+
+        Generates the following output:
+        - one_row_per_arena.csv, one_row_per_frame.csv : Tracking data per arena/frame.
+        - software_settings.csv : Full configuration settings for reproducibility.
+
+        Raises
+        ------
+        PermissionError
+            Caught internally if any output file is already open in an external program
+            (logged and reported to the interface).
+
+        Notes
+        -----
+        Ensure no exported CSV files are open while running this method to avoid permission errors. This
+        function will fail gracefully if the files cannot be overwritten.
+
+        """
+        logging.info("Save results tables and validation images")
+        if not self.vars['several_blob_per_arena']:
+            try:
+                self.one_row_per_arena.to_csv("one_row_per_arena.csv", sep=";", index=False, lineterminator='\n')
+                del self.one_row_per_arena
+            except PermissionError:
+                logging.error("Never let one_row_per_arena.csv open when Cellects runs")
+                self.message_from_thread.emit("Never let one_row_per_arena.csv open when Cellects runs")
+            try:
+                self.one_row_per_frame.to_csv("one_row_per_frame.csv", sep=";", index=False, lineterminator='\n')
+                del self.one_row_per_frame
+            except PermissionError:
+                logging.error("Never let one_row_per_frame.csv open when Cellects runs")
+                self.message_from_thread.emit("Never let one_row_per_frame.csv open when Cellects runs")
+        if self.all['extension'] == '.JPG':
+            extension = '.PNG'
+        else:
+            extension = '.JPG'
+        if with_last_image:
+            cv2.imwrite(f"Analysis efficiency, last image{extension}", self.last_image.bgr)
+        cv2.imwrite(
+            f"Analysis efficiency, {np.ceil(self.vars['img_number'] / 10).astype(np.uint64)}th image{extension}",
+            self.first_image.bgr)
+        software_settings = deepcopy(self.vars)
+        for key in ['descriptors', 'analyzed_individuals', 'exif', 'dims', 'origin_list', 'background_list', 'background_list2', 'folder_list', 'sample_number_per_folder']:
+            software_settings.pop(key, None)
+        global_settings = deepcopy(self.all)
+        for key in ['analyzed_individuals', 'night_mode', 'expert_mode', 'is_auto', 'arena', 'video_option', 'compute_all_options', 'vars', 'dims', 'origin_list', 'background_list', 'background_list2', 'descriptors', 'folder_list', 'sample_number_per_folder']:
+            global_settings.pop(key, None)
+        software_settings.update(global_settings)
+        software_settings = pd.DataFrame.from_dict(software_settings, columns=["Setting"], orient='index')
+        try:
+            software_settings.to_csv("software_settings.csv", sep=";")
+        except PermissionError:
+            logging.error("Never let software_settings.csv open when Cellects runs")
+            self.message_from_thread.emit("Never let software_settings.csv open when Cellects runs")
+
+
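A hedged usage sketch: the CSV files written above use ";" as separator, so they can be read back with pandas as follows. The file names match those written by save_tables; reading them from the working directory is an assumption.

    import pandas as pd

    per_arena = pd.read_csv("one_row_per_arena.csv", sep=";")
    per_frame = pd.read_csv("one_row_per_frame.csv", sep=";")
    settings = pd.read_csv("software_settings.csv", sep=";", index_col=0)
    print(per_arena.head())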