cellects 0.1.2-py3-none-any.whl → 0.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__main__.py +65 -25
- cellects/config/all_vars_dict.py +18 -17
- cellects/core/cellects_threads.py +1034 -396
- cellects/core/motion_analysis.py +1664 -2010
- cellects/core/one_image_analysis.py +1082 -1061
- cellects/core/program_organizer.py +1687 -1316
- cellects/core/script_based_run.py +80 -76
- cellects/gui/advanced_parameters.py +390 -330
- cellects/gui/cellects.py +102 -91
- cellects/gui/custom_widgets.py +16 -33
- cellects/gui/first_window.py +226 -104
- cellects/gui/if_several_folders_window.py +117 -68
- cellects/gui/image_analysis_window.py +866 -454
- cellects/gui/required_output.py +104 -57
- cellects/gui/ui_strings.py +840 -0
- cellects/gui/video_analysis_window.py +333 -155
- cellects/image_analysis/cell_leaving_detection.py +64 -4
- cellects/image_analysis/image_segmentation.py +451 -22
- cellects/image_analysis/morphological_operations.py +2166 -1635
- cellects/image_analysis/network_functions.py +616 -253
- cellects/image_analysis/one_image_analysis_threads.py +94 -153
- cellects/image_analysis/oscillations_functions.py +131 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
- cellects/image_analysis/shape_descriptors.py +517 -466
- cellects/utils/formulas.py +169 -6
- cellects/utils/load_display_save.py +362 -109
- cellects/utils/utilitarian.py +86 -9
- cellects-0.2.6.dist-info/LICENSE +675 -0
- cellects-0.2.6.dist-info/METADATA +829 -0
- cellects-0.2.6.dist-info/RECORD +44 -0
- cellects/core/one_video_per_blob.py +0 -540
- cellects/image_analysis/cluster_flux_study.py +0 -102
- cellects-0.1.2.dist-info/LICENSE.odt +0 -0
- cellects-0.1.2.dist-info/METADATA +0 -132
- cellects-0.1.2.dist-info/RECORD +0 -44
- {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
- {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
- {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
cellects/core/one_image_analysis.py
@@ -1,1061 +1,1082 @@
- [1,061 lines deleted: the 0.1.2 text of this file was not captured by this extraction; only stray keywords and "self." fragments survived]
+#!/usr/bin/env python3
+"""
+Module providing tools for single-image color space analysis and segmentation.
+
+The OneImageAnalysis class offers comprehensive image processing capabilities including
+color space conversion (RGB, HSV, LAB, LUV, HLS, YUV), filtering (Gaussian, median, bilateral),
+segmentation (Otsu thresholding, k-means clustering), and shape-based validation. It supports
+multi-step optimization of color channel combinations to maximize contrast between organisms
+and background through automated selection workflows involving logical operations on segmented regions.
+
+Classes
+    OneImageAnalysis : Analyze images using multiple color spaces for optimal segmentation
+
+Notes
+    Uses QThread for background operations during combination processing.
+"""
+
+import logging
+import os
+from copy import deepcopy
+import numpy as np
+import cv2  # named opencv-python
+import multiprocessing.pool as mp
+from numba.typed import List as TList
+from numba.typed import Dict as TDict
+from numpy.typing import NDArray
+from typing import Tuple
+from skimage.measure import perimeter
+from cellects.image_analysis.morphological_operations import cross_33, create_ellipse, spot_size_coefficients
+from cellects.image_analysis.image_segmentation import generate_color_space_combination, get_color_spaces, extract_first_pc, combine_color_spaces, apply_filter, otsu_thresholding, get_otsu_threshold, kmeans, windowed_thresholding
+from cellects.image_analysis.one_image_analysis_threads import SaveCombinationThread, ProcessFirstImage
+from cellects.image_analysis.network_functions import NetworkDetection
+from cellects.utils.formulas import bracket_to_uint8_image_contrast
+from cellects.utils.utilitarian import split_dict, translate_dict
+
+
+class OneImageAnalysis:
+    """
+    This class takes a 3D matrix (2 spatial and 1 color [BGR] dimensions).
+    Its methods allow image:
+    - conversion to any bgr/hsv/lab channels
+    - cropping
+    - rotating
+    - filtering using some of the most commonly used techniques:
+        - Gaussian, Median, Bilateral, Laplacian, Mexican hat
+    - segmenting using thresholds or kmeans
+    - shape selection according to horizontal size or shape ('circle' vs 'quadrilateral')
+
+    ps: a viewing method displays the image before and after the most recent modification made on the instance.
+    """
+    def __init__(self, image, shape_number=0):
+        self.image = image
+        if len(self.image.shape) == 2:
+            self.already_greyscale = True
+        else:
+            self.already_greyscale = False
+        self.image2 = None
+        self.binary_image2 = None
+        self.drift_correction_already_adjusted: bool = False
+        # Create empty variables to fill in the following functions
+        self.binary_image = np.zeros(self.image.shape[:2], dtype=np.uint8)
+        self.previous_binary_image = None
+        self.validated_shapes = np.zeros(self.image.shape[:2], dtype=np.uint8)
+        self.centroids = 0
+        self.shape_number = shape_number
+        self.concomp_stats = 0
+        self.y_boundaries = None
+        self.x_boundaries = None
+        self.crop_coord = None
+        self.cropped: bool = False
+        self.subtract_background = None
+        self.subtract_background2 = None
+        self.im_combinations = None
+        self.bgr = image
+        self.colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
+        self.spot_shapes = None
+        self.all_c_spaces = TDict()
+        self.hsv = None
+        self.hls = None
+        self.lab = None
+        self.luv = None
+        self.yuv = None
+        self.greyscale = None
+        self.greyscale2 = None
+        self.first_pc_vector = None
+        self.drift_mask_coord = None
+        self.saved_csc_nb = 0
+
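
A minimal orientation sketch, not part of the package diff: how this class is typically driven, assuming a BGR image on disk (the file name and combination index are hypothetical):

    import cv2
    from cellects.core.one_image_analysis import OneImageAnalysis

    bgr = cv2.imread("arena.jpg")            # hypothetical input image (BGR)
    oia = OneImageAnalysis(bgr)
    oia.find_first_im_csc(sample_number=1)   # rank color space combinations
    oia.update_current_images(0)             # adopt the best-ranked one
    print(oia.im_combinations[0]["csc"], oia.validated_shapes.sum())
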
+    def convert_and_segment(self, c_space_dict: dict, color_number=2, biomask: NDArray[np.uint8]=None,
+                            backmask: NDArray[np.uint8]=None, subtract_background: NDArray=None,
+                            subtract_background2: NDArray=None, rolling_window_segmentation: dict=None,
+                            lighter_background: bool=None,
+                            allowed_window: NDArray=None, filter_spec: dict=None):
+        """
+        Convert an image to grayscale and segment it based on specified parameters.
+
+        This method converts the given color space dictionary into grayscale
+        images, combines them with existing color spaces and performs segmentation.
+        It has special handling for images that are already in grayscale.
+
+        Args:
+            c_space_dict (dict): Dictionary containing color spaces.
+            color_number (int, optional): Number of colors to use in segmentation. Defaults to 2.
+            biomask (NDArray[np.uint8], optional): Biomask for segmentation. Defaults to None.
+            backmask (NDArray[np.uint8], optional): Backmask for segmentation. Defaults to None.
+            subtract_background (NDArray, optional): Background to subtract. Defaults to None.
+            subtract_background2 (NDArray, optional): Second background to subtract. Defaults to None.
+            rolling_window_segmentation (dict, optional): Parameters for rolling-window segmentation. Defaults to None.
+            lighter_background (bool, optional): Flag for lighter background. Defaults to None.
+            allowed_window (NDArray, optional): Window of pixels allowed during segmentation. Defaults to None.
+            filter_spec (dict, optional): Filter specifications. Defaults to None.
+
+        Attributes:
+            self.already_greyscale (bool): Indicates whether the image is already greyscale.
+            self.all_c_spaces (dict): Dictionary of color spaces.
+        """
+        if not self.already_greyscale:
+            first_dict, second_dict, c_spaces = split_dict(c_space_dict)
+            self.image, self.image2, all_c_spaces, self.first_pc_vector = generate_color_space_combination(self.bgr, c_spaces, first_dict, second_dict, subtract_background, subtract_background2)
+            if len(all_c_spaces) > len(self.all_c_spaces):
+                self.all_c_spaces = all_c_spaces
+
+        self.segmentation(logical=c_space_dict['logical'], color_number=color_number, biomask=biomask,
+                          backmask=backmask, rolling_window_segmentation=rolling_window_segmentation,
+                          lighter_background=lighter_background, allowed_window=allowed_window, filter_spec=filter_spec)
+
+
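
For reference, a sketch of the c_space_dict format as inferred from the code above and from find_first_im_csc below: channel weights per color space plus a 'logical' operator, with a "2"-suffixed space feeding the second image. The exact values are illustrative, not taken from the package:

    import numpy as np

    # Illustrative only: the b* channel of L*a*b* for image 1, combined
    # with the saturation channel of HSV for image 2 via a logical 'And'.
    c_space_dict = {
        "lab": np.array([0, 0, 1], dtype=np.int8),
        "logical": "And",
        "hsv2": np.array([0, 1, 0], dtype=np.int8),
    }
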
+    def segmentation(self, logical: str='None', color_number: int=2, biomask: NDArray[np.uint8]=None,
+                     backmask: NDArray[np.uint8]=None, bio_label=None, bio_label2=None,
+                     rolling_window_segmentation: dict=None, lighter_background: bool=None, allowed_window: Tuple=None,
+                     filter_spec: dict=None):
+        """
+        Implement segmentation on the image using various methods and parameters.
+
+        Args:
+            logical (str): Logical operation to perform between two binary images.
+                Options are 'Or', 'And', 'Xor'. Default is 'None'.
+            color_number (int): Number of colors to use in segmentation. Must be greater than 2
+                for kmeans clustering. Default is 2.
+            biomask (NDArray[np.uint8]): Binary mask for biological areas. Default is None.
+            backmask (NDArray[np.uint8]): Binary mask for background areas. Default is None.
+            bio_label (Any): Label for biological features. Default is None.
+            bio_label2 (Any): Secondary label for biological features. Default is None.
+            rolling_window_segmentation (dict): Parameters for rolling-window segmentation. Default is None.
+            lighter_background (bool): Indicates if the background is lighter than objects.
+                Default is None.
+            allowed_window (Tuple): Window of pixels allowed during segmentation. Default is None.
+            filter_spec (dict): Dictionary of filters to apply on the image before segmentation.
+        """
+        # 1. Check valid pixels for segmentation (e.g. when there is a drift correction)
+        if allowed_window is None:
+            min_y, max_y, min_x, max_x = 0, self.image.shape[0] + 1, 0, self.image.shape[1] + 1
+        else:
+            min_y, max_y, min_x, max_x = allowed_window
+        greyscale = self.image[min_y:max_y, min_x:max_x].copy()
+        # 2. Apply filter on the greyscale images
+        if filter_spec is not None and filter_spec["filter1_type"] != "":
+            greyscale = apply_filter(greyscale, filter_spec["filter1_type"], filter_spec["filter1_param"])
+
+        greyscale2 = None
+        if logical != 'None':
+            greyscale2 = self.image2[min_y:max_y, min_x:max_x].copy()
+            if filter_spec is not None and filter_spec["filter2_type"] != "":
+                greyscale2 = apply_filter(greyscale2, filter_spec["filter2_type"], filter_spec["filter2_param"])
+
+        # 3. Do one of the three segmentation algorithms: kmeans, otsu, windowed
+        if color_number > 2:
+            binary_image, binary_image2, self.bio_label, self.bio_label2 = kmeans(greyscale, greyscale2, color_number, biomask, backmask, logical, bio_label, bio_label2)
+        elif rolling_window_segmentation is not None and rolling_window_segmentation['do']:
+            binary_image = windowed_thresholding(greyscale, lighter_background, rolling_window_segmentation['side_len'],
+                                                 rolling_window_segmentation['step'], rolling_window_segmentation['min_int_var'])
+        else:
+            binary_image = otsu_thresholding(greyscale)
+        if logical != 'None' and color_number == 2:
+            if rolling_window_segmentation is not None and rolling_window_segmentation['do']:
+                binary_image2 = windowed_thresholding(greyscale2, lighter_background, rolling_window_segmentation['side_len'],
+                                                      rolling_window_segmentation['step'], rolling_window_segmentation['min_int_var'])
+            else:
+                binary_image2 = otsu_thresholding(greyscale2)
+
+        # 4. Use previous_binary_image to make sure that the specimens are labelled with ones and the background with zeros
+        if self.previous_binary_image is not None:
+            previous_binary_image = self.previous_binary_image[min_y:max_y, min_x:max_x]
+            if not (binary_image * previous_binary_image).any() or (binary_image[0, :].all() and binary_image[-1, :].all() and binary_image[:, 0].all() and binary_image[:, -1].all()):
+                # if (binary_image * (1 - previous_binary_image)).sum() > (binary_image * previous_binary_image).sum() + perimeter(binary_image):
+                # Ones of the binary image have more in common with the background than with the specimen
+                binary_image = 1 - binary_image
+            if logical != 'None':
+                if (binary_image2 * (1 - previous_binary_image)).sum() > (binary_image2 * previous_binary_image).sum():
+                    binary_image2 = 1 - binary_image2
+
+        # 5. Give the images back their original size and combine binary images (optional)
+        self.binary_image = np.zeros(self.image.shape, dtype=np.uint8)
+        self.binary_image[min_y:max_y, min_x:max_x] = binary_image
+        self.greyscale = np.zeros(self.image.shape, dtype=np.uint8)
+        self.greyscale[min_y:max_y, min_x:max_x] = greyscale
+        if logical != 'None':
+            self.binary_image2 = np.zeros(self.image.shape, dtype=np.uint8)
+            self.binary_image2[min_y:max_y, min_x:max_x] = binary_image2
+            self.greyscale2 = np.zeros(self.image.shape, dtype=np.uint8)
+            self.greyscale2[min_y:max_y, min_x:max_x] = greyscale2
+        if logical != 'None':
+            if logical == 'Or':
+                self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
+            elif logical == 'And':
+                self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
+            elif logical == 'Xor':
+                self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
+            self.binary_image = self.binary_image.astype(np.uint8)
+
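
Step 5 above reduces to a dispatch over three NumPy logical operators; a condensed sketch (the function name is hypothetical):

    import numpy as np

    def combine_binaries(binary1: np.ndarray, binary2: np.ndarray, logical: str) -> np.ndarray:
        # Combine two channel segmentations with the requested operator.
        ops = {"Or": np.logical_or, "And": np.logical_and, "Xor": np.logical_xor}
        return ops[logical](binary1, binary2).astype(np.uint8)
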
+    def _get_all_color_spaces(self):
+        """Generate and store all supported color spaces for the image."""
+        if len(self.all_c_spaces) < 6 and not self.already_greyscale:
+            self.all_c_spaces = get_color_spaces(self.bgr)
+
+    def generate_subtract_background(self, c_space_dict: dict, drift_corrected: bool=False):
+        """
+        Generate a background-subtracted image using the specified color space dictionary.
+
+        This method first checks if color spaces have already been generated or
+        if the image is greyscale. If not, it generates color spaces from the BGR
+        image. It then converts and segments the image using the provided color space
+        dictionary without rolling-window segmentation. A disk-shaped structuring element is
+        created and used to perform a morphological opening operation on the image,
+        resulting in a background-subtracted version. If there is a second image
+        (image2), the same operation is performed on it.
+
+        Args:
+            c_space_dict (dict): Dictionary containing color space specifications
+                for the segmentation process.
+            drift_corrected (bool): Whether the image was drift corrected. Defaults to False.
+
+        Attributes:
+            disk_size: Radius of the disk-shaped structuring element
+                used for morphological operations, calculated based on image dimensions.
+            subtract_background: Background-subtracted version of `image` obtained
+                after morphological operations with the disk-shaped structuring element.
+            subtract_background2: Background-subtracted version of `image2` obtained
+                after morphological operations with the disk-shaped structuring element,
+                if `image2` is present.
+        """
+        logging.info("Generate background using the generate_subtract_background method of OneImageAnalysis class")
+        self._get_all_color_spaces()
+        if drift_corrected:
+            # self.adjust_to_drift_correction(c_space_dict['logical'])
+            self.check_if_image_border_attest_drift_correction()
+        self.convert_and_segment(c_space_dict, rolling_window_segmentation=None, allowed_window=self.drift_mask_coord)
+        disk_size = np.max((3, int(np.floor(np.sqrt(np.min(self.bgr.shape[:2])) / 2))))
+        disk = create_ellipse(disk_size, disk_size).astype(np.uint8)
+        self.subtract_background = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, disk)
+        if self.image2 is not None:
+            self.subtract_background2 = cv2.morphologyEx(self.image2, cv2.MORPH_OPEN, disk)
+
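
The background estimate above is a grey-level morphological opening with a disk whose radius scales with image size. A stand-alone sketch, using OpenCV's built-in structuring element as a stand-in for the package's create_ellipse:

    import cv2
    import numpy as np

    def open_background(grey: np.ndarray) -> np.ndarray:
        # An opening removes structures smaller than the disk, leaving
        # the slowly varying illumination background.
        radius = max(3, int(np.floor(np.sqrt(min(grey.shape[:2])) / 2)))
        disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (radius, radius))
        return cv2.morphologyEx(grey, cv2.MORPH_OPEN, disk)
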
+    def check_if_image_border_attest_drift_correction(self) -> bool:
+        """
+        Check whether the borders of the binary image attest a drift correction.
+
+        To determine whether a drift correction was applied, this function
+        evaluates the borders of the binary image. If any two of them are fully white,
+        it assumes the image carries a drift-correction frame and computes the window
+        of pixels (drift_mask_coord) that remains valid for segmentation.
+
+        Returns:
+            bool: True if the borders attest a drift correction, False otherwise.
+        """
+        t = np.all(self.binary_image[0, :])
+        b = np.all(self.binary_image[-1, :])
+        l = np.all(self.binary_image[:, 0])
+        r = np.all(self.binary_image[:, -1])
+        self.drift_mask_coord = None
+        if (t and b) or (t and r) or (t and l) or (b and l) or (b and r) or (l and r):
+            cc_nb, shapes = cv2.connectedComponents(self.binary_image)
+            if cc_nb > 1:
+                if cc_nb == 2:
+                    drift_mask_coord = np.nonzero(1 - self.binary_image)
+                else:
+                    back = np.unique(np.concatenate((shapes[0, :], shapes[-1, :], shapes[:, 0], shapes[:, -1]), axis=0))
+                    drift_mask_coord = np.nonzero(np.logical_or(1 - self.binary_image, 1 - np.isin(shapes, back[back != 0])))
+                drift_mask_coord = (np.min(drift_mask_coord[0]), np.max(drift_mask_coord[0]) + 1,
+                                    np.min(drift_mask_coord[1]), np.max(drift_mask_coord[1]) + 1)
+                self.drift_mask_coord = drift_mask_coord
+                return True
+            else:
+                return False
+        else:
+            return False
+
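
The pairwise border condition above is equivalent to asking whether at least two of the four image borders are fully white; a condensed sketch:

    import numpy as np

    def borders_suggest_drift(binary: np.ndarray) -> bool:
        edges = [binary[0, :].all(), binary[-1, :].all(),
                 binary[:, 0].all(), binary[:, -1].all()]
        return sum(edges) >= 2
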
+    def adjust_to_drift_correction(self, logical: str):
+        """
+        Adjust the image and binary image to correct for drift.
+
+        This method applies a drift correction by dilating the binary image, calculating
+        the mean value of the drifted region and applying it back to the image. After this,
+        it applies Otsu's thresholding method to determine a new binary image and adjusts
+        the second image if present. The logical operation specified is then applied to the
+        binary images.
+
+        Args:
+            logical (str): Logical operation ('Or', 'And', 'Xor') to apply to the binary
+                images.
+        """
+        if not self.drift_correction_already_adjusted:
+            self.drift_correction_already_adjusted = True
+
+            mask = cv2.dilate(self.binary_image, kernel=cross_33)
+            mask -= self.binary_image
+            mask = np.nonzero(mask)
+            drift_correction = np.mean(self.image[mask[0], mask[1]])
+            self.image[np.nonzero(self.binary_image)] = drift_correction
+            threshold = get_otsu_threshold(self.image)
+            binary = (self.image > threshold)
+            self.binary_image = binary.astype(np.uint8)
+
+            if self.image2 is not None:
+                drift_correction2 = np.mean(self.image2[mask[0], mask[1]])
+                self.image2[np.nonzero(self.binary_image)] = drift_correction2
+                threshold = get_otsu_threshold(self.image2)
+                binary1 = (self.image2 > threshold)
+                binary2 = np.logical_not(binary1)
+                if binary1.sum() < binary2.sum():
+                    binary = binary1
+                else:
+                    binary = binary2
+                while np.any(binary * self.binary_image2) and threshold > 1:
+                    threshold -= 1
+                    binary1 = (self.image2 > threshold)
+                    binary2 = np.logical_not(binary1)
+                    if binary1.sum() < binary2.sum():
+                        binary = binary1
+                    else:
+                        binary = binary2
+                self.binary_image2 = binary.astype(np.uint8)
+            if logical == 'Or':
+                self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
+            elif logical == 'And':
+                self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
+            elif logical == 'Xor':
+                self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
+            self.binary_image = self.binary_image.astype(np.uint8)
+
+    def find_first_im_csc(self, sample_number: int=None, several_blob_per_arena: bool=True, spot_shape: str=None,
+                          spot_size=None, kmeans_clust_nb: int=None, biomask: NDArray[np.uint8]=None,
+                          backmask: NDArray[np.uint8]=None, color_space_dictionaries: TList=None, basic: bool=True):
+        """
+        Prepare color space lists, dictionaries and matrices.
+
+        Args:
+            sample_number: An integer representing the sample number. Defaults to None.
+            several_blob_per_arena: A boolean indicating whether there are several blobs per arena. Defaults to True.
+            spot_shape: A string representing the shape of the spot. Defaults to None.
+            spot_size: An integer representing the size of the spot. Defaults to None.
+            kmeans_clust_nb: An integer representing the number of clusters for K-means. Defaults to None.
+            biomask: A 2D numpy array of type np.uint8 representing the bio mask. Defaults to None.
+            backmask: A 2D numpy array of type np.uint8 representing the background mask. Defaults to None.
+            color_space_dictionaries: A list of dictionaries containing color space information. Defaults to None.
+            basic: A boolean selecting the standard set of color spaces to try. Defaults to True.
+
+        Note:
+            This method processes the input data to find the first image that matches certain criteria, using various color spaces and masks.
+        """
+        logging.info(f"Start automatic detection of the first image")
+        self.im_combinations = []
+        self.saved_images_list = TList()
+        self.converted_images_list = TList()
+        self.saved_color_space_list = list()
+        self.saved_csc_nb = 0
+
+        if self.image.any():
+            self._get_all_color_spaces()
+            if color_space_dictionaries is None:
+                if basic:
+                    colorspace_list = ["bgr", "lab", "hsv", "luv", "hls", "yuv"]
+                else:
+                    colorspace_list = ["bgr"]
+                color_space_dictionaries = TList()
+                for i, c_space in enumerate(colorspace_list):
+                    for i in np.arange(3):
+                        channels = np.array((0, 0, 0), dtype=np.int8)
+                        channels[i] = 1
+                        csc_dict = TDict()
+                        csc_dict[c_space] = channels
+                        color_space_dictionaries.append(csc_dict)
+
+            self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 11), dtype=np.uint32)
+            unaltered_cc_nb, cc_nb, area, width_std, height_std, area_std, biosum, backsum = 3, 4, 5, 6, 7, 8, 9, 10
+            self.save_combination_thread = SaveCombinationThread(self)
+            get_one_channel_result = True
+            combine_channels = False
+            logging.info(f"Try detection with each available color space channel, one by one.")
+            for csc_dict in color_space_dictionaries:
+                list_args = [self, get_one_channel_result, combine_channels, csc_dict, several_blob_per_arena,
+                             sample_number, spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, None]
+                ProcessFirstImage(list_args)
+
+            if sample_number is not None and basic:
+                # Try to add csc together
+                possibilities = []
+                if self.saved_csc_nb > 6:
+                    different_color_spaces = np.unique(self.saved_color_space_list)
+                    for color_space in different_color_spaces:
+                        csc_idx = np.nonzero(np.isin(self.saved_color_space_list, color_space))[0]
+                        possibilities.append(csc_idx[0] + np.argmin(self.combination_features[csc_idx, area_std]))
+                if len(possibilities) <= 6:
+                    remaining_possibilities = np.arange(len(self.saved_color_space_list))
+                    remaining_possibilities = remaining_possibilities[np.logical_not(np.isin(remaining_possibilities, possibilities))]
+                    while len(possibilities) <= 6:
+                        new_possibility = np.argmin(self.combination_features[remaining_possibilities, area_std])
+                        possibilities.append(new_possibility)
+                        remaining_possibilities = remaining_possibilities[remaining_possibilities != new_possibility]
+
+
+                pool = mp.ThreadPool(processes=os.cpu_count() - 1)
+                get_one_channel_result = False
+                combine_channels = True
+                list_args = [[self, get_one_channel_result, combine_channels, i, several_blob_per_arena, sample_number,
+                              spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, possibilities] for i in possibilities]
+                for process_i in pool.imap_unordered(ProcessFirstImage, list_args):
+                    pass
+
+            # Get the most and the least covered images and the 2 best biomask and backmask scores
+            # to try combinations of those
+            if self.saved_csc_nb <= 1:
+                csc_dict = {'bgr': np.array((1, 1, 1))}
+                list_args = [self, False, False, csc_dict, several_blob_per_arena,
+                             sample_number, spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, None]
+                process_i = ProcessFirstImage(list_args)
+                process_i.image = self.bgr.mean(axis=-1)
+                process_i.binary_image = otsu_thresholding(process_i.image)
+                process_i.csc_dict = csc_dict
+                process_i.total_area = process_i.binary_image.sum()
+                process_i.process_binary_image()
+                process_i.unaltered_concomp_nb, shapes = cv2.connectedComponents(process_i.validated_shapes)
+                self.save_combination_features(process_i)
+                self.combination_features = self.combination_features[:self.saved_csc_nb, :]
+                fit = np.array([True])
+            else:
+                coverage = np.argsort(self.combination_features[:self.saved_csc_nb, area])
+                most1 = coverage[-1]; most2 = coverage[-2]
+                least1 = coverage[0]; least2 = coverage[1]
+                if biomask is not None:
+                    bio_sort = np.argsort(self.combination_features[:self.saved_csc_nb, biosum])
+                    bio1 = bio_sort[-1]; bio2 = bio_sort[-2]
+                if backmask is not None:
+                    back_sort = np.argsort(self.combination_features[:self.saved_csc_nb, backsum])
+                    back1 = back_sort[-1]; back2 = back_sort[-2]
+
+                # Try a logical And between the most covered images
+                # Should only need one instantiation
+                process_i = ProcessFirstImage(
+                    [self, False, False, None, several_blob_per_arena, sample_number, spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, None])
+                process_i.binary_image = np.logical_and(self.saved_images_list[most1], self.saved_images_list[most2]).astype(np.uint8)
+                process_i.image = self.converted_images_list[most1]
+                process_i.process_binary_image()
+                process_i.csc_dict = {list(self.saved_color_space_list[most1].keys())[0]: self.combination_features[most1, :3],
+                                      "logical": "And",
+                                      list(self.saved_color_space_list[most2].keys())[0] + "2": self.combination_features[most2, :3]}
+                process_i.unaltered_concomp_nb = np.min(self.combination_features[(most1, most2), unaltered_cc_nb])
+                process_i.total_area = process_i.binary_image.sum()
+                self.save_combination_features(process_i)
+                process_i.image = self.converted_images_list[least1]
+                process_i.binary_image = np.logical_or(self.saved_images_list[least1], self.saved_images_list[least2]).astype(np.uint8)
+                process_i.process_binary_image()
+                process_i.csc_dict = {list(self.saved_color_space_list[least1].keys())[0]: self.combination_features[least1, :3],
+                                      "logical": "Or",
+                                      list(self.saved_color_space_list[least2].keys())[0] + "2": self.combination_features[least2, :3]}
+                process_i.unaltered_concomp_nb = np.max(self.combination_features[(least1, least2), unaltered_cc_nb])
+                process_i.total_area = process_i.binary_image.sum()
+                self.save_combination_features(process_i)
+
+                # self.save_combination_features(csc_dict, unaltered_concomp_nb, self.binary_image.sum(), biomask, backmask)
+
+                # If most images are very low in biosum or backsum, try to mix them together to improve that score
+                # Do a logical And between the two best biomasks
+                if biomask is not None:
+                    if not np.all(np.isin((bio1, bio2), (most1, most2))):
+                        process_i.image = self.converted_images_list[bio1]
+                        process_i.binary_image = np.logical_and(self.saved_images_list[bio1], self.saved_images_list[bio2]).astype(
+                            np.uint8)
+                        process_i.process_binary_image()
+                        process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
+                                              "logical": "And",
+                                              list(self.saved_color_space_list[bio2].keys())[0] + "2": self.combination_features[bio2, :3]}
+                        process_i.unaltered_concomp_nb = np.min(self.combination_features[(bio1, bio2), unaltered_cc_nb])
+                        process_i.total_area = process_i.binary_image.sum()
+
+                        self.save_combination_features(process_i)
+
+                # Do a logical And between the two best backmasks
+                if backmask is not None:
+                    if not np.all(np.isin((back1, back2), (most1, most2))):
+                        process_i.image = self.converted_images_list[back1]
+                        process_i.binary_image = np.logical_and(self.saved_images_list[back1], self.saved_images_list[back2]).astype(
+                            np.uint8)
+                        process_i.process_binary_image()
+                        process_i.csc_dict = {list(self.saved_color_space_list[back1].keys())[0]: self.combination_features[back1, :3],
+                                              "logical": "And",
+                                              list(self.saved_color_space_list[back2].keys())[0] + "2": self.combination_features[back2, :3]}
+                        process_i.unaltered_concomp_nb = np.min(self.combination_features[(back1, back2), unaltered_cc_nb])
+                        process_i.total_area = process_i.binary_image.sum()
+                        self.save_combination_features(process_i)
+                # Do a logical Or between the best biomask and the best backmask
+                if biomask is not None and backmask is not None:
+                    if not np.all(np.isin((bio1, back1), (least1, least2))):
+                        process_i.image = self.converted_images_list[bio1]
+                        process_i.binary_image = np.logical_and(self.saved_images_list[bio1], self.saved_images_list[back1]).astype(
+                            np.uint8)
+                        process_i.process_binary_image()
+                        process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
+                                              "logical": "Or",
+                                              list(self.saved_color_space_list[back1].keys())[0] + "2": self.combination_features[back1, :3]}
+                        process_i.unaltered_concomp_nb = np.max(self.combination_features[(bio1, back1), unaltered_cc_nb])
+                        # self.save_combination_features(csc_dict, unaltered_concomp_nb, self.binary_image.sum(), biomask,
+                        #                                backmask)
+                        process_i.total_area = self.binary_image.sum()
+                        self.save_combination_features(process_i)
+
+                if self.save_combination_thread.is_alive():
+                    self.save_combination_thread.join()
+                self.combination_features = self.combination_features[:self.saved_csc_nb, :]
+                # Only keep the rows that fulfilled the conditions
+                # Save all combinations if they fulfill the following conditions:
+                # - Their conncomp number is lower than 3 times the smaller conncomp number.
+                # - OR The minimal area variations
+                # - OR The minimal width variations
+                # - OR The minimal height variations
+                # - AND/OR their segmentation fits with biomask and backmask
+                width_std_fit = self.combination_features[:, width_std] == np.min(self.combination_features[:, width_std])
+                height_std_fit = self.combination_features[:, height_std] == np.min(self.combination_features[:, height_std])
+                area_std_fit = self.combination_features[:, area_std] < np.min(self.combination_features[:, area_std]) * 10
+                fit = np.logical_or(np.logical_or(width_std_fit, height_std_fit), area_std_fit)
+                biomask_fit = np.ones(self.saved_csc_nb, dtype=bool)
+                backmask_fit = np.ones(self.saved_csc_nb, dtype=bool)
+                if biomask is not None or backmask is not None:
+                    if biomask is not None:
+                        biomask_fit = self.combination_features[:, biosum] > 0.9 * len(biomask[0])
+                    if backmask is not None:
+                        backmask_fit = self.combination_features[:, backsum] > 0.9 * len(backmask[0])
+                    # First test a logical OR between the precedent options and the mask fits.
+                    fit = np.logical_or(fit, np.logical_and(biomask_fit, backmask_fit))
+                    # If this is not stringent enough, use a logical AND and increase progressively the proportion of pixels that
+                    # must match the biomask and the backmask
+                    if np.sum(fit) > 5:
+                        to_add = 0
+                        while np.sum(fit) > 5 and to_add <= 0.25:
+                            if biomask is not None:
+                                biomask_fit = self.combination_features[:, biosum] > (0.75 + to_add) * len(biomask[0])
+                            if backmask is not None:
+                                backmask_fit = self.combination_features[:, backsum] > (0.75 + to_add) * len(backmask[0])
+                            test_fit = np.logical_and(fit, np.logical_and(biomask_fit, backmask_fit))
+                            if np.sum(test_fit) != 0:
+                                fit = test_fit
+                            to_add += 0.05
+            # If saved_csc_nb is too low, try bool operators to mix them together to fill holes for instance
+            # Order the table according to the number of shapes that have been removed by filters
+            # cc_efficiency_order = np.argsort(self.combination_features[:, unaltered_cc_nb] - self.combination_features[:, cc_nb])
+            cc_efficiency_order = np.argsort(self.combination_features[:, area_std])
+            # Save and return a dictionary containing the selected color space combinations
+            # and their corresponding binary images
+
+            for saved_csc in cc_efficiency_order:
+                if fit[saved_csc]:
+                    self.im_combinations.append({})
+                    # self.im_combinations.append({})
+                    # self.im_combinations[len(self.im_combinations) - 1]["csc"] = self.saved_color_space_list[saved_csc]
+                    self.im_combinations[len(self.im_combinations) - 1]["csc"] = {}
+                    self.im_combinations[len(self.im_combinations) - 1]["csc"]['logical'] = 'None'
+                    for k, v in self.saved_color_space_list[saved_csc].items():
+                        self.im_combinations[len(self.im_combinations) - 1]["csc"][k] = v
+                    if backmask is not None:
+                        shape_number, shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
+                        if np.any(shapes[backmask]):
+                            shapes[np.isin(shapes, np.unique(shapes[backmask]))] = 0
+                            self.saved_images_list[saved_csc] = (shapes > 0).astype(np.uint8)
+                    if biomask is not None:
+                        self.saved_images_list[saved_csc][biomask] = 1
+                    if backmask is not None or biomask is not None:
+                        self.combination_features[saved_csc, cc_nb], shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
+                        self.combination_features[saved_csc, cc_nb] -= 1
+                    self.im_combinations[len(self.im_combinations) - 1]["binary_image"] = self.saved_images_list[saved_csc]
+                    self.im_combinations[len(self.im_combinations) - 1]["shape_number"] = self.combination_features[saved_csc, cc_nb]
+                    self.im_combinations[len(self.im_combinations) - 1]["converted_image"] = self.converted_images_list[saved_csc]
+
+        self.saved_color_space_list = []
+        self.saved_images_list = None
+        self.converted_images_list = None
+        self.combination_features = None
+
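
The candidate filter in find_first_im_csc keeps combinations whose per-shape size variability is minimal; a sketch of that test alone, using the column indices defined in the method above:

    import numpy as np

    def size_variability_fit(features: np.ndarray,
                             width_std: int = 6, height_std: int = 7,
                             area_std: int = 8) -> np.ndarray:
        # Keep rows with minimal width/height spread, or an area spread
        # within 10x the best one.
        w = features[:, width_std] == features[:, width_std].min()
        h = features[:, height_std] == features[:, height_std].min()
        a = features[:, area_std] < features[:, area_std].min() * 10
        return w | h | a
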
+    def save_combination_features(self, process_i: object):
+        """
+        Save the combination features of a given processed image.
+
+        Args:
+            process_i (object): The processed image object containing various attributes
+                such as validated_shapes, image, csc_dict, unaltered_concomp_nb,
+                shape_number, total_area, stats, biomask, and backmask.
+        """
+        if process_i.validated_shapes.any():
+            self.saved_images_list.append(process_i.validated_shapes)
+            self.converted_images_list.append(np.round(process_i.image).astype(np.uint8))
+            self.saved_color_space_list.append(process_i.csc_dict)
+            self.combination_features[self.saved_csc_nb, :3] = list(process_i.csc_dict.values())[0]
+            self.combination_features[
+                self.saved_csc_nb, 3] = process_i.unaltered_concomp_nb - 1  # unaltered_cc_nb
+            self.combination_features[self.saved_csc_nb, 4] = process_i.shape_number  # cc_nb
+            self.combination_features[self.saved_csc_nb, 5] = process_i.total_area  # area
+            self.combination_features[self.saved_csc_nb, 6] = np.std(process_i.stats[1:, 2])  # width_std
+            self.combination_features[self.saved_csc_nb, 7] = np.std(process_i.stats[1:, 3])  # height_std
+            self.combination_features[self.saved_csc_nb, 8] = np.std(process_i.stats[1:, 4])  # area_std
+            if process_i.biomask is not None:
+                self.combination_features[self.saved_csc_nb, 9] = np.sum(
+                    process_i.validated_shapes[process_i.biomask[0], process_i.biomask[1]])
+            if process_i.backmask is not None:
+                self.combination_features[self.saved_csc_nb, 10] = np.sum(
+                    (1 - process_i.validated_shapes)[process_i.backmask[0], process_i.backmask[1]])
+            self.saved_csc_nb += 1
+
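
For readability, the column layout of combination_features as written by the method above, summarized as a comment block (one row per saved combination):

    # combination_features column layout:
    #   0-2: channel weights of the color space
    #   3: unaltered_cc_nb   4: cc_nb      5: area
    #   6: width_std         7: height_std 8: area_std
    #   9: biosum (pixels agreeing with biomask)
    #  10: backsum (background pixels agreeing with backmask)
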
+    def update_current_images(self, current_combination_id: int):
+        """
+        Update the current images based on a given combination ID.
+
+        This method updates two attributes of the instance: `image` and
+        `validated_shapes`. The `image` attribute is set to the value of the key
+        "converted_image" from a dictionary in `im_combinations` which is
+        indexed by the provided `current_combination_id`. Similarly, the
+        `validated_shapes` attribute is set to the value of the key "binary_image"
+        from the same dictionary.
+
+        Args:
+            current_combination_id (int): The ID of the combination whose
+                images should be set as the current ones.
+        """
+        self.image = self.im_combinations[current_combination_id]["converted_image"]
+        self.validated_shapes = self.im_combinations[current_combination_id]["binary_image"]
+
def find_last_im_csc(self, concomp_nb: int, total_surfarea: int, max_shape_size: int, arenas_mask: NDArray=None,
|
|
645
|
+
ref_image: NDArray=None, subtract_background: NDArray=None, kmeans_clust_nb: int=None,
|
|
646
|
+
biomask: NDArray[np.uint8]=None, backmask: NDArray[np.uint8]=None,
|
|
647
|
+
color_space_dictionaries: dict=None, basic: bool=True):
|
|
648
|
+
"""
|
|
649
|
+
Find the last image color space configurations that meets given criteria.
|
|
650
|
+
|
|
651
|
+
Args:
|
|
652
|
+
concomp_nb (int): A tuple of two integers representing the minimum and maximum number of connected components.
|
|
653
|
+
total_surfarea (int): The total surface area required for the image.
|
|
654
|
+
max_shape_size (int): The maximum shape size allowed in the image.
|
|
655
|
+
arenas_mask (NDArray, optional): A numpy array representing areas inside the field of interest.
|
|
656
|
+
ref_image (NDArray, optional): A reference image for comparison.
|
|
657
|
+
subtract_background (NDArray, optional): A numpy array representing the background to be subtracted.
|
|
658
|
+
kmeans_clust_nb (int, optional): The number of clusters for k-means clustering.
|
|
659
|
+
biomask (NDArray[np.uint8], optional): A binary mask for biological structures.
|
|
660
|
+
backmask (NDArray[np.uint8], optional): A binary mask for background areas.
|
|
661
|
+
color_space_dictionaries (dict, optional): Dictionaries of color space configurations.
|
|
662
|
+
basic (bool, optional): A flag indicating whether to process colorspaces basic.
|
|
663
|
+
|
|
664
|
+
"""
|
|
665
|
+
logging.info(f"Start automatic detection of the last image")
|
|
666
|
+
self.im_combinations = []
|
|
667
|
+
self.saved_images_list = TList()
|
|
668
|
+
self.converted_images_list = TList()
|
|
669
|
+
self.saved_color_space_list = list()
|
|
670
|
+
self.saved_csc_nb = 0
|
|
671
|
+
|
|
672
|
+
if self.image.any():
|
|
673
|
+
if arenas_mask is None:
|
|
674
|
+
arenas_mask = np.ones_like(self.binary_image)
|
|
675
|
+
out_of_arenas = 1 - arenas_mask
|
|
676
|
+
self._get_all_color_spaces()
|
|
677
|
+
if color_space_dictionaries is None:
|
|
678
|
+
if basic:
|
|
679
|
+
colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
|
|
680
|
+
else:
|
|
681
|
+
colorspace_list = TList(("lab", "hsv"))
|
|
682
|
+
color_space_dictionaries = TList()
|
|
683
|
+
channels = np.array((1, 1, 1), dtype=np.int8)
|
|
684
|
+
csc_dict = TDict()
|
|
685
|
+
csc_dict["bgr"] = channels
|
|
686
|
+
color_space_dictionaries.append(csc_dict)
|
|
687
|
+
for i, c_space in enumerate(colorspace_list):
|
|
688
|
+
for i in np.arange(3):
|
|
689
|
+
channels = np.array((0, 0, 0), dtype=np.int8)
|
|
690
|
+
channels[i] = 1
|
|
691
|
+
csc_dict = TDict()
|
|
692
|
+
csc_dict[c_space] = channels
|
|
693
|
+
color_space_dictionaries.append(csc_dict)
|
|
694
|
+
if ref_image is not None:
|
|
695
|
+
ref_image = cv2.dilate(ref_image, cross_33)
|
|
696
|
+
else:
|
|
697
|
+
ref_image = np.ones(self.bgr.shape[:2], dtype=np.uint8)
|
|
698
|
+
out_of_arenas_threshold = 0.01 * out_of_arenas.sum()
|
|
699
|
+
self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 10), dtype=np.uint32)
|
|
700
|
+
cc_nb_idx, area_idx, out_of_arenas_idx, in_arena_idx, surf_in_common_idx, biosum_idx, backsum_idx = 3, 4, 5, 6, 7, 8, 9
|
|
701
|
+
self.save_combination_thread = SaveCombinationThread(self)
|
|
702
|
+
|
|
703
|
+
+            # Start with a PCA:
+            pca_dict = TDict()
+            pca_dict['PCA'] = np.array([1, 1, 1], dtype=np.int8)
+            self.image, explained_variance_ratio, first_pc_vector = extract_first_pc(self.bgr)
+            self.binary_image = otsu_thresholding(self.image)
+            nb, shapes = cv2.connectedComponents(self.binary_image)
+            nb -= 1
+            surf = self.binary_image.sum()
+            outside_pixels = np.sum(self.binary_image * out_of_arenas)
+            inside_pixels = np.sum(self.binary_image * arenas_mask)
+            in_common = np.sum(ref_image * self.binary_image)
+            self.converted_images_list.append(self.image)
+            self.saved_images_list.append(self.binary_image)
+            self.saved_color_space_list.append(pca_dict)
+            self.combination_features[self.saved_csc_nb, :3] = list(pca_dict.values())[0]
+            self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
+            self.combination_features[self.saved_csc_nb, area_idx] = surf
+            self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
+            self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
+            self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
+            if biomask is not None:
+                self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
+                    self.binary_image[biomask[0], biomask[1]])
+            if backmask is not None:
+                self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
+                    (1 - self.binary_image)[backmask[0], backmask[1]])
+            self.saved_csc_nb += 1
+
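The PCA baseline above relies on the Cellects helpers `extract_first_pc` and `otsu_thresholding`. The sketch below reimplements the same idea with plain NumPy and OpenCV as an illustration only; it is an assumption about the technique, not the helpers' actual code:

```python
import cv2
import numpy as np

bgr = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
pixels = bgr.reshape(-1, 3).astype(np.float64)
pixels -= pixels.mean(axis=0)
# First eigenvector of the channel covariance = first principal component.
eigvals, eigvecs = np.linalg.eigh(np.cov(pixels.T))
first_pc = pixels @ eigvecs[:, -1]
grey = cv2.normalize(first_pc.reshape(64, 64), None, 0, 255,
                     cv2.NORM_MINMAX).astype(np.uint8)
# Otsu picks the threshold automatically from the greyscale histogram.
_, binary = cv2.threshold(grey, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
```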
+            potentials = TDict()
+            # One channel processing
+            for csc_dict in color_space_dictionaries:
+                self.image = combine_color_spaces(csc_dict, self.all_c_spaces, subtract_background)
+                if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
+                    self.binary_image, self.binary_image2, self.bio_label, self.bio_label2 = kmeans(self.image, self.image2, kmeans_clust_nb, biomask, backmask)
+                else:
+                    self.binary_image = otsu_thresholding(self.image)
+                surf = np.sum(self.binary_image)
+                if surf < total_surfarea:
+                    nb, shapes = cv2.connectedComponents(self.binary_image)
+                    outside_pixels = np.sum(self.binary_image * out_of_arenas)
+                    inside_pixels = np.sum(self.binary_image * arenas_mask)
+                    if outside_pixels < inside_pixels:
+                        if (nb > concomp_nb[0] - 1) and (nb < concomp_nb[1]):
+                            in_common = np.sum(ref_image * self.binary_image)
+                            if in_common > 0:
+                                nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
+                                nb -= 1
+                                if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
+                                    c_space = list(csc_dict.keys())[0]
+                                    self.converted_images_list.append(self.image)
+                                    self.saved_images_list.append(self.binary_image)
+                                    self.saved_color_space_list.append(csc_dict)
+                                    self.combination_features[self.saved_csc_nb, :3] = csc_dict[c_space]
+                                    self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
+                                    self.combination_features[self.saved_csc_nb, area_idx] = surf
+                                    self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
+                                    self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
+                                    self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
+                                    if biomask is not None:
+                                        self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
+                                            self.binary_image[biomask[0], biomask[1]])
+                                    if backmask is not None:
+                                        self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
+                                            (1 - self.binary_image)[backmask[0], backmask[1]])
+                                    if np.isin(c_space, list(potentials.keys())):
+                                        potentials[c_space] += csc_dict[c_space]
+                                    else:
+                                        potentials[c_space] = csc_dict[c_space]
+                                    self.saved_csc_nb += 1
+            if len(potentials) > 0:
+                # All combination processing
+
+                # Add a combination of all selected channels:
+                self.saved_color_space_list.append(potentials)
+                self.image = combine_color_spaces(potentials, self.all_c_spaces, subtract_background)
+                if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
+                    self.binary_image, self.binary_image2, self.bio_label, self.bio_label2 = kmeans(self.image, kmeans_clust_nb=kmeans_clust_nb, biomask=biomask, backmask=backmask)
+                else:
+                    self.binary_image = otsu_thresholding(self.image)
+                surf = self.binary_image.sum()
+                nb, shapes = cv2.connectedComponents(self.binary_image)
+                nb -= 1
+                outside_pixels = np.sum(self.binary_image * out_of_arenas)
+                inside_pixels = np.sum(self.binary_image * arenas_mask)
+                in_common = np.sum(ref_image * self.binary_image)
+                self.converted_images_list.append(self.image)
+                self.saved_images_list.append(self.binary_image)
+                self.saved_color_space_list.append(potentials)
+                self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
+                self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
+                self.combination_features[self.saved_csc_nb, area_idx] = surf
+                self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
+                self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
+                self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
+                if biomask is not None:
+                    self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
+                        self.binary_image[biomask[0], biomask[1]])
+                if backmask is not None:
+                    self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
+                        (1 - self.binary_image)[backmask[0], backmask[1]])
+                self.saved_csc_nb += 1
+                # All combination processing
+                # Try to remove color spaces one by one
+                i = 0
+                original_length = len(potentials)
+                # Loop until only one color space remains, or until removing one
+                # changes the segmented area strongly enough
+                while len(potentials) > 1 and i < original_length // 2:
+                    color_space_to_remove = TList()
+                    previous_c_space = list(potentials.keys())[-1]
+                    for c_space in potentials.keys():
+                        try_potentials = potentials.copy()
+                        try_potentials.pop(c_space)
+                        if i > 0:
+                            try_potentials.pop(previous_c_space)
+                        self.image = combine_color_spaces(try_potentials, self.all_c_spaces, subtract_background)
+                        if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
+                            self.binary_image, self.binary_image2, self.bio_label, self.bio_label2 = kmeans(self.image, kmeans_clust_nb=kmeans_clust_nb, biomask=biomask, backmask=backmask)
+                        else:
+                            self.binary_image = otsu_thresholding(self.image)
+                        surf = np.sum(self.binary_image)
+                        if surf < total_surfarea:
+                            nb, shapes = cv2.connectedComponents(self.binary_image)
+                            outside_pixels = np.sum(self.binary_image * out_of_arenas)
+                            inside_pixels = np.sum(self.binary_image * arenas_mask)
+                            if outside_pixels < inside_pixels:
+                                if (nb > concomp_nb[0] - 1) and (nb < concomp_nb[1]):
+                                    in_common = np.sum(ref_image * self.binary_image)
+                                    if in_common > 0:
+                                        nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
+                                        nb -= 1
+                                        if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
+                                            # If the segmentation still fits the requirements
+                                            # without this color space, store the resulting values
+                                            self.converted_images_list.append(self.image)
+                                            self.saved_images_list.append(self.binary_image)
+                                            self.saved_color_space_list.append(try_potentials)
+                                            self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
+                                            self.combination_features[self.saved_csc_nb, area_idx] = surf
+                                            self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
+                                            self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
+                                            self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
+                                            if biomask is not None:
+                                                self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
+                                                    self.binary_image[biomask[0], biomask[1]])
+                                            if backmask is not None:
+                                                self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
+                                                    (1 - self.binary_image)[backmask[0], backmask[1]])
+                                            self.saved_csc_nb += 1
+                                            color_space_to_remove.append(c_space)
+                                            if i > 0:
+                                                color_space_to_remove.append(previous_c_space)
+                        # Otherwise (it did not pass every "if" layer above),
+                        # that color space is necessary and is kept
+                        previous_c_space = c_space
+                    color_space_to_remove = np.unique(color_space_to_remove)
+                    for remove_col_space in color_space_to_remove:
+                        potentials.pop(remove_col_space)
+                    i += 1
+                if len(potentials) > 0 and i > 1:
+                    self.converted_images_list.append(self.image)
+                    self.saved_images_list.append(self.binary_image)
+                    self.saved_color_space_list.append(potentials)
+                    self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
+                    self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
+                    self.combination_features[self.saved_csc_nb, area_idx] = surf
+                    self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
+                    self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
+                    self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
+                    if biomask is not None:
+                        self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
+                            self.binary_image[biomask[0], biomask[1]])
+                    if backmask is not None:
+                        self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
+                            (1 - self.binary_image)[backmask[0], backmask[1]])
+                    self.saved_csc_nb += 1
+
+            self.combination_features = self.combination_features[:self.saved_csc_nb, :]
+            # Among all potentials, select the best one, according to criteria of decreasing importance
+            cc_efficiency_order = np.argsort(self.combination_features[:, surf_in_common_idx] + self.combination_features[:, in_arena_idx] - self.combination_features[:, out_of_arenas_idx])
+
+            # Save and return a dictionary containing the selected color space combinations
+            # and their corresponding binary images
+            self.im_combinations = []
+            for saved_csc in cc_efficiency_order:
+                if len(self.saved_color_space_list[saved_csc]) > 0:
+                    self.im_combinations.append({})
+                    self.im_combinations[-1]["csc"] = {}
+                    self.im_combinations[-1]["csc"]['logical'] = 'None'
+                    for k, v in self.saved_color_space_list[saved_csc].items():
+                        self.im_combinations[-1]["csc"][k] = v
+                    self.im_combinations[-1]["binary_image"] = self.saved_images_list[saved_csc]
+                    self.im_combinations[-1]["converted_image"] = np.round(self.converted_images_list[saved_csc]).astype(np.uint8)
+            self.saved_color_space_list = []
+            self.saved_images_list = None
+            self.converted_images_list = None
+            self.combination_features = None
+
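The ranking above folds three of the stored feature columns into one score: overlap with the reference image, plus pixels inside the arenas, minus pixels outside them. A toy illustration of the criterion follows; the numbers are invented, and note that in the method above the features array is uint32, so the subtraction implicitly relies on the out-of-arena count staying smaller than the two other terms:

```python
import numpy as np

# Columns mirror surf_in_common_idx, in_arena_idx and out_of_arenas_idx.
features = np.array([[120, 900, 40],
                     [300, 950, 10],
                     [60, 400, 200]], dtype=np.int64)
score = features[:, 0] + features[:, 1] - features[:, 2]
order = np.argsort(score)  # candidate indices sorted by increasing score
```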
+    def network_detection(self, arenas_mask: NDArray=None, pseudopod_min_size: int=50, csc_dict: dict=None, biomask=None, backmask=None):
+        """
+        Perform network detection and pseudopod analysis on an image.
+
+        Parameters
+        ----------
+        arenas_mask : NDArray, optional
+            The mask indicating the arena regions in the image.
+        pseudopod_min_size : int, optional
+            The minimum size for pseudopods to be detected.
+        csc_dict : dict, optional
+            A dictionary containing color space conversion parameters. If None,
+            defaults to {'bgr': np.array((1, 1, 1), np.int8), 'logical': 'None'}.
+        biomask : NDArray, optional
+            The mask for biological objects in the image.
+        backmask : NDArray, optional
+            The background mask.
+
+        Notes
+        -----
+        This method modifies the object's state by setting `self.im_combinations`
+        with the results of network detection and pseudopod analysis.
+        """
logging.info(f"Start automatic detection of network(s) in the last image")
|
|
925
|
+
if len(self.bgr.shape) == 3:
|
|
926
|
+
if csc_dict is None:
|
|
927
|
+
csc_dict = {'bgr': np.array((1, 1, 1), np.int8), 'logical': 'None'}
|
|
928
|
+
self._get_all_color_spaces()
|
|
929
|
+
# csc_dict = translate_dict(csc_dict)
|
|
930
|
+
# self.image = combine_color_spaces(csc_dict, self.all_c_spaces)
|
|
931
|
+
first_dict, second_dict, c_spaces = split_dict(csc_dict)
|
|
932
|
+
self.image, _, _, first_pc_vector = generate_color_space_combination(self.bgr, c_spaces, first_dict, second_dict, all_c_spaces=self.all_c_spaces)
|
|
933
|
+
# if first_pc_vector is not None:
|
|
934
|
+
# csc_dict = {"bgr": first_pc_vector, "logical": 'None'}
|
|
935
|
+
greyscale = self.image
|
|
936
|
+
NetDet = NetworkDetection(greyscale, possibly_filled_pixels=arenas_mask)
|
|
937
|
+
NetDet.get_best_network_detection_method()
|
|
938
|
+
lighter_background = NetDet.greyscale_image[arenas_mask > 0].mean() < NetDet.greyscale_image[arenas_mask== 0].mean()
|
|
939
|
+
NetDet.detect_pseudopods(lighter_background, pseudopod_min_size=pseudopod_min_size, only_one_connected_component=False)
|
|
940
|
+
NetDet.merge_network_with_pseudopods()
|
|
941
|
+
cc_efficiency_order = np.argsort(NetDet.quality_metrics)
|
|
942
|
+
self.im_combinations = []
|
|
943
|
+
for _i in cc_efficiency_order:
|
|
944
|
+
res_i = NetDet.all_results[_i]
|
|
945
|
+
self.im_combinations.append({})
|
|
946
|
+
self.im_combinations[len(self.im_combinations) - 1]["csc"] = csc_dict
|
|
947
|
+
self.im_combinations[len(self.im_combinations) - 1]["converted_image"] = bracket_to_uint8_image_contrast(res_i['filtered'])
|
|
948
|
+
self.im_combinations[len(self.im_combinations) - 1]["binary_image"] = res_i['binary']
|
|
949
|
+
self.im_combinations[len(self.im_combinations) - 1]['filter_spec']= {'filter1_type': res_i['filter'], 'filter1_param': [np.min(res_i['sigmas']), np.max(res_i['sigmas'])], 'filter2_type': "", 'filter2_param': [1., 1.]}
|
|
950
|
+
self.im_combinations[len(self.im_combinations) - 1]['rolling_window']= res_i['rolling_window']
|
|
951
|
+
|
|
952
|
+
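Each entry appended above is a plain dictionary; reading one back looks like this (a sketch, assuming `analysis` is an instance that has already run network_detection):

```python
combo = analysis.im_combinations[0]
binary_network = combo["binary_image"]      # the detected network mask
filtered_image = combo["converted_image"]   # the filtered greyscale image
low_sigma, high_sigma = combo["filter_spec"]["filter1_param"]
rolling_window = combo["rolling_window"]
```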
+    def get_crop_coordinates(self):
+        """
+        Get the crop coordinates for image processing.
+
+        This method projects the image on both the x and y axes to detect rows
+        and columns of arenas, calculates the boundaries for cropping,
+        and determines whether the arenas are zigzagged.
+        """
logging.info("Project the image on the y axis to detect rows of arenas")
|
|
962
|
+
self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
|
|
963
|
+
logging.info("Project the image on the x axis to detect columns of arenas")
|
|
964
|
+
self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)
|
|
965
|
+
logging.info("Get crop coordinates using the get_crop_coordinates method of OneImageAnalysis class")
|
|
966
|
+
row_number = len(np.nonzero(self.y_boundaries)[0]) // 2
|
|
967
|
+
col_number = len(np.nonzero(self.x_boundaries)[0]) // 2
|
|
968
|
+
are_zigzag = None
|
|
969
|
+
if col_number > 0 and row_number > 0:
|
|
970
|
+
if (x_max_sum / col_number) * 2 < (y_max_sum / row_number):
|
|
971
|
+
are_zigzag = "columns"
|
|
972
|
+
elif (x_max_sum / col_number) > (y_max_sum / row_number) * 2:
|
|
973
|
+
are_zigzag = "rows"
|
|
974
|
+
# here automatically determine if are zigzag
|
|
975
|
+
x_boundary_number = (self.x_boundaries == 1).sum()
|
|
976
|
+
if x_boundary_number > 1:
|
|
977
|
+
if x_boundary_number < 4:
|
|
978
|
+
x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
|
|
979
|
+
else:
|
|
980
|
+
if are_zigzag == "columns":
|
|
981
|
+
x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0][::2]))) // 2
|
|
982
|
+
else:
|
|
983
|
+
x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
|
|
984
|
+
cx_min = np.where(self.x_boundaries == - 1)[0][0] - x_interval.astype(int)
|
|
985
|
+
cx_max = np.where(self.x_boundaries == 1)[0][col_number - 1] + x_interval.astype(int)
|
|
986
|
+
if cx_min < 0: cx_min = 0
|
|
987
|
+
if cx_max > len(self.x_boundaries): cx_max = len(self.x_boundaries) - 1
|
|
988
|
+
else:
|
|
989
|
+
cx_min = 0
|
|
990
|
+
cx_max = len(self.x_boundaries)# - 1
|
|
991
|
+
|
|
992
|
+
y_boundary_number = (self.y_boundaries == 1).sum()
|
|
993
|
+
if y_boundary_number > 1:
|
|
994
|
+
if y_boundary_number < 4:
|
|
995
|
+
y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
|
|
996
|
+
else:
|
|
997
|
+
if are_zigzag == "rows":
|
|
998
|
+
y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0][::2]))) // 2
|
|
999
|
+
else:
|
|
1000
|
+
y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
|
|
1001
|
+
cy_min = np.where(self.y_boundaries == - 1)[0][0] - y_interval.astype(int)
|
|
1002
|
+
cy_max = np.where(self.y_boundaries == 1)[0][row_number - 1] + y_interval.astype(int)
|
|
1003
|
+
if cy_min < 0: cy_min = 0
|
|
1004
|
+
if cy_max > len(self.y_boundaries): cy_max = len(self.y_boundaries) - 1
|
|
1005
|
+
else:
|
|
1006
|
+
cy_min = 0
|
|
1007
|
+
cy_max = len(self.y_boundaries)# - 1
|
|
1008
|
+
|
|
1009
|
+
self.crop_coord = [cy_min, cy_max, cx_min, cx_max]
|
|
1010
|
+
|
|
1011
|
+
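A toy run of the margin computation used above, on a hand-made boundary vector (all values hypothetical):

```python
import numpy as np

x_boundaries = np.zeros(100, dtype=int)
x_boundaries[[5, 35, 55, 85]] = -1   # columns of arenas start here
x_boundaries[[10, 40, 60, 90]] = 1   # and stop here
# Half of the largest gap between two falling edges: (90 - 60) // 2 = 15
x_interval = np.absolute(np.max(np.diff(np.where(x_boundaries == 1)[0]))) // 2
cx_min = np.where(x_boundaries == -1)[0][0] - x_interval  # 5 - 15 = -10
cx_min = max(cx_min, 0)                                   # clipped to 0
```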
+    def projection_to_get_peaks_boundaries(self, axis: int) -> Tuple[NDArray, int]:
+        """
+        Projection to get peaks' boundaries.
+
+        Calculate the projection of an array along a specified axis and
+        identify the boundaries of non-zero peaks.
+
+        Args:
+            axis (int): The axis along which to calculate the projection and
+                identify peaks' boundaries.
+
+        Returns:
+            Tuple[NDArray, int]: A tuple containing two elements: an array
+                marking the peaks' boundaries (-1 at rising edges, 1 at
+                falling edges) and an integer giving the maximum sum along
+                the specified axis.
+        """
+        sums = np.sum(self.validated_shapes, axis)
+        slopes = np.greater(sums, 0)
+        slopes = np.append(0, np.diff(slopes))
+        coord = np.nonzero(slopes)[0]
+        for ci in np.arange(len(coord)):
+            if ci % 2 == 0:
+                slopes[coord[ci]] = -1
+        return slopes, sums.max()
+
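A toy example of the edge-marking trick in the method above; the mask is invented for illustration:

```python
import numpy as np

mask = np.zeros((6, 10), dtype=np.uint8)
mask[:, 2:4] = 1   # first arena column
mask[:, 6:9] = 1   # second arena column

sums = np.sum(mask, 0)                           # projection on the x axis
slopes = np.append(0, np.diff(np.greater(sums, 0)))
coord = np.nonzero(slopes)[0]                    # [2, 4, 6, 9]
slopes[coord[::2]] = -1                          # rising edges -> -1, falling stay 1
print(sums.max())                                # 6, the peak height
```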
+    def automatically_crop(self, crop_coord):
+        """
+        Automatically crops the image using the given crop coordinates.
+
+        This method crops various attributes of the image such as the main image,
+        binary image, and color spaces. It also updates internal states related to
+        cropping.
+
+        Args:
+            crop_coord (tuple): The coordinates for cropping in the format
+                (start_y, end_y, start_x, end_x), representing the bounding box
+                region to crop from the image.
+        """
+        if not self.cropped and crop_coord is not None:
+            logging.info("Crop using the automatically_crop method of OneImageAnalysis class")
+            self.cropped = True
+            self.image = self.image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            self.bgr = deepcopy(self.bgr[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...])
+            self._get_all_color_spaces()
+            if self.im_combinations is not None:
+                for i in np.arange(len(self.im_combinations)):
+                    self.im_combinations[i]["binary_image"] = self.im_combinations[i]["binary_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+                    self.im_combinations[i]["converted_image"] = self.im_combinations[i]["converted_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+            self.binary_image = self.binary_image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+            if self.greyscale is not None:
+                self.greyscale = self.greyscale[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.greyscale2 is not None:
+                self.greyscale2 = self.greyscale2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.image2 is not None:
+                self.image2 = self.image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.binary_image2 is not None:
+                self.binary_image2 = self.binary_image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.subtract_background is not None:
+                self.subtract_background = self.subtract_background[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.subtract_background2 is not None:
+                self.subtract_background2 = self.subtract_background2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            self.validated_shapes = self.validated_shapes[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+
+            self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
+            self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)
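Taken together, the last three methods form the automatic-cropping flow. A hedged end-to-end sketch, where the `analysis` object and the preceding segmentation step are assumptions:

```python
# Assuming `analysis` already holds validated_shapes from a segmentation step:
analysis.get_crop_coordinates()                   # fills analysis.crop_coord
analysis.automatically_crop(analysis.crop_coord)  # crops all image attributes once
```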