pytme 0.3.1.post2__cp311-cp311-macosx_15_0_arm64.whl → 0.3.2.dev0__cp311-cp311-macosx_15_0_arm64.whl
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
- pytme-0.3.2.dev0.data/scripts/estimate_ram_usage.py +97 -0
- {pytme-0.3.1.post2.data → pytme-0.3.2.dev0.data}/scripts/match_template.py +213 -196
- {pytme-0.3.1.post2.data → pytme-0.3.2.dev0.data}/scripts/postprocess.py +40 -78
- {pytme-0.3.1.post2.data → pytme-0.3.2.dev0.data}/scripts/preprocess.py +4 -5
- {pytme-0.3.1.post2.data → pytme-0.3.2.dev0.data}/scripts/preprocessor_gui.py +49 -103
- {pytme-0.3.1.post2.data → pytme-0.3.2.dev0.data}/scripts/pytme_runner.py +46 -69
- {pytme-0.3.1.post2.dist-info → pytme-0.3.2.dev0.dist-info}/METADATA +2 -1
- pytme-0.3.2.dev0.dist-info/RECORD +136 -0
- scripts/estimate_ram_usage.py +97 -0
- scripts/match_template.py +213 -196
- scripts/match_template_devel.py +1339 -0
- scripts/postprocess.py +40 -78
- scripts/preprocess.py +4 -5
- scripts/preprocessor_gui.py +49 -103
- scripts/pytme_runner.py +46 -69
- tests/preprocessing/test_compose.py +31 -30
- tests/preprocessing/test_frequency_filters.py +17 -32
- tests/preprocessing/test_preprocessor.py +0 -19
- tests/preprocessing/test_utils.py +13 -1
- tests/test_analyzer.py +2 -10
- tests/test_backends.py +47 -18
- tests/test_density.py +72 -13
- tests/test_extensions.py +1 -0
- tests/test_matching_cli.py +23 -9
- tests/test_matching_exhaustive.py +5 -5
- tests/test_matching_utils.py +3 -3
- tests/test_orientations.py +12 -0
- tests/test_rotations.py +13 -23
- tests/test_structure.py +1 -7
- tme/__version__.py +1 -1
- tme/analyzer/aggregation.py +47 -16
- tme/analyzer/base.py +34 -0
- tme/analyzer/peaks.py +26 -13
- tme/analyzer/proxy.py +14 -0
- tme/backends/_jax_utils.py +91 -68
- tme/backends/cupy_backend.py +6 -19
- tme/backends/jax_backend.py +103 -98
- tme/backends/matching_backend.py +0 -17
- tme/backends/mlx_backend.py +0 -29
- tme/backends/npfftw_backend.py +100 -97
- tme/backends/pytorch_backend.py +65 -78
- tme/cli.py +2 -2
- tme/density.py +44 -57
- tme/extensions.cpython-311-darwin.so +0 -0
- tme/filters/_utils.py +52 -24
- tme/filters/bandpass.py +99 -105
- tme/filters/compose.py +133 -39
- tme/filters/ctf.py +51 -102
- tme/filters/reconstruction.py +67 -122
- tme/filters/wedge.py +296 -325
- tme/filters/whitening.py +39 -75
- tme/mask.py +2 -2
- tme/matching_data.py +87 -15
- tme/matching_exhaustive.py +70 -120
- tme/matching_optimization.py +9 -63
- tme/matching_scores.py +261 -100
- tme/matching_utils.py +150 -91
- tme/memory.py +1 -0
- tme/orientations.py +17 -3
- tme/preprocessor.py +0 -239
- tme/rotations.py +102 -70
- tme/structure.py +601 -631
- tme/types.py +1 -0
- pytme-0.3.1.post2.dist-info/RECORD +0 -133
- {pytme-0.3.1.post2.data → pytme-0.3.2.dev0.data}/scripts/estimate_memory_usage.py +0 -0
- {pytme-0.3.1.post2.dist-info → pytme-0.3.2.dev0.dist-info}/WHEEL +0 -0
- {pytme-0.3.1.post2.dist-info → pytme-0.3.2.dev0.dist-info}/entry_points.txt +0 -0
- {pytme-0.3.1.post2.dist-info → pytme-0.3.2.dev0.dist-info}/licenses/LICENSE +0 -0
- {pytme-0.3.1.post2.dist-info → pytme-0.3.2.dev0.dist-info}/top_level.txt +0 -0
@@ -87,11 +87,6 @@ def parse_args():
         help="Output prefix. Defaults to basename of first input. Extension is "
         "added with respect to chosen output format.",
     )
-    output_group.add_argument(
-        "--angles-clockwise",
-        action="store_true",
-        help="Report Euler angles in clockwise format expected by RELION.",
-    )
     output_group.add_argument(
         "--output-format",
         choices=[
@@ -173,13 +168,6 @@ def parse_args():
         default=None,
         help="Box size of extracted subtomograms, defaults to the centered template.",
     )
-    additional_group.add_argument(
-        "--mask-subtomograms",
-        action="store_true",
-        default=False,
-        help="Whether to mask subtomograms using the template mask. The mask will be "
-        "rotated according to determined angles.",
-    )
     additional_group.add_argument(
         "--invert-target-contrast",
         action="store_true",
@@ -219,20 +207,15 @@ def load_template(
 ):
     try:
         template = Density.from_file(filepath)
-        center = np.divide(np.subtract(template.shape, 1), 2)
         template_is_density = True
     except Exception:
-        template =
-        center = template.center_of_mass()
-        template = Density.from_structure(template, sampling_rate=sampling_rate)
+        template = Density.from_structure(filepath, sampling_rate=sampling_rate)
         template_is_density = False

-
-
-
-
-
-    return template, center, translation, template_is_density
+    if centering:
+        template = template.centered(0)
+    center = np.divide(np.subtract(template.shape, 1), 2)
+    return template, center, template_is_density


 def load_matching_output(path: str) -> List:
@@ -375,7 +358,7 @@ def normalize_input(foregrounds: Tuple[str], backgrounds: Tuple[str]) -> Tuple:
     update = tuple(slice(0, int(x)) for x in np.minimum(out_shape, scores.shape))
     scores_out = np.full(out_shape, fill_value=0, dtype=np.float32)
     scores_out[update] = data[0][update] - scores_norm[update]
-    scores_out = np.fmax(scores_out, 0, out=scores_out)
+    # scores_out = np.fmax(scores_out, 0, out=scores_out)
     scores_out[update] += scores_norm[update].mean()

     # scores_out[update] = np.divide(scores_out[update], 1 - scores_norm[update])
@@ -448,34 +431,24 @@ def main():
     if hasattr(cli_args, "no_centering"):
         cli_args.centering = not cli_args.no_centering

-
-    ret = load_template(
+    template, *_ = load_template(
         filepath=cli_args.template,
         sampling_rate=sampling_rate,
         centering=cli_args.centering,
     )
-    template, center_of_mass, translation, template_is_density = ret

     template_mask = template.empty
     template_mask.data[:] = 1
     if cli_args.template_mask is not None:
         template_mask = Density.from_file(cli_args.template_mask)
-
-
-            np.subtract(template.origin, template_mask.origin), template.sampling_rate
-        )
-        translation = np.add(translation, origin_translation)
-
-        template_mask = template_mask.rigid_transform(
-            rotation_matrix=np.eye(template_mask.data.ndim),
-            translation=-translation,
-            order=1,
-        )
+        if cli_args.centering:
+            template_mask.pad(template.shape, center=True)

     if args.mask_edges and args.min_boundary_distance == 0:
         max_shape = np.max(template.shape)
         args.min_boundary_distance = np.ceil(np.divide(max_shape, 2))

+    # Do the actual peak calling
     orientations = args.orientations
     if orientations is None:
         translations, rotations, scores, details = [], [], [], []
@@ -518,18 +491,20 @@ def main():
         print(f"Determined cutoff --min-score {minimum_score}.")
         args.min_score = max(minimum_score, 0)

-
-
-
+    projection_dims = None
+    batch_dims = getattr(cli_args, "batch_dims", None)
+    if getattr(cli_args, "match_projection", False):
+        projection_dims = batch_dims

     peak_caller_kwargs = {
         "shape": scores.shape,
         "num_peaks": args.num_peaks,
         "min_distance": args.min_distance,
         "min_boundary_distance": args.min_boundary_distance,
-        "batch_dims": args.batch_dims,
         "min_score": args.min_score,
         "max_score": args.max_score,
+        "batch_dims": batch_dims,
+        "projection_dims": projection_dims,
     }

     peak_caller = PEAK_CALLERS[args.peak_caller](**peak_caller_kwargs)
@@ -551,13 +526,9 @@ def main():
             exit(-1)

         for translation, _, score, detail in zip(*candidates):
-
-            rotation = rotation_mapping.get(
-
-            )
-            if rotation.ndim == 2:
-                rotation = euler_from_rotationmatrix(rotation)
-            rotations.append(rotation)
+            index = rotation_array[tuple(translation)]
+            rotation = rotation_mapping.get(index, np.eye(template.data.ndim))
+            rotations.append(euler_from_rotationmatrix(rotation, seq="ZYZ"))

         if len(rotations):
             rotations = np.vstack(rotations).astype(float)
@@ -583,11 +554,10 @@ def main():

     if args.peak_oversampling > 1:
         if data[0].ndim != data[2].ndim:
-
+            exit(
                 "Input pickle does not contain template matching scores."
                 " Cannot oversample peaks."
             )
-            exit(-1)
         peak_caller = peak_caller = PEAK_CALLERS[args.peak_caller](shape=scores.shape)
         orientations.translations = peak_caller.oversample_peaks(
             scores=data[0],
@@ -597,8 +567,6 @@ def main():

     if args.local_optimization:
         target = Density.from_file(cli_args.target, use_memmap=True)
-        orientations.translations = orientations.translations.astype(np.float32)
-        orientations.rotations = orientations.rotations.astype(np.float32)
         for index, (translation, angles, *_) in enumerate(orientations):
             score_object = create_score_object(
                 score="FLC",
@@ -619,12 +587,11 @@ def main():
                 x0=[*init_translation, *angles],
             )
             orientations.translations[index] = np.add(translation, center)
-            orientations.rotations[index] =
+            orientations.rotations[index] = euler_from_rotationmatrix(
+                rotation_matrix, seq="ZYZ"
+            )
             orientations.scores[index] = score * -1

-    if args.angles_clockwise:
-        orientations.rotations *= -1
-
     if args.output_format in ("orientations", "relion4", "relion5"):
         file_format, extension = "text", "tsv"

@@ -691,12 +658,6 @@ def main():
                 sampling_rate=sampling_rate,
                 origin=np.multiply(cand_start, sampling_rate),
             )
-            if args.mask_subtomograms:
-                rotation_matrix = euler_to_rotationmatrix(orientations.rotations[index])
-                mask_transfomed = template_mask.rigid_transform(
-                    rotation_matrix=rotation_matrix, order=1
-                )
-                out_density.data = out_density.data * mask_transfomed.data
             out_density.to_file(
                 join(working_directory, f"{args.output_prefix}_{index}.mrc")
             )
@@ -713,10 +674,11 @@ def main():
         out = np.zeros_like(template.data)
         for index in range(len(cand_slices)):
             subset = Density(target.data[obs_slices[index]])
-            rotation_matrix = euler_to_rotationmatrix(orientations.rotations[index])

+            # We invert to pull the local into the global reference system
+            matrix = euler_to_rotationmatrix(orientations.rotations[index]).T
             subset = subset.rigid_transform(
-                rotation_matrix=
+                rotation_matrix=matrix,
                 order=1,
                 use_geometric_center=True,
             )
@@ -728,37 +690,37 @@ def main():
         ret.to_file(f"{args.output_prefix}.mrc")
         exit(0)

-    template, center, *_ = load_template(
+    template, center, template_is_density, *_ = load_template(
         filepath=cli_args.template,
         sampling_rate=sampling_rate,
         centering=cli_args.centering,
     )

+    _, ext = splitext(cli_args.template)
     for index, (translation, angles, *_) in enumerate(orientations):
-
+        rotation = euler_to_rotationmatrix(angles, seq="ZYZ")
         if template_is_density:
             transformed_template = template.rigid_transform(
-                rotation_matrix=
+                rotation_matrix=rotation, use_geometric_center=True
             )
             # Just adapting the coordinate system not the in-box position
             shift = np.multiply(np.subtract(translation, center), sampling_rate)
             transformed_template.origin = np.add(target_origin, shift)
-
         else:
             template = Structure.from_file(cli_args.template)
-
-
-
-
+            shift = np.add(np.multiply(translation, sampling_rate), target_origin)
+            translation = np.subtract(shift, template.center_of_mass())
+
+            # Since we move the template's center of mass to the geometric center
+            # during matching and analysis we use the center of mass
+            # directly for rotating structures into the correct orientation
             transformed_template = template.rigid_transform(
                 translation=translation,
-                rotation_matrix=
+                rotation_matrix=rotation,
+                use_geometric_center=False,
             )
-
-            transformed_template.to_file(
-                f"{args.output_prefix}_{index}{template_extension}"
-            )
-            index += 1
+
+        transformed_template.to_file(f"{args.output_prefix}_{index}{ext}")


 if __name__ == "__main__":
@@ -126,7 +126,7 @@ def main():
     if args.align_axis is not None:
         rmat = data.align_to_axis(axis=args.align_axis, flip=args.flip_axis)
         data = data.rigid_transform(
-            rotation_matrix=rmat,
+            rotation_matrix=rmat.T, use_geometric_center=True
         )
         data = Density.from_structure(data, sampling_rate=sampling_rate)

@@ -138,11 +138,11 @@ def main():
     if args.align_axis is not None:
         rmat = data.align_to_axis(axis=args.align_axis, flip=args.flip_axis)
         data = data.rigid_transform(
-            rotation_matrix=rmat,
+            rotation_matrix=rmat.T, use_geometric_center=True
        )

     if not args.no_centering:
-        data
+        data = data.centered(0)

     if args.box_size is None:
         scale = np.divide(data.sampling_rate, args.sampling_rate)
@@ -177,9 +177,8 @@ def main():
         lowpass=lowpass,
         highpass=None,
         use_gaussian=True,
-        return_real_fourier=True,
         sampling_rate=data.sampling_rate,
-    )(shape=data.shape)["data"]
+    )(shape=data.shape, return_real_fourier=True)["data"]
     bpf_mask = be.to_backend_array(bpf_mask)

     data_ft = be.rfftn(be.to_backend_array(data.data), s=data.shape)
@@ -26,14 +26,16 @@ from tme.backends import backend as be
 from tme.rotations import align_vectors
 from tme.matching_utils import create_mask, load_pickle
 from tme import Preprocessor, Density, Orientations
-from tme.filters import BandPassReconstructed, CTFReconstructed
+from tme.filters import BandPassReconstructed, CTFReconstructed, WedgeReconstructed

 preprocessor = Preprocessor()
 SLIDER_MIN, SLIDER_MAX = 0, 25


-def
-
+def _apply_fourier_filter(arr, arr_filter):
+    arr_ft = np.fft.rfftn(arr, s=arr.shape)
+    arr_ft = np.multiply(arr_ft, arr_filter, out=arr_ft)
+    return np.real(np.fft.irfftn(arr_ft, s=arr.shape))


 def bandpass_filter(
@@ -48,13 +50,8 @@ def bandpass_filter(
         highpass=highpass_angstrom,
         sampling_rate=np.max(sampling_rate),
         use_gaussian=not hard_edges,
-
-    )
-    template_ft = np.fft.rfftn(template, s=template.shape)
-
-    mask = bpf(shape=template.shape)["data"]
-    np.multiply(template_ft, mask, out=template_ft)
-    return np.fft.irfftn(template_ft, s=template.shape).real
+    )(shape=template.shape, return_real_fourier=True)["data"]
+    return _apply_fourier_filter(template, bpf)


 def ctf_filter(
@@ -70,9 +67,7 @@ def ctf_filter(
 ) -> NDArray:
     fast_shape = [next_fast_len(x) for x in np.multiply(template.shape, 2)]
     template_pad = be.topleft_pad(template, fast_shape)
-    template_ft = np.fft.rfftn(template_pad, s=template_pad.shape)
     ctf = CTFReconstructed(
-        shape=fast_shape,
         defocus_x=[defocus_angstrom],
         acceleration_voltage=acceleration_voltage * 1e3,
         spherical_aberration=spherical_aberration * 1e7,
@@ -80,22 +75,18 @@ def ctf_filter(
         phase_shift=phase_shift,
         defocus_angle=defocus_angle,
         sampling_rate=np.max(sampling_rate),
-        return_real_fourier=True,
         flip_phase=flip_phase,
-    )
-
-
-    template = be.topleft_pad(template_pad, template.shape)
-    return template
+    )(shape=template.shape, return_real_fourier=True)["data"]
+    template = _apply_fourier_filter(template, ctf)
+    return be.topleft_pad(template_pad, template.shape)


-def
-    template
-
-
-
-
-)
+def gaussian_filter(template: NDArray, sigma: float, **kwargs: dict) -> NDArray:
+    return preprocessor.gaussian_filter(template=template, sigma=sigma, **kwargs)
+
+
+def difference_of_gaussian_filter(template, sigmas: Tuple[float, float], **kwargs):
+    return gaussian_filter(template, sigmas[0]) - gaussian_filter(template, sigmas[1])


 def edge_gaussian_filter(
@@ -132,11 +123,7 @@ def local_gaussian_filter(
     )


-def mean(
-    template: NDArray,
-    width: int,
-    **kwargs: dict,
-) -> NDArray:
+def mean(template: NDArray, width: int, **kwargs: dict) -> NDArray:
     return preprocessor.mean_filter(template=template, width=width)


@@ -147,45 +134,23 @@ def wedge(
     tilt_step: float = 0,
     opening_axis: int = 2,
     tilt_axis: int = 0,
-    omit_negative_frequencies: bool = False,
-    infinite_plane: bool = False,
-    weight_angle: bool = False,
     **kwargs,
 ) -> NDArray:
-
-
-
-
-
-
-
-
-
-
-            infinite_plane=infinite_plane,
-        )
-    else:
-        weights = None
-        tilt_angles = np.arange(-tilt_start, tilt_stop + tilt_step, tilt_step)
-        if weight_angle:
-            weights = np.cos(np.radians(tilt_angles))
-
-        wedge_mask = preprocessor.step_wedge_mask(
-            tilt_angles=tilt_angles,
-            tilt_axis=tilt_axis,
-            opening_axis=opening_axis,
-            shape=template.shape,
-            weights=weights,
-            omit_negative_frequencies=omit_negative_frequencies,
-        )
-
-    np.multiply(template_ft, wedge_mask, out=template_ft)
-    template = np.real(np.fft.ifftn(template_ft))
-    return template
+    mask = wedge_mask(
+        template=template,
+        fftshift=False,
+        tilt_start=tilt_start,
+        tilt_stop=tilt_stop,
+        tilt_step=tilt_step,
+        return_real_fourier=True,
+        weight_angle=False,
+    )
+    return _apply_fourier_filter(template, mask)


 def compute_power_spectrum(template: NDArray) -> NDArray:
-    return np.fft.fftshift(np.log(np.abs(np.fft.fftn(template))))
+    return np.fft.fftshift(np.log(1 + np.abs(np.fft.fftn(template))))
+    # return np.fft.fftshift(np.log(np.abs(np.fft.fftn(template))))


 def invert_contrast(template: NDArray) -> NDArray:
@@ -515,39 +480,30 @@ def wedge_mask(
     tilt_step: float = 0,
     opening_axis: int = 2,
     tilt_axis: int = 0,
-    omit_negative_frequencies: bool = False,
-    infinite_plane: bool = False,
-    weight_angle: bool = False,
     **kwargs,
 ) -> NDArray:
-
-
-
-
-
-
-
-
-            infinite_plane=infinite_plane,
-        )
-        wedge_mask = np.fft.fftshift(wedge_mask)
-        return wedge_mask
-
-    weights = None
-    tilt_angles = np.arange(-tilt_start, tilt_stop + tilt_step, tilt_step)
-    if weight_angle:
-        weights = np.cos(np.radians(tilt_angles))
-
-    wedge_mask = preprocessor.step_wedge_mask(
-        tilt_angles=tilt_angles,
+    angles = (tilt_start, tilt_stop)
+    continuous_wedge = tilt_step == 0
+    if not continuous_wedge:
+        angles = np.arange(-tilt_start, tilt_stop + tilt_step, tilt_step)
+
+    return_real_fourier = kwargs.get("return_real_fourier", False)
+    func = WedgeReconstructed(
+        angles=angles,
         tilt_axis=tilt_axis,
         opening_axis=opening_axis,
-
-
-
+        frequency_cutoff=0.5,
+        create_continuous_wedge=continuous_wedge,
+        weight_wedge=kwargs.get("weight_angle", False),
     )
-
-
+    wedge_mask = func(shape=template.shape, return_real_fourier=return_real_fourier)[
+        "data"
+    ]
+    if kwargs.get("fftshift", True):
+        axes = [i for i in range(wedge_mask.ndim)]
+        if return_real_fourier:
+            _ = axes.pop(-1)
+        wedge_mask = np.fft.fftshift(wedge_mask, axes=axes)
     return wedge_mask


@@ -570,8 +526,7 @@ def threshold_mask(
     mask[mask < np.exp(-np.square(sigma))] = 0

     if invert:
-
-
+        mask = 1 - mask
     return mask


@@ -758,15 +713,6 @@ class MaskWidget(widgets.Container):
         if self.method_dropdown.value == "Shape":
             new_layer.metadata = {}

-        # origin_layer = metadata["origin_layer"]
-        # if origin_layer in self.viewer.layers:
-        # origin_layer = self.viewer.layers[origin_layer]
-        # if np.allclose(origin_layer.data.shape, processed_data.shape):
-        # in_mask = np.sum(np.fmax(origin_layer.data * processed_data, 0))
-        # in_mask /= np.sum(np.fmax(origin_layer.data, 0))
-        # in_mask *= 100
-        # self.density_field.value = f"Positive Density in Mask: {in_mask:.2f}%"
-

 class AlignmentWidget(widgets.Container):
     def __init__(self, viewer):
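A note on the new `_apply_fourier_filter` helper in the preprocessor GUI hunks above: filters are sampled on the half-spectrum grid produced by `np.fft.rfftn`, multiplied in frequency space, and transformed back. A minimal standalone sketch of that pattern (the volume size and the all-pass filter below are illustrative placeholders, not values from the package):

    import numpy as np

    # Multiply the half-spectrum (rfftn) of an array by a precomputed transfer
    # function, then invert the transform.
    def apply_fourier_filter(arr, arr_filter):
        arr_ft = np.fft.rfftn(arr, s=arr.shape)
        arr_ft = np.multiply(arr_ft, arr_filter, out=arr_ft)
        return np.real(np.fft.irfftn(arr_ft, s=arr.shape))

    volume = np.random.rand(32, 32, 32)
    # rfftn keeps only non-negative frequencies on the last axis: (..., n // 2 + 1)
    all_pass = np.ones((32, 32, 17))
    filtered = apply_fourier_filter(volume, all_pass)
    assert filtered.shape == volume.shape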
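On the transposed rotation matrices in the updated `rigid_transform` calls above (`rotation_matrix=rmat.T`, and the comment "We invert to pull the local into the global reference system"): a small self-contained check of the underlying identity, assuming only standard linear algebra:

    import numpy as np

    # A proper rotation matrix is orthonormal, so its transpose equals its inverse;
    # transposing therefore maps coordinates from the rotated (local) frame back to
    # the original (global) frame.
    theta = np.radians(30.0)
    R = np.array(
        [
            [np.cos(theta), -np.sin(theta), 0.0],
            [np.sin(theta), np.cos(theta), 0.0],
            [0.0, 0.0, 1.0],
        ]
    )
    assert np.allclose(R.T @ R, np.eye(3))
    assert np.allclose(np.linalg.inv(R), R.T)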
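The revised `compute_power_spectrum` uses `np.log(1 + np.abs(...))` rather than `np.log(np.abs(...))`. A minimal illustration of the practical difference (the all-zero input is a contrived placeholder):

    import numpy as np

    # log(0) is -inf, so adding 1 before the logarithm keeps zero-magnitude
    # frequency components finite when displaying a power spectrum.
    magnitudes = np.abs(np.fft.fftn(np.zeros((8, 8))))
    with np.errstate(divide="ignore"):
        plain_log = np.log(magnitudes)
    offset_log = np.log(1 + magnitudes)
    assert np.isneginf(plain_log).all()
    assert np.isfinite(offset_log).all()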