pytme 0.2.1__cp311-cp311-macosx_14_0_arm64.whl → 0.2.3__cp311-cp311-macosx_14_0_arm64.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (52)
  1. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/match_template.py +219 -216
  2. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/postprocess.py +86 -54
  3. pytme-0.2.3.data/scripts/preprocess.py +132 -0
  4. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/preprocessor_gui.py +181 -94
  5. pytme-0.2.3.dist-info/METADATA +92 -0
  6. pytme-0.2.3.dist-info/RECORD +75 -0
  7. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/WHEEL +1 -1
  8. pytme-0.2.1.data/scripts/preprocess.py → scripts/eval.py +1 -1
  9. scripts/extract_candidates.py +20 -13
  10. scripts/match_template.py +219 -216
  11. scripts/match_template_filters.py +154 -95
  12. scripts/postprocess.py +86 -54
  13. scripts/preprocess.py +95 -56
  14. scripts/preprocessor_gui.py +181 -94
  15. scripts/refine_matches.py +265 -61
  16. tme/__init__.py +0 -1
  17. tme/__version__.py +1 -1
  18. tme/analyzer.py +458 -813
  19. tme/backends/__init__.py +40 -11
  20. tme/backends/_jax_utils.py +187 -0
  21. tme/backends/cupy_backend.py +109 -226
  22. tme/backends/jax_backend.py +230 -152
  23. tme/backends/matching_backend.py +445 -384
  24. tme/backends/mlx_backend.py +32 -59
  25. tme/backends/npfftw_backend.py +240 -507
  26. tme/backends/pytorch_backend.py +30 -151
  27. tme/density.py +248 -371
  28. tme/extensions.cpython-311-darwin.so +0 -0
  29. tme/matching_data.py +328 -284
  30. tme/matching_exhaustive.py +195 -1499
  31. tme/matching_optimization.py +143 -106
  32. tme/matching_scores.py +887 -0
  33. tme/matching_utils.py +287 -388
  34. tme/memory.py +377 -0
  35. tme/orientations.py +78 -21
  36. tme/parser.py +3 -4
  37. tme/preprocessing/_utils.py +61 -32
  38. tme/preprocessing/composable_filter.py +7 -4
  39. tme/preprocessing/compose.py +7 -3
  40. tme/preprocessing/frequency_filters.py +49 -39
  41. tme/preprocessing/tilt_series.py +44 -72
  42. tme/preprocessor.py +560 -526
  43. tme/structure.py +491 -188
  44. tme/types.py +5 -3
  45. pytme-0.2.1.dist-info/METADATA +0 -73
  46. pytme-0.2.1.dist-info/RECORD +0 -73
  47. tme/helpers.py +0 -881
  48. tme/matching_constrained.py +0 -195
  49. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/estimate_ram_usage.py +0 -0
  50. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/LICENSE +0 -0
  51. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/entry_points.txt +0 -0
  52. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/top_level.txt +0 -0
@@ -8,7 +8,6 @@
 import os
 import argparse
 import warnings
-import importlib.util
 from sys import exit
 from time import time
 from typing import Tuple
@@ -22,7 +21,6 @@ from tme.matching_utils import (
     get_rotation_matrices,
     get_rotations_around_vector,
     compute_parallelization_schedule,
-    euler_from_rotationmatrix,
     scramble_phases,
     generate_tempfile_name,
     write_pickle,
@@ -33,9 +31,9 @@ from tme.analyzer import (
     MaxScoreOverRotations,
     PeakCallerMaximumFilter,
 )
-from tme.backends import backend
+from tme.backends import backend as be
 from tme.preprocessing import Compose
-
+from tme.scoring import flc_scoring2
 
 def get_func_fullname(func) -> str:
     """Returns the full name of the given function, including its module."""
@@ -52,7 +50,7 @@ def print_block(name: str, data: dict, label_width=20) -> None:
 
 def print_entry() -> None:
     width = 80
-    text = f" pyTME v{__version__} "
+    text = f" pytme v{__version__} "
     padding_total = width - len(text) - 2
     padding_left = padding_total // 2
     padding_right = padding_total - padding_left
@@ -273,7 +271,7 @@ def setup_filter(args, template: Density, target: Density) -> Tuple[Compose, Com
             return_real_fourier=True,
         )
         ctf.sampling_rate = template.sampling_rate
-        ctf.flip_phase = not args.no_flip_phase
+        ctf.flip_phase = args.no_flip_phase
         ctf.amplitude_contrast = args.amplitude_contrast
         ctf.spherical_aberration = args.spherical_aberration
        ctf.acceleration_voltage = args.acceleration_voltage * 1e3
@@ -306,6 +304,12 @@ def setup_filter(args, template: Density, target: Density) -> Tuple[Compose, Com
     if highpass is not None:
         highpass = np.max(np.divide(template.sampling_rate, highpass))
 
+    try:
+        if args.lowpass >= args.highpass:
+            warnings.warn("--lowpass should be smaller than --highpass.")
+    except Exception:
+        pass
+
     bandpass = BandPassFilter(
         use_gaussian=args.no_pass_smooth,
         lowpass=lowpass,
@@ -313,7 +317,9 @@ def setup_filter(args, template: Density, target: Density) -> Tuple[Compose, Com
         sampling_rate=template.sampling_rate,
     )
     template_filter.append(bandpass)
-    target_filter.append(bandpass)
+
+    if not args.no_filter_target:
+        target_filter.append(bandpass)
 
     if args.whiten_spectrum:
         whitening_filter = LinearWhiteningFilter()
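The hunks above convert the resolution values supplied on the command line into fractional frequency cutoffs by dividing the sampling rate by the value. The sketch below illustrates that arithmetic with made-up numbers and assumes the lowpass value is converted the same way as the highpass value shown in the hunk; it is not code from the package.

import numpy as np

# Assumed values for illustration only: 4 Å/voxel sampling, --lowpass 40 Å, --highpass 200 Å.
sampling_rate = np.array([4.0, 4.0, 4.0])
lowpass_resolution, highpass_resolution = 40.0, 200.0

# Mirrors highpass = np.max(np.divide(template.sampling_rate, highpass)) from the hunk above.
lowpass_cutoff = np.max(np.divide(sampling_rate, lowpass_resolution))    # 0.1
highpass_cutoff = np.max(np.divide(sampling_rate, highpass_resolution))  # 0.02
print(lowpass_cutoff, highpass_cutoff)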
@@ -335,7 +341,10 @@ def setup_filter(args, template: Density, target: Density) -> Tuple[Compose, Com
 
 
 def parse_args():
-    parser = argparse.ArgumentParser(description="Perform template matching.")
+    parser = argparse.ArgumentParser(
+        description="Perform template matching.",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
 
     io_group = parser.add_argument_group("Input / Output")
     io_group.add_argument(
@@ -405,13 +414,6 @@ def parse_args():
         choices=list(MATCHING_EXHAUSTIVE_REGISTER.keys()),
         help="Template matching scoring function.",
     )
-    scoring_group.add_argument(
-        "-p",
-        dest="peak_calling",
-        action="store_true",
-        default=False,
-        help="Perform peak calling instead of score aggregation.",
-    )
 
     angular_group = parser.add_argument_group("Angular Sampling")
     angular_exclusive = angular_group.add_mutually_exclusive_group(required=True)
@@ -445,7 +447,7 @@ def parse_args():
         type=check_positive,
         default=360.0,
         required=False,
-        help="Sampling angle along the z-axis of the cone. Defaults to 360.",
+        help="Sampling angle along the z-axis of the cone.",
     )
     angular_group.add_argument(
         "--axis_sampling",
@@ -513,8 +515,7 @@ def parse_args():
         required=False,
         type=float,
         default=0.85,
-        help="Fraction of available memory that can be used. Defaults to 0.85 and is "
-        "ignored if --ram is set",
+        help="Fraction of available memory to be used. Ignored if --ram is set."
     )
     computation_group.add_argument(
         "--temp_directory",
@@ -522,6 +523,13 @@ def parse_args():
         default=None,
         help="Directory for temporary objects. Faster I/O improves runtime.",
     )
+    computation_group.add_argument(
+        "--backend",
+        dest="backend",
+        default=None,
+        choices=be.available_backends(),
+        help="[Expert] Overwrite default computation backend.",
+    )
 
     filter_group = parser.add_argument_group("Filters")
     filter_group.add_argument(
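The new --backend option exposes the backend registry on the command line. The sketch below is not from the package documentation; it only uses calls visible in this diff to show where the option's choices come from, and the printed list depends on which optional dependencies are installed.

from tme.backends import backend as be

# choices=be.available_backends() above populates the CLI option from this list.
print(be.available_backends())

# Switching explicitly mirrors what passing --backend does later in main();
# "numpyfftw" is one of the backend names referenced in this diff.
be.change_backend("numpyfftw")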
@@ -552,9 +560,9 @@ def parse_args():
         dest="pass_format",
         type=str,
         required=False,
+        default="sampling_rate",
         choices=["sampling_rate", "voxel", "frequency"],
-        help="How values passed to --lowpass and --highpass should be interpreted. "
-        "By default, they are assumed to be in units of sampling rate, e.g. Ångstrom.",
+        help="How values passed to --lowpass and --highpass should be interpreted.",
     )
     filter_group.add_argument(
         "--whiten_spectrum",
@@ -613,6 +621,13 @@ def parse_args():
         required=False,
         help="Analogous to --interpolation_order but for reconstruction.",
     )
+    filter_group.add_argument(
+        "--no_filter_target",
+        dest="no_filter_target",
+        action="store_true",
+        default=False,
+        help="Whether to not apply potential filters to the target.",
+    )
 
     ctf_group = parser.add_argument_group("Contrast Transfer Function")
     ctf_group.add_argument(
@@ -647,7 +662,7 @@ def parse_args():
         type=float,
         required=False,
         default=300,
-        help="Acceleration voltage in kV, defaults to 300.",
+        help="Acceleration voltage in kV.",
     )
     ctf_group.add_argument(
         "--spherical_aberration",
@@ -663,14 +678,14 @@ def parse_args():
         type=float,
         required=False,
         default=0.07,
-        help="Amplitude contrast, defaults to 0.07.",
+        help="Amplitude contrast.",
     )
     ctf_group.add_argument(
         "--no_flip_phase",
         dest="no_flip_phase",
         action="store_false",
         required=False,
-        help="Whether the phase of the computed CTF should not be flipped.",
+        help="Perform phase-flipping CTF correction.",
     )
     ctf_group.add_argument(
         "--correct_defocus_gradient",
@@ -721,14 +736,22 @@ def parse_args():
         "for numerical stability. When working with very large targets, e.g. tomograms, "
         "it is safe to use this flag and benefit from the performance gain.",
     )
+    performance_group.add_argument(
+        "--no_filter_padding",
+        dest="no_filter_padding",
+        action="store_true",
+        default=False,
+        help="Omits padding of optional template filters. Particularly effective when "
+        "the target is much larger than the template. However, for fast oscillating "
+        "filters setting this flag can introduce aliasing effects.",
+    )
     performance_group.add_argument(
         "--interpolation_order",
         dest="interpolation_order",
         required=False,
         type=int,
         default=3,
-        help="Spline interpolation used for template rotations. If less than zero "
-        "no interpolation is performed.",
+        help="Spline interpolation used for rotations.",
     )
     performance_group.add_argument(
         "--use_mixed_precision",
@@ -755,7 +778,20 @@ def parse_args():
         default=0,
         help="Minimum template matching scores to consider for analysis.",
     )
-
+    analyzer_group.add_argument(
+        "-p",
+        dest="peak_calling",
+        action="store_true",
+        default=False,
+        help="Perform peak calling instead of score aggregation.",
+    )
+    analyzer_group.add_argument(
+        "--number_of_peaks",
+        dest="number_of_peaks",
+        type=int,
+        default=1000,
+        help="Number of peaks to call.",
+    )
     args = parser.parse_args()
     args.version = __version__
 
@@ -926,44 +962,57 @@ def main():
             template.data, noise_proportion=1.0, normalize_power=True
         )
 
-    available_memory = backend.get_available_memory()
-    if args.use_gpu:
-        args.cores = len(args.gpu_indices)
-        has_torch = importlib.util.find_spec("torch") is not None
-        has_cupy = importlib.util.find_spec("cupy") is not None
-
-        if not has_torch and not has_cupy:
+    # Determine suitable backend for the selected operation
+    available_backends = be.available_backends()
+    if args.backend is not None:
+        req_backend = args.backend
+        if req_backend not in available_backends:
             raise ValueError(
-                "Found neither CuPy nor PyTorch installation. You need to install"
-                " either to enable GPU support."
+                "Requested backend is not available."
             )
+        available_backends = [req_backend,]
 
-        if args.peak_calling:
-            preferred_backend = "pytorch"
-            if not has_torch:
-                preferred_backend = "cupy"
-            backend.change_backend(backend_name=preferred_backend, device="cuda")
-        else:
-            preferred_backend = "cupy"
-            if not has_cupy:
-                preferred_backend = "pytorch"
-            backend.change_backend(backend_name=preferred_backend, device="cuda")
-            if args.use_mixed_precision and preferred_backend == "pytorch":
+    be_selection = ("numpyfftw", "pytorch", "jax", "mlx")
+    if args.use_gpu:
+        args.cores = len(args.gpu_indices)
+        be_selection = ("pytorch", "cupy", "jax")
+    if args.use_mixed_precision:
+        be_selection = tuple(x for x in be_selection if x in ("cupy", "numpyfftw"))
+
+    available_backends = [x for x in available_backends if x in be_selection]
+    if args.peak_calling:
+        if "jax" in available_backends:
+            available_backends.remove("jax")
+        if args.use_gpu and "pytorch" in available_backends:
+            available_backends = ("pytorch",)
+            if args.interpolation_order == 3:
                 raise NotImplementedError(
-                    "pytorch backend does not yet support mixed precision."
-                    " Consider installing CuPy to enable this feature."
+                    "Pytorch does not support --interpolation_order 3, 1 is supported."
                 )
-            elif args.use_mixed_precision:
-                backend.change_backend(
-                    backend_name="cupy",
-                    default_dtype=backend._array_backend.float16,
-                    complex_dtype=backend._array_backend.complex64,
-                    default_dtype_int=backend._array_backend.int16,
-                )
-        available_memory = backend.get_available_memory() * args.cores
-        if preferred_backend == "pytorch" and args.interpolation_order == 3:
-            args.interpolation_order = 1
+    ndim = len(template.shape)
+    if len(target.shape) == ndim and ndim <= 3 and args.use_gpu:
+        available_backends = ["jax", ]
 
+    backend_preference = ("numpyfftw", "pytorch", "jax", "mlx")
+    if args.use_gpu:
+        backend_preference = ("cupy", "jax", "pytorch")
+    for pref in backend_preference:
+        if pref not in available_backends:
+            continue
+        be.change_backend(pref)
+        if pref == "pytorch":
+            be.change_backend(pref, device="cuda" if args.use_gpu else "cpu")
+
+        if args.use_mixed_precision:
+            be.change_backend(
+                backend_name=pref,
+                default_dtype=be._array_backend.float16,
+                complex_dtype=be._array_backend.complex64,
+                default_dtype_int=be._array_backend.int16,
+            )
+        break
+
+    available_memory = be.get_available_memory() * be.device_count()
     if args.memory is None:
         args.memory = int(args.memory_scaling * available_memory)
 
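The hunk above replaces the hard-coded CuPy/PyTorch branching with a filter-then-prefer selection: the available backends are first restricted by the requested features (GPU, mixed precision, peak calling, dimensionality), and the first hit in a preference order wins. The following is a simplified, self-contained sketch of that idea, not the code used by the script.

from typing import Sequence

def pick_backend(available: Sequence[str], use_gpu: bool) -> str:
    """Return the first available backend in preference order (illustration only)."""
    preference = ("cupy", "jax", "pytorch") if use_gpu else ("numpyfftw", "pytorch", "jax", "mlx")
    for pref in preference:
        if pref in available:
            return pref
    raise ValueError("No suitable backend available.")

# On a GPU node with only numpyfftw and pytorch installed this yields "pytorch".
print(pick_backend(["numpyfftw", "pytorch"], use_gpu=True))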
@@ -980,22 +1029,20 @@ def main():
         rotations=parse_rotation_logic(args=args, ndim=template.data.ndim),
     )
 
-    template_filter, target_filter = setup_filter(args, template, target)
-    matching_data.template_filter = template_filter
-    matching_data.target_filter = target_filter
+    matching_data.template_filter, matching_data.target_filter = setup_filter(
+        args, template, target
+    )
 
     target_dims = target.metadata.get("batch_dimension", None)
-    matching_data._set_batch_dimension(target_dims=target_dims, template_dims=None)
-    args.score = "FLC2" if target_dims is not None else args.score
+    matching_data._set_matching_dimension(target_dims=target_dims, template_dims=None)
+    args.score = "FLC" if target_dims is not None else args.score
     args.target_batch, args.template_batch = target_dims, None
 
     template_box = matching_data._output_template_shape
     if not args.pad_fourier:
-        template_box = np.ones(len(template_box), dtype=int)
+        template_box = tuple(0 for _ in range(len(template_box)))
 
-    target_padding = np.zeros(
-        (backend.size(matching_data._output_template_shape)), dtype=int
-    )
+    target_padding = tuple(0 for _ in range(len(template_box)))
     if args.pad_target_edges:
         target_padding = matching_data._output_template_shape
 
@@ -1008,10 +1055,10 @@ def main():
         split_only_outer=args.use_gpu,
         matching_method=args.score,
         analyzer_method=callback_class.__name__,
-        backend=backend._backend_name,
-        float_nbytes=backend.datatype_bytes(backend._float_dtype),
-        complex_nbytes=backend.datatype_bytes(backend._complex_dtype),
-        integer_nbytes=backend.datatype_bytes(backend._int_dtype),
+        backend=be._backend_name,
+        float_nbytes=be.datatype_bytes(be._float_dtype),
+        complex_nbytes=be.datatype_bytes(be._complex_dtype),
+        integer_nbytes=be.datatype_bytes(be._int_dtype),
         split_axes=target_dims,
     )
 
@@ -1023,33 +1070,44 @@ def main():
         exit(-1)
 
     matching_setup, matching_score = MATCHING_EXHAUSTIVE_REGISTER[args.score]
+    if target_dims is not None:
+        matching_score = flc_scoring2
     n_splits = np.prod(list(splits.values()))
     target_split = ", ".join(
         [":".join([str(x) for x in axis]) for axis in splits.items()]
     )
     gpus_used = 0 if args.gpu_indices is None else len(args.gpu_indices)
     options = {
-        "CPU Cores": args.cores,
-        "Run on GPU": f"{args.use_gpu} [N={gpus_used}]",
-        "Use Mixed Precision": args.use_mixed_precision,
-        "Assigned Memory [MB]": f"{args.memory // 1e6} [out of {available_memory//1e6}]",
-        "Temporary Directory": args.temp_directory,
+        "Angular Sampling": f"{args.angular_sampling}"
+        f" [{matching_data.rotations.shape[0]} rotations]",
+        "Center Template": not args.no_centering,
+        "Scramble Template": args.scramble_phases,
+        "Invert Contrast": args.invert_target_contrast,
         "Extend Fourier Grid": not args.no_fourier_padding,
         "Extend Target Edges": not args.no_edge_padding,
         "Interpolation Order": args.interpolation_order,
-        "Score": f"{args.score}",
         "Setup Function": f"{get_func_fullname(matching_setup)}",
         "Scoring Function": f"{get_func_fullname(matching_score)}",
-        "Angular Sampling": f"{args.angular_sampling}"
-        f" [{matching_data.rotations.shape[0]} rotations]",
-        "Scramble Template": args.scramble_phases,
-        "Target Splits": f"{target_split} [N={n_splits}]",
     }
 
     print_block(
-        name="Template Matching Options",
+        name="Template Matching",
         data=options,
-        label_width=max(len(key) for key in options.keys()) + 2,
+        label_width=max(len(key) for key in options.keys()) + 3,
+    )
+
+    compute_options = {
+        "Backend" :be._BACKEND_REGISTRY[be._backend_name],
+        "Compute Devices" : f"CPU [{args.cores}], GPU [{gpus_used}]",
+        "Use Mixed Precision": args.use_mixed_precision,
+        "Assigned Memory [MB]": f"{args.memory // 1e6} [out of {available_memory//1e6}]",
+        "Temporary Directory": args.temp_directory,
+        "Target Splits": f"{target_split} [N={n_splits}]",
+    }
+    print_block(
+        name="Computation",
+        data=compute_options,
+        label_width=max(len(key) for key in options.keys()) + 3,
     )
 
     filter_args = {
@@ -1067,7 +1125,7 @@ def main():
         filter_args["CTF File"] = args.ctf_file
         filter_args["Defocus"] = args.defocus
         filter_args["Phase Shift"] = args.phase_shift
-        filter_args["No Flip Phase"] = args.no_flip_phase
+        filter_args["Flip Phase"] = args.no_flip_phase
         filter_args["Acceleration Voltage"] = args.acceleration_voltage
         filter_args["Spherical Aberration"] = args.spherical_aberration
         filter_args["Amplitude Contrast"] = args.amplitude_contrast
1078
1136
  print_block(
1079
1137
  name="Filters",
1080
1138
  data=filter_args,
1081
- label_width=max(len(key) for key in options.keys()) + 2,
1139
+ label_width=max(len(key) for key in options.keys()) + 3,
1082
1140
  )
1083
1141
 
1084
1142
  analyzer_args = {
1085
1143
  "score_threshold": args.score_threshold,
1086
- "number_of_peaks": 1000,
1087
- "convolution_mode": "valid",
1144
+ "number_of_peaks": args.number_of_peaks,
1145
+ "min_distance" : max(template.shape) // 2,
1146
+ "min_boundary_distance" : max(template.shape) // 2,
1088
1147
  "use_memmap": args.use_memmap,
1089
1148
  }
1090
- analyzer_args = {"Analyzer": callback_class, **analyzer_args}
1091
1149
  print_block(
1092
- name="Score Analysis Options",
1093
- data=analyzer_args,
1094
- label_width=max(len(key) for key in options.keys()) + 2,
1150
+ name="Analyzer",
1151
+ data={"Analyzer": callback_class, **analyzer_args},
1152
+ label_width=max(len(key) for key in options.keys()) + 3,
1095
1153
  )
1096
1154
  print("\n" + "-" * 80)
1097
1155
 
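The analyzer defaults introduced above derive the minimum peak separation from the template box. A small worked example of that arithmetic, with a hypothetical template shape:

# Hypothetical 32-voxel cubic template; mirrors max(template.shape) // 2 from the hunk above.
template_shape = (32, 32, 32)
min_distance = max(template_shape) // 2           # 16
min_boundary_distance = max(template_shape) // 2  # 16
print(min_distance, min_boundary_distance)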
@@ -1112,6 +1170,7 @@ def main():
         target_splits=splits,
         pad_target_edges=args.pad_target_edges,
         pad_fourier=args.pad_fourier,
+        pad_template_filter=not args.no_filter_padding,
         interpolation_order=args.interpolation_order,
     )
 
@@ -1121,19 +1180,19 @@ def main():
             candidates[0] *= target_mask.data
         with warnings.catch_warnings():
             warnings.simplefilter("ignore", category=UserWarning)
-            nbytes = backend.datatype_bytes(backend._float_dtype)
+            nbytes = be.datatype_bytes(be._float_dtype)
             dtype = np.float32 if nbytes == 4 else np.float16
             rot_dim = matching_data.rotations.shape[1]
             candidates[3] = {
-                x: euler_from_rotationmatrix(
-                    np.frombuffer(i, dtype=dtype).reshape(rot_dim, rot_dim)
-                )
+                x: np.frombuffer(i, dtype=dtype).reshape(rot_dim, rot_dim)
                 for i, x in candidates[3].items()
             }
+    print(np.where(candidates[0] == candidates[0].max()), candidates[0].max())
     candidates.append((target.origin, template.origin, template.sampling_rate, args))
     write_pickle(data=candidates, filename=args.output)
 
     runtime = time() - start
+    print("\n" + "-" * 80)
     print(f"\nRuntime real: {runtime:.3f}s user: {(runtime * args.cores):.3f}s.")
 
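With euler_from_rotationmatrix removed, the output pickle now stores raw rotation matrices rather than Euler angles. A downstream consumer could recover angles along the following lines; the byte layout matches the reshape in the hunk above, while the float32 dtype and the "zyz" convention are assumptions for illustration only.

import numpy as np
from scipy.spatial.transform import Rotation

rot_dim = 3
stored_key = np.eye(rot_dim, dtype=np.float32).tobytes()  # stand-in for a stored rotation key
matrix = np.frombuffer(stored_key, dtype=np.float32).reshape(rot_dim, rot_dim)
angles = Rotation.from_matrix(matrix).as_euler("zyz", degrees=True)
print(angles)  # [0. 0. 0.] for the identity rotation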