pytme 0.3b0.post1.tar.gz → 0.3.1.post1.tar.gz

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (132)
  1. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/PKG-INFO +5 -7
  2. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/README.md +2 -2
  3. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/pyproject.toml +2 -3
  4. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/pytme.egg-info/SOURCES.txt +7 -0
  5. pytme-0.3.1.post1/scripts/extract_candidates.py +243 -0
  6. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/match_template.py +28 -39
  7. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/postprocess.py +35 -21
  8. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/preprocessor_gui.py +95 -24
  9. pytme-0.3.1.post1/scripts/pytme_runner.py +1223 -0
  10. pytme-0.3.1.post1/scripts/refine_matches.py +395 -0
  11. pytme-0.3.1.post1/tests/data/.DS_Store +0 -0
  12. pytme-0.3.1.post1/tests/data/Blurring/.DS_Store +0 -0
  13. pytme-0.3.1.post1/tests/data/Maps/.DS_Store +0 -0
  14. pytme-0.3.1.post1/tests/data/Raw/.DS_Store +0 -0
  15. pytme-0.3.1.post1/tests/data/Structures/.DS_Store +0 -0
  16. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/preprocessing/test_utils.py +18 -0
  17. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_analyzer.py +2 -3
  18. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_backends.py +3 -9
  19. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_density.py +0 -1
  20. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_extensions.py +0 -1
  21. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_matching_utils.py +10 -60
  22. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_rotations.py +1 -1
  23. pytme-0.3.1.post1/tme/__version__.py +1 -0
  24. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/analyzer/_utils.py +4 -4
  25. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/analyzer/aggregation.py +35 -15
  26. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/analyzer/peaks.py +11 -10
  27. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/_jax_utils.py +26 -13
  28. pytme-0.3.1.post1/tme/backends/_numpyfftw_utils.py +270 -0
  29. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/cupy_backend.py +16 -55
  30. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/jax_backend.py +76 -37
  31. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/matching_backend.py +17 -51
  32. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/mlx_backend.py +1 -27
  33. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/npfftw_backend.py +71 -65
  34. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/pytorch_backend.py +1 -26
  35. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/density.py +2 -6
  36. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/ctf.py +22 -21
  37. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/wedge.py +10 -7
  38. pytme-0.3.1.post1/tme/mask.py +341 -0
  39. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/matching_data.py +31 -19
  40. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/matching_exhaustive.py +37 -47
  41. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/matching_optimization.py +2 -1
  42. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/matching_scores.py +229 -411
  43. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/matching_utils.py +73 -422
  44. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/memory.py +1 -1
  45. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/orientations.py +13 -8
  46. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/rotations.py +1 -1
  47. pytme-0.3b0.post1/scripts/extract_candidates.py +0 -224
  48. pytme-0.3b0.post1/scripts/pytme_runner.py +0 -769
  49. pytme-0.3b0.post1/scripts/refine_matches.py +0 -625
  50. pytme-0.3b0.post1/tme/__version__.py +0 -1
  51. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/LICENSE +0 -0
  52. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/MANIFEST.in +0 -0
  53. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/__init__.py +0 -0
  54. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/estimate_memory_usage.py +0 -0
  55. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/eval.py +0 -0
  56. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/match_template_filters.py +0 -0
  57. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/scripts/preprocess.py +0 -0
  58. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/setup.cfg +0 -0
  59. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/setup.py +0 -0
  60. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/__init__.py +0 -0
  61. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/blob_width18.npy +0 -0
  62. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/edgegaussian_sigma3.npy +0 -0
  63. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/gaussian_sigma2.npy +0 -0
  64. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/hamming_width6.npy +0 -0
  65. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/kaiserb_width18.npy +0 -0
  66. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/localgaussian_sigma0510.npy +0 -0
  67. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/mean_size5.npy +0 -0
  68. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/ntree_sigma0510.npy +0 -0
  69. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Blurring/rank_rank3.npy +0 -0
  70. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Maps/emd_8621.mrc.gz +0 -0
  71. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/README.md +0 -0
  72. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Raw/em_map.map +0 -0
  73. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Structures/1pdj.cif +0 -0
  74. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Structures/1pdj.pdb +0 -0
  75. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Structures/5khe.cif +0 -0
  76. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Structures/5khe.ent +0 -0
  77. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Structures/5khe.pdb +0 -0
  78. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/data/Structures/5uz4.cif +0 -0
  79. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/preprocessing/__init__.py +0 -0
  80. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/preprocessing/test_compose.py +0 -0
  81. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/preprocessing/test_frequency_filters.py +0 -0
  82. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/preprocessing/test_preprocessor.py +0 -0
  83. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_matching_cli.py +0 -0
  84. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_matching_data.py +0 -0
  85. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_matching_exhaustive.py +0 -0
  86. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_matching_memory.py +0 -0
  87. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_matching_optimization.py +0 -0
  88. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_orientations.py +0 -0
  89. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_parser.py +0 -0
  90. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tests/test_structure.py +0 -0
  91. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/__init__.py +0 -0
  92. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/analyzer/__init__.py +0 -0
  93. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/analyzer/base.py +0 -0
  94. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/analyzer/proxy.py +0 -0
  95. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/__init__.py +0 -0
  96. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/backends/_cupy_utils.py +0 -0
  97. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/cli.py +0 -0
  98. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/__init__.py +0 -0
  99. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48n309.npy +0 -0
  100. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48n527.npy +0 -0
  101. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48n9.npy +0 -0
  102. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u1.npy +0 -0
  103. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u1153.npy +0 -0
  104. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u1201.npy +0 -0
  105. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u1641.npy +0 -0
  106. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u181.npy +0 -0
  107. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u2219.npy +0 -0
  108. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u27.npy +0 -0
  109. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u2947.npy +0 -0
  110. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u3733.npy +0 -0
  111. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u4749.npy +0 -0
  112. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u5879.npy +0 -0
  113. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u7111.npy +0 -0
  114. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u815.npy +0 -0
  115. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u83.npy +0 -0
  116. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c48u8649.npy +0 -0
  117. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c600v.npy +0 -0
  118. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/c600vc.npy +0 -0
  119. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/metadata.yaml +0 -0
  120. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/quat_to_numpy.py +0 -0
  121. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/data/scattering_factors.pickle +0 -0
  122. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/external/bindings.cpp +0 -0
  123. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/__init__.py +0 -0
  124. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/_utils.py +0 -0
  125. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/bandpass.py +0 -0
  126. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/compose.py +0 -0
  127. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/reconstruction.py +0 -0
  128. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/filters/whitening.py +0 -0
  129. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/parser.py +0 -0
  130. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/preprocessor.py +0 -0
  131. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/structure.py +0 -0
  132. {pytme-0.3b0.post1 → pytme-0.3.1.post1}/tme/types.py +0 -0
--- pytme-0.3b0.post1/PKG-INFO
+++ pytme-0.3.1.post1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pytme
-Version: 0.3b0.post1
+Version: 0.3.1.post1
 Summary: Python Template Matching Engine
 Author: Valentin Maurer
 Author-email: Valentin Maurer <valentin.maurer@embl-hamburg.de>
@@ -25,10 +25,8 @@ Requires-Dist: h5py
 Requires-Dist: importlib_resources
 Requires-Dist: joblib
 Provides-Extra: cupy
-Requires-Dist: cupy-cuda12x>=13.0.0; extra == "cupy"
-Provides-Extra: cupy-voltools
-Requires-Dist: cupy-cuda12x>=13.0.0; extra == "cupy-voltools"
-Requires-Dist: voltools; extra == "cupy-voltools"
+Requires-Dist: cupy-cuda12x>13.0.0; extra == "cupy"
+Requires-Dist: voltools; extra == "cupy"
 Provides-Extra: pytorch
 Requires-Dist: torch; extra == "pytorch"
 Requires-Dist: torchvision; extra == "pytorch"
@@ -77,8 +75,8 @@ You can find alternative installation methods in the [documentation](https://kos
 
 Learn how to get started with
 
-- [Installation:](https://kosinskilab.github.io/pyTME/quickstart/installation.html).
-- [Template matching:](https://kosinskilab.github.io/pyTME/quickstart/matching/particle_picking.html) Find your template of interest.
+- [Installation](https://kosinskilab.github.io/pyTME/quickstart/installation.html)
+- [Template matching](https://kosinskilab.github.io/pyTME/quickstart/matching/particle_picking.html) Find your template of interest.
 - [Postprocessing](https://kosinskilab.github.io/pyTME/quickstart/postprocessing/motivation.html) Analyze template matching results and downstream integrations.
 
 ## How to Cite
--- pytme-0.3b0.post1/README.md
+++ pytme-0.3.1.post1/README.md
@@ -32,8 +32,8 @@ You can find alternative installation methods in the [documentation](https://kos
 
 Learn how to get started with
 
-- [Installation:](https://kosinskilab.github.io/pyTME/quickstart/installation.html).
-- [Template matching:](https://kosinskilab.github.io/pyTME/quickstart/matching/particle_picking.html) Find your template of interest.
+- [Installation](https://kosinskilab.github.io/pyTME/quickstart/installation.html)
+- [Template matching](https://kosinskilab.github.io/pyTME/quickstart/matching/particle_picking.html) Find your template of interest.
 - [Postprocessing](https://kosinskilab.github.io/pyTME/quickstart/postprocessing/motivation.html) Analyze template matching results and downstream integrations.
 
 ## How to Cite
--- pytme-0.3b0.post1/pyproject.toml
+++ pytme-0.3.1.post1/pyproject.toml
@@ -7,7 +7,7 @@ name="pytme"
 authors = [
     { name = "Valentin Maurer", email = "valentin.maurer@embl-hamburg.de" },
 ]
-version="0.3.b0.post1"
+version="0.3.1.post1"
 description="Python Template Matching Engine"
 readme="README.md"
 requires-python = ">=3.11"
@@ -33,8 +33,7 @@ classifiers = [
 ]
 
 [project.optional-dependencies]
-cupy = ["cupy-cuda12x>=13.0.0"]
-cupy_voltools = ["cupy-cuda12x>=13.0.0", "voltools"]
+cupy = ["cupy-cuda12x>13.0.0", "voltools"]
 pytorch = ["torch", "torchvision"]
 jax = ["jax[cuda12]", "jaxlib"]
 jax_cpu = ["jax", "jaxlib"]
--- pytme-0.3b0.post1/pytme.egg-info/SOURCES.txt
+++ pytme-0.3.1.post1/pytme.egg-info/SOURCES.txt
@@ -29,7 +29,9 @@ tests/test_orientations.py
 tests/test_parser.py
 tests/test_rotations.py
 tests/test_structure.py
+tests/data/.DS_Store
 tests/data/README.md
+tests/data/Blurring/.DS_Store
 tests/data/Blurring/blob_width18.npy
 tests/data/Blurring/edgegaussian_sigma3.npy
 tests/data/Blurring/gaussian_sigma2.npy
@@ -39,8 +41,11 @@ tests/data/Blurring/localgaussian_sigma0510.npy
 tests/data/Blurring/mean_size5.npy
 tests/data/Blurring/ntree_sigma0510.npy
 tests/data/Blurring/rank_rank3.npy
+tests/data/Maps/.DS_Store
 tests/data/Maps/emd_8621.mrc.gz
+tests/data/Raw/.DS_Store
 tests/data/Raw/em_map.map
+tests/data/Structures/.DS_Store
 tests/data/Structures/1pdj.cif
 tests/data/Structures/1pdj.pdb
 tests/data/Structures/5khe.cif
@@ -56,6 +61,7 @@ tme/__init__.py
 tme/__version__.py
 tme/cli.py
 tme/density.py
+tme/mask.py
 tme/matching_data.py
 tme/matching_exhaustive.py
 tme/matching_optimization.py
@@ -77,6 +83,7 @@ tme/analyzer/proxy.py
 tme/backends/__init__.py
 tme/backends/_cupy_utils.py
 tme/backends/_jax_utils.py
+tme/backends/_numpyfftw_utils.py
 tme/backends/cupy_backend.py
 tme/backends/jax_backend.py
 tme/backends/matching_backend.py
--- /dev/null
+++ pytme-0.3.1.post1/scripts/extract_candidates.py
@@ -0,0 +1,243 @@
+#!python3
+"""Prepare orientations stack for refinement.
+
+Copyright (c) 2023 European Molecular Biology Laboratory
+
+Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
+"""
+import argparse
+from os import unlink
+from os.path import splitext, basename
+
+import numpy as np
+from collections import defaultdict
+
+from tme.parser import StarParser
+from tme import Density, Orientations
+from tme.matching_utils import generate_tempfile_name
+from tme.rotations import (
+    align_vectors,
+    euler_from_rotationmatrix,
+    euler_to_rotationmatrix,
+)
+
+
+class ProgressBar:
+    """
+    ASCII progress bar.
+    """
+
+    def __init__(self, message: str, nchars: int, total: int):
+        self._size = nchars - len(message) - (len(str(total)) + 2) * 2
+        self._message = message
+        self._total = total
+
+    def update(self, cur):
+        x = int(cur * self._size / self._total)
+        print(
+            "%s[%s%s] %i/%i\r"
+            % (self._message, "#" * x, "." * (self._size - x), cur, self._total),
+            end="",
+        )
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description="Extract matching candidates for further refinement.",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+
+    io_group = parser.add_argument_group("Input / Output")
+    io_group.add_argument(
+        "--orientations",
+        required=True,
+        type=str,
+        help="Star file with picks and micrograph names.",
+    )
+    io_group.add_argument(
+        "--orientations-scaling",
+        required=False,
+        type=float,
+        default=1.0,
+        help="Factor to map candidate coordinates onto the target. Only relevant if "
+        "target sampling rate differs from candidate orientation sampling rate.",
+    )
+    io_group.add_argument(
+        "-o",
+        "--output-prefix",
+        required=True,
+        type=str,
+        help="Output prefix to use.",
+    )
+
+    alignment_group = parser.add_argument_group("Alignment")
+    alignment_group.add_argument(
+        "--align-orientations",
+        action="store_true",
+        required=False,
+        help="Whether to align extracted orientations based on their angles. Allows "
+        "for efficient subsequent sampling of cone angles.",
+    )
+    alignment_group.add_argument(
+        "--angles-are-vector",
+        action="store_true",
+        required=False,
+        help="Considers euler_z euler_y, euler_x as vector that will be rotated to align "
+        "with the z-axis (1,0,0). Only considered when --align_orientations is set.",
+    )
+    alignment_group.add_argument(
+        "--interpolation-order",
+        required=False,
+        type=int,
+        default=1,
+        help="Interpolation order for alignment, less than zero is no interpolation.",
+    )
+    alignment_group.add_argument(
+        "--split-by-micrograph",
+        action="store_true",
+        required=False,
+        help="Create separate output files for each micrograph."
+    )
+
+    extraction_group = parser.add_argument_group("Extraction")
+    extraction_group.add_argument(
+        "--box-size",
+        required=True,
+        type=int,
+        help="Box size for extraction.",
+    )
+    extraction_group.add_argument(
+        "--translation-uncertainty",
+        required=False,
+        type=int,
+        help="Sets box size for extraction to template box plus this value.",
+    )
+    extraction_group.add_argument(
+        "--drop-out-of-box",
+        action="store_true",
+        required=False,
+        help="Whether to drop orientations that fall outside the box. If the "
+        "orientations are sensible, it is safe to pass this flag.",
+    )
+
+    args = parser.parse_args()
+
+    return args
+
+
+def main():
+    args = parse_args()
+
+    data = StarParser(args.orientations, delimiter="\t")
+    key = list(data.keys())[0]
+
+    index_map = defaultdict(list)
+    for index, value in enumerate(data[key]["_rlnMicrographName"]):
+        index_map[value].append(index)
+
+    orientations = Orientations.from_file(args.orientations)
+    orientations.translations = np.divide(
+        orientations.translations, args.orientations_scaling
+    )
+
+    box_size = np.array(args.box_size)
+    box_size = np.repeat(box_size, 3 // box_size.size).astype(int)
+    extraction_shape = np.copy(box_size)
+
+    if args.align_orientations:
+        extraction_shape[:] = int(np.linalg.norm(box_size) + 1)
+        for index in range(orientations.rotations.shape[0]):
+            rotation_matrix = euler_to_rotationmatrix(orientations.rotations[index])
+            rotation_matrix = np.linalg.inv(rotation_matrix)
+            if args.angles_are_vector:
+                rotation_matrix = align_vectors(
+                    orientations.rotations[index], target_vector=(1, 0, 0)
+                )
+            orientations.rotations[index] = euler_from_rotationmatrix(rotation_matrix)
+
+    ret_orientations, ret_dens, ix = [], [], 0
+    n_particles = orientations.translations.shape[0]
+    pbar = ProgressBar(message="Processing ", nchars=80, total=n_particles)
+    for target_path, indices in index_map.items():
+
+        target = Density.from_file(target_path, use_memmap=True)
+
+        subset = orientations[indices]
+        subset, cand_slices, obs_slices = subset.get_extraction_slices(
+            target_shape=target.shape,
+            extraction_shape=extraction_shape,
+            drop_out_of_box=args.drop_out_of_box,
+            return_orientations=True,
+        )
+
+        dens = Density(
+            np.memmap(
+                generate_tempfile_name(),
+                mode="w+",
+                shape=(subset.translations.shape[0], *box_size),
+                dtype=np.float32,
+            ),
+            sampling_rate = (1, *target.sampling_rate),
+            metadata = {"batch_dimension" : (0,), "path" : target_path}
+        )
+
+        data_subset = np.zeros(extraction_shape, dtype=target.data.dtype)
+        for index, (obs_slice, cand_slice) in enumerate(zip(obs_slices, cand_slices)):
+            pbar.update(ix + 1)
+
+            data_subset.fill(0)
+            data_subset[cand_slice] = target.data[obs_slice]
+            target_subset = Density(
+                data_subset,
+                sampling_rate=target.sampling_rate,
+                origin=target.origin,
+            )
+
+            if args.align_orientations:
+                rotation_matrix = euler_to_rotationmatrix(subset.rotations[index])
+                target_subset = target_subset.rigid_transform(
+                    rotation_matrix=rotation_matrix,
+                    use_geometric_center=True,
+                    order=args.interpolation_order,
+                )
+            target_subset.pad(box_size, center=True)
+            dens.data[index] = target_subset.data.astype(np.float32)
+            ix += 1
+
+        ret_dens.append(dens)
+        ret_orientations.append(subset)
+
+    if not len(ret_dens):
+        exit("Found no valid particles.")
+
+    print("")
+    if not args.split_by_micrograph:
+        ret_orientations = [Orientations(
+            translations=np.concatenate([x.translations for x in ret_orientations]),
+            rotations=np.concatenate([x.rotations for x in ret_orientations]),
+            scores=np.concatenate([x.scores for x in ret_orientations]),
+            details=np.concatenate([x.details for x in ret_orientations]),
+        )]
+        dens_data = Density(
+            np.concatenate([x.data for x in ret_dens]),
+            sampling_rate=ret_dens[0].sampling_rate
+        )
+        _ = [unlink(x.data.filename) for x in ret_dens]
+        dens_data.metadata.update({"batch_dimension" : (0, )})
+        ret_dens = [dens_data]
+
+    for orientation, dens in zip(ret_orientations, ret_dens):
+        fname = args.output_prefix
+        if args.split_by_micrograph:
+            target = splitext(basename(dens.metadata["path"]))[0]
+            fname = f"{args.output_prefix}_{target}"
+
+        dens.to_file(f"{fname}.h5")
+        orientation.to_file(f"{fname}_aligned.star")
+        try:
+            unlink(dens.data.filename)
+        except Exception:
+            continue
+
+if __name__ == "__main__":
+    main()
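Note: two numeric details in the new script are easy to miss. A scalar --box-size is broadcast to a cubic box via np.repeat, and with --align-orientations the extraction window is widened to the box diagonal so that a later rigid rotation cannot push template voxels outside the extracted subvolume. A minimal NumPy sketch of both steps (the value 64 is illustrative):

```python
import numpy as np

# Scalar box size -> cubic (64, 64, 64); a 3-vector would pass through
# unchanged, since np.repeat(x, 3 // x.size) repeats 3x only for size-1 input.
box_size = np.array(64)
box_size = np.repeat(box_size, 3 // box_size.size).astype(int)
print(box_size)  # [64 64 64]

# Aligned extraction uses the box diagonal: sqrt(3) * 64 ~ 110.85 -> 111.
extraction_shape = np.copy(box_size)
extraction_shape[:] = int(np.linalg.norm(box_size) + 1)
print(extraction_shape)  # [111 111 111]
```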
--- pytme-0.3b0.post1/scripts/match_template.py
+++ pytme-0.3.1.post1/scripts/match_template.py
@@ -12,8 +12,8 @@ from sys import exit
 from time import time
 from typing import Tuple
 from copy import deepcopy
-from os.path import exists
 from tempfile import gettempdir
+from os.path import exists, abspath
 
 import numpy as np
 
@@ -576,7 +576,7 @@ def parse_args():
         "'angles', or a single column file without header. Exposure will be taken from "
         "the input file , if you are using a tab-separated file, the column names "
         "'angles' and 'weights' need to be present. It is also possible to specify a "
-        "continuous wedge mask using e.g., -50,45.",
+        "continuous wedge mask using e.g., 50,45.",
     )
     filter_group.add_argument(
         "--tilt-weighting",
@@ -685,13 +685,6 @@ def parse_args():
         help="Useful if the target does not have a well-defined bounding box. Will be "
         "activated automatically if splitting is required to avoid boundary artifacts.",
     )
-    performance_group.add_argument(
-        "--pad-filter",
-        action="store_true",
-        default=False,
-        help="Pad the template filter to the shape of the target. Useful for fast "
-        "oscilating filters to avoid aliasing effects.",
-    )
     performance_group.add_argument(
         "--interpolation-order",
         required=False,
@@ -700,13 +693,6 @@ def parse_args():
         help="Spline interpolation used for rotations. Defaults to 3, and 1 for jax "
         "and pytorch backends.",
     )
-    performance_group.add_argument(
-        "--use-mixed-precision",
-        action="store_true",
-        default=False,
-        help="Use float16 for real values operations where possible. Not supported "
-        "for jax backend.",
-    )
     performance_group.add_argument(
         "--use-memmap",
         action="store_true",
@@ -742,6 +728,7 @@ def parse_args():
         args.interpolation_order = 3
     if args.backend in ("jax", "pytorch"):
         args.interpolation_order = 1
+        args.reconstruction_interpolation_order = 1
 
     if args.interpolation_order < 0:
         args.interpolation_order = None
@@ -779,6 +766,14 @@ def parse_args():
     )
     args.orientations = orientations
 
+    args.target = abspath(args.target)
+    if args.target_mask is not None:
+        args.target_mask = abspath(args.target_mask)
+
+    args.template = abspath(args.template)
+    if args.template_mask is not None:
+        args.template_mask = abspath(args.template_mask)
+
     return args
 
 
@@ -796,15 +791,23 @@ def main():
         sampling_rate=target.sampling_rate,
     )
 
+    if np.allclose(target.sampling_rate, 1):
+        warnings.warn(
+            "Target sampling rate is 1.0, which may indicate missing or incorrect "
+            "metadata. Verify that your target file contains proper sampling rate "
+            "information, as filters (CTF, BandPass) require accurate sampling rates "
+            "to function correctly."
+        )
+
     if target.sampling_rate.size == template.sampling_rate.size:
         if not np.allclose(
             np.round(target.sampling_rate, 2), np.round(template.sampling_rate, 2)
         ):
-            print(
-                f"Resampling template to {target.sampling_rate}. "
-                "Consider providing a template with the same sampling rate as the target."
+            warnings.warn(
+                f"Sampling rate mismatch detected: target={target.sampling_rate} "
+                f"template={template.sampling_rate}. Proceeding with user-provided "
+                f"values. Make sure this is intentional. "
             )
-            template = template.resample(target.sampling_rate, order=3)
 
     template_mask = load_and_validate_mask(
         mask_target=template, mask_path=args.template_mask
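Note: since 0.3.1 a sampling rate mismatch only warns instead of silently resampling the template. Anyone relying on the old behavior now has to resample explicitly; a minimal sketch using the same Density.resample call the removed code path used (file names are hypothetical):

```python
from tme import Density

# Hypothetical inputs, for illustration only.
target = Density.from_file("target_tomogram.mrc")
template = Density.from_file("template_map.mrc")

# Reproduce the pre-0.3.1 behavior: bring the template to the target's
# sampling rate before matching (order=3 selects cubic spline interpolation).
template = template.resample(target.sampling_rate, order=3)
```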
@@ -881,16 +884,13 @@
     print("\n" + "-" * 80)
 
     if args.scramble_phases:
-        template.data = scramble_phases(
-            template.data, noise_proportion=1.0, normalize_power=False
-        )
+        template.data = scramble_phases(template.data, noise_proportion=1.0)
 
     callback_class = MaxScoreOverRotations
-    if args.peak_calling:
-        callback_class = PeakCallerMaximumFilter
-
     if args.orientations is not None:
         callback_class = MaxScoreOverRotationsConstrained
+    elif args.peak_calling:
+        callback_class = PeakCallerMaximumFilter
 
     # Determine suitable backend for the selected operation
     available_backends = be.available_backends()
@@ -939,16 +939,6 @@
         args.use_gpu = False
         be.change_backend("pytorch", device=device)
 
-    # TODO: Make the inverse casting from complex64 -> float 16 stable
-    # if args.use_mixed_precision:
-    #     be.change_backend(
-    #         backend_name=args.backend,
-    #         float_dtype=be._array_backend.float16,
-    #         complex_dtype=be._array_backend.complex64,
-    #         int_dtype=be._array_backend.int16,
-    #         device=device,
-    #     )
-
     available_memory = be.get_available_memory() * be.device_count()
     if args.memory is None:
         args.memory = int(args.memory_scaling * available_memory)
@@ -976,6 +966,8 @@
         target_dim=target.metadata.get("batch_dimension", None),
         template_dim=template.metadata.get("batch_dimension", None),
     )
+    args.batch_dims = tuple(int(x) for x in np.where(matching_data._batch_mask)[0])
+
     splits, schedule = compute_schedule(args, matching_data, callback_class)
 
     n_splits = np.prod(list(splits.values()))
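Note: the new args.batch_dims line records which axes of the matching problem are batch dimensions, so postprocess.py can pick them up again (see the hasattr(cli_args, "batch_dims") change further down). The conversion from the boolean _batch_mask to a tuple of axis indices is plain NumPy:

```python
import numpy as np

# Illustrative boolean mask marking batch axes of a 4D matching problem,
# e.g. a particle stack where axis 0 enumerates particles.
batch_mask = np.array([True, False, False, False])

batch_dims = tuple(int(x) for x in np.where(batch_mask)[0])
print(batch_dims)  # (0,)
```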
@@ -1004,7 +996,6 @@
     compute_options = {
         "Backend": be._BACKEND_REGISTRY[be._backend_name],
         "Compute Devices": f"CPU [{args.cores}], GPU [{gpus_used}]",
-        "Use Mixed Precision": args.use_mixed_precision,
         "Assigned Memory [MB]": f"{args.memory // 1e6} [out of {available_memory//1e6}]",
         "Temporary Directory": args.temp_directory,
         "Target Splits": f"{target_split} [N={n_splits}]",
@@ -1025,7 +1016,6 @@
         "Tilt Angles": args.tilt_angles,
         "Tilt Weighting": args.tilt_weighting,
         "Reconstruction Filter": args.reconstruction_filter,
-        "Extend Filter Grid": args.pad_filter,
     }
     if args.ctf_file is not None or args.defocus is not None:
         filter_args["CTF File"] = args.ctf_file
@@ -1081,7 +1071,6 @@
         callback_class_args=analyzer_args,
         target_splits=splits,
         pad_target_edges=args.pad_edges,
-        pad_template_filter=args.pad_filter,
         interpolation_order=args.interpolation_order,
     )
 
--- pytme-0.3b0.post1/scripts/postprocess.py
+++ pytme-0.3.1.post1/scripts/postprocess.py
@@ -17,7 +17,7 @@ from scipy.special import erfcinv
 
 from tme import Density, Structure, Orientations
 from tme.cli import sanitize_name, print_block, print_entry
-from tme.matching_utils import load_pickle, centered_mask, write_pickle
+from tme.matching_utils import load_pickle, center_slice, write_pickle
 from tme.matching_optimization import create_score_object, optimize_match
 from tme.rotations import euler_to_rotationmatrix, euler_from_rotationmatrix
 from tme.analyzer import (
@@ -87,6 +87,11 @@ def parse_args():
         help="Output prefix. Defaults to basename of first input. Extension is "
         "added with respect to chosen output format.",
     )
+    output_group.add_argument(
+        "--angles-clockwise",
+        action="store_true",
+        help="Report Euler angles in clockwise format expected by RELION.",
+    )
     output_group.add_argument(
         "--output-format",
         choices=[
@@ -112,7 +117,7 @@ def parse_args():
     peak_group.add_argument(
         "--peak-caller",
         choices=list(PEAK_CALLERS.keys()),
-        default="PeakCallerScipy",
+        default="PeakCallerMaximumFilter",
         help="Peak caller for local maxima identification.",
     )
     peak_group.add_argument(
@@ -183,7 +188,7 @@ def parse_args():
     )
     additional_group.add_argument(
         "--n-false-positives",
-        type=int,
+        type=float,
         default=None,
         required=False,
         help="Number of accepted false-positives picks to determine minimum score.",
@@ -313,11 +318,7 @@ def normalize_input(foregrounds: Tuple[str], backgrounds: Tuple[str]) -> Tuple:
         data = load_matching_output(foreground)
         scores, _, rotations, rotation_mapping, *_ = data
 
-        # We could normalize to unit sdev, but that might lead to unexpected
-        # results for flat background distributions
-        scores -= scores.mean()
         indices = tuple(slice(0, x) for x in scores.shape)
-
         indices_update = scores > scores_out[indices]
         scores_out[indices][indices_update] = scores[indices_update]
 
@@ -364,9 +365,8 @@ def normalize_input(foregrounds: Tuple[str], backgrounds: Tuple[str]) -> Tuple:
     scores_norm = np.full(out_shape_norm, fill_value=0, dtype=np.float32)
     for background in backgrounds:
         data_norm = load_matching_output(background)
+        scores, _, rotations, rotation_mapping, *_ = data_norm
 
-        scores = data_norm[0]
-        scores -= scores.mean()
         indices = tuple(slice(0, x) for x in scores.shape)
         indices_update = scores > scores_norm[indices]
         scores_norm[indices][indices_update] = scores[indices_update]
@@ -375,7 +375,9 @@ def normalize_input(foregrounds: Tuple[str], backgrounds: Tuple[str]) -> Tuple:
     update = tuple(slice(0, int(x)) for x in np.minimum(out_shape, scores.shape))
     scores_out = np.full(out_shape, fill_value=0, dtype=np.float32)
     scores_out[update] = data[0][update] - scores_norm[update]
-    scores_out[update] = np.divide(scores_out[update], 1 - scores_norm[update])
+    scores_out[update] += scores_norm[update].mean()
+
+    # scores_out[update] = np.divide(scores_out[update], 1 - scores_norm[update])
     scores_out = np.fmax(scores_out, 0, out=scores_out)
     data[0] = scores_out
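Note: normalize_input thus reduces to a per-voxel maximum over all foreground runs and over all background runs, followed by subtracting the background map, adding back its mean to preserve the score scale, and clamping at zero (the old division by 1 - scores_norm survives only as a comment). A self-contained NumPy sketch of that arithmetic, assuming equally shaped score maps (the script additionally handles shape mismatches via slicing):

```python
import numpy as np

rng = np.random.default_rng(0)
foregrounds = [rng.random((8, 8, 8), dtype=np.float32) for _ in range(2)]
backgrounds = [rng.random((8, 8, 8), dtype=np.float32) for _ in range(2)]

# Element-wise maximum across runs, as the update loops in the script do.
fg = np.maximum.reduce(foregrounds)
bg = np.maximum.reduce(backgrounds)

# Background-corrected scores: subtract the background map, restore its
# mean so the absolute score scale stays comparable, clamp negatives.
scores = fg - bg + bg.mean()
scores = np.fmax(scores, 0, out=scores)
```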
 
@@ -478,15 +480,21 @@ def main():
     if orientations is None:
         translations, rotations, scores, details = [], [], [], []
 
-        # Data processed by normalize_input is guaranteed to have this shape
-        scores, offset, rotation_array, rotation_mapping, meta = data
+        var = None
+        # Data processed by normalize_input is guaranteed to have this shape)
+        scores, _, rotation_array, rotation_mapping, *_ = data
+        if len(data) == 6:
+            scores, _, rotation_array, rotation_mapping, var, *_ = data
 
         cropped_shape = np.subtract(
             scores.shape, np.multiply(args.min_boundary_distance, 2)
         ).astype(int)
 
         if args.min_boundary_distance > 0:
-            scores = centered_mask(scores, new_shape=cropped_shape)
+            _scores = np.zeros_like(scores)
+            subset = center_slice(scores.shape, cropped_shape)
+            _scores[subset] = scores[subset]
+            scores = _scores
 
         if args.n_false_positives is not None:
             # Rickgauer et al. 2017
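Note: centered_mask was replaced by an explicit zero-fill outside a centered region. This diff does not show the implementation of tme.matching_utils.center_slice; a plausible stand-in consistent with its use here (assumption: it returns a tuple of slices selecting a centered new_shape region) would be:

```python
import numpy as np

def center_slice(shape, new_shape):
    # Hypothetical re-implementation for illustration: slices selecting a
    # centered region of size new_shape within an array of size shape.
    starts = [(s - n) // 2 for s, n in zip(shape, new_shape)]
    return tuple(slice(st, st + n) for st, n in zip(starts, new_shape))

scores = np.random.rand(10, 10).astype(np.float32)
cropped_shape = np.subtract(scores.shape, 2 * 2).astype(int)  # boundary of 2

# Zero everything outside the centered region, as the new code does.
_scores = np.zeros_like(scores)
subset = center_slice(scores.shape, cropped_shape)
_scores[subset] = scores[subset]
scores = _scores
```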
@@ -499,17 +507,20 @@
             )
             args.n_false_positives = max(args.n_false_positives, 1)
             n_correlations = np.size(scores[cropped_slice]) * len(rotation_mapping)
+            std = np.std(scores[cropped_slice])
+            if var is not None:
+                std = np.asarray(np.sqrt(var)).reshape(())
+
             minimum_score = np.multiply(
                 erfcinv(2 * args.n_false_positives / n_correlations),
-                np.sqrt(2) * np.std(scores[cropped_slice]),
+                np.sqrt(2) * std,
             )
-            print(f"Determined minimum score cutoff: {minimum_score}.")
-            minimum_score = max(minimum_score, 0)
-            args.min_score = minimum_score
+            print(f"Determined cutoff --min-score {minimum_score}.")
+            args.min_score = max(minimum_score, 0)
 
         args.batch_dims = None
-        if hasattr(cli_args, "target_batch"):
-            args.batch_dims = cli_args.target_batch
+        if hasattr(cli_args, "batch_dims"):
+            args.batch_dims = cli_args.batch_dims
 
         peak_caller_kwargs = {
             "shape": scores.shape,
@@ -517,8 +528,8 @@
             "min_distance": args.min_distance,
             "min_boundary_distance": args.min_boundary_distance,
             "batch_dims": args.batch_dims,
-            "minimum_score": args.min_score,
-            "maximum_score": args.max_score,
+            "min_score": args.min_score,
+            "max_score": args.max_score,
         }
 
         peak_caller = PEAK_CALLERS[args.peak_caller](**peak_caller_kwargs)
@@ -611,6 +622,9 @@
         orientations.rotations[index] = angles
         orientations.scores[index] = score * -1
 
+    if args.angles_clockwise:
+        orientations.rotations *= -1
+
     if args.output_format in ("orientations", "relion4", "relion5"):
         file_format, extension = "text", "tsv"