pytme 0.1.5__cp311-cp311-macosx_14_0_arm64.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between those package versions.
- pytme-0.1.5.data/scripts/estimate_ram_usage.py +81 -0
- pytme-0.1.5.data/scripts/match_template.py +744 -0
- pytme-0.1.5.data/scripts/postprocess.py +279 -0
- pytme-0.1.5.data/scripts/preprocess.py +93 -0
- pytme-0.1.5.data/scripts/preprocessor_gui.py +729 -0
- pytme-0.1.5.dist-info/LICENSE +153 -0
- pytme-0.1.5.dist-info/METADATA +69 -0
- pytme-0.1.5.dist-info/RECORD +63 -0
- pytme-0.1.5.dist-info/WHEEL +5 -0
- pytme-0.1.5.dist-info/entry_points.txt +6 -0
- pytme-0.1.5.dist-info/top_level.txt +2 -0
- scripts/__init__.py +0 -0
- scripts/estimate_ram_usage.py +81 -0
- scripts/match_template.py +744 -0
- scripts/match_template_devel.py +788 -0
- scripts/postprocess.py +279 -0
- scripts/preprocess.py +93 -0
- scripts/preprocessor_gui.py +729 -0
- tme/__init__.py +6 -0
- tme/__version__.py +1 -0
- tme/analyzer.py +1144 -0
- tme/backends/__init__.py +134 -0
- tme/backends/cupy_backend.py +309 -0
- tme/backends/matching_backend.py +1154 -0
- tme/backends/npfftw_backend.py +763 -0
- tme/backends/pytorch_backend.py +526 -0
- tme/data/__init__.py +0 -0
- tme/data/c48n309.npy +0 -0
- tme/data/c48n527.npy +0 -0
- tme/data/c48n9.npy +0 -0
- tme/data/c48u1.npy +0 -0
- tme/data/c48u1153.npy +0 -0
- tme/data/c48u1201.npy +0 -0
- tme/data/c48u1641.npy +0 -0
- tme/data/c48u181.npy +0 -0
- tme/data/c48u2219.npy +0 -0
- tme/data/c48u27.npy +0 -0
- tme/data/c48u2947.npy +0 -0
- tme/data/c48u3733.npy +0 -0
- tme/data/c48u4749.npy +0 -0
- tme/data/c48u5879.npy +0 -0
- tme/data/c48u7111.npy +0 -0
- tme/data/c48u815.npy +0 -0
- tme/data/c48u83.npy +0 -0
- tme/data/c48u8649.npy +0 -0
- tme/data/c600v.npy +0 -0
- tme/data/c600vc.npy +0 -0
- tme/data/metadata.yaml +80 -0
- tme/data/quat_to_numpy.py +42 -0
- tme/data/scattering_factors.pickle +0 -0
- tme/density.py +2314 -0
- tme/extensions.cpython-311-darwin.so +0 -0
- tme/helpers.py +881 -0
- tme/matching_data.py +377 -0
- tme/matching_exhaustive.py +1553 -0
- tme/matching_memory.py +382 -0
- tme/matching_optimization.py +1123 -0
- tme/matching_utils.py +1180 -0
- tme/parser.py +429 -0
- tme/preprocessor.py +1291 -0
- tme/scoring.py +866 -0
- tme/structure.py +1428 -0
- tme/types.py +10 -0
tme/backends/__init__.py
ADDED
@@ -0,0 +1,134 @@
""" pyTME backend manager.

    Copyright (c) 2023 European Molecular Biology Laboratory

    Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
"""

from typing import Dict, List

from .matching_backend import MatchingBackend
from .npfftw_backend import NumpyFFTWBackend
from .pytorch_backend import PytorchBackend
from .cupy_backend import CupyBackend


class BackendManager:
    """
    Manager for template matching backends.

    This class serves as an interface to various computational backends (e.g.,
    CPU, GPU). It allows users to seamlessly swap between different backend
    implementations while preserving the consistency and functionality of the API.
    Direct attribute and method calls to the manager are delegated to the currently
    active backend.

    Attributes
    ----------
    _BACKEND_REGISTRY : dict
        A dictionary mapping backend names to their respective classes or instances.
    _backend : instance of MatchingBackend
        An instance of the currently active backend.
    _backend_name : str
        Name of the current backend.
    _backend_args : Dict
        Arguments passed to create the current backend.

    Examples
    --------
    >>> from tme.backends import backend
    >>> backend.multiply(arr1, arr2)
    # This will use the default NumpyFFTWBackend's multiply method

    >>> backend.change_backend("pytorch")
    >>> backend.multiply(arr1, arr2)
    # This will use the PytorchBackend's multiply method

    Notes
    -----
    To add custom backends, use the `add_backend` method. To switch
    between backends, use the `change_backend` method. Note that the backend
    has to be reinitialized when using fork-based parallelism.
    """

    def __init__(self):
        self._BACKEND_REGISTRY = {
            "cpu_backend": NumpyFFTWBackend,
            "pytorch": PytorchBackend,
            "cupy": CupyBackend,
        }
        self._backend = NumpyFFTWBackend()
        self._backend_name = "cpu_backend"
        self._backend_args = {}

    def __repr__(self):
        return f"<BackendManager: using {self._backend_name}>"

    def __getattr__(self, name):
        return getattr(self._backend, name)

    def __dir__(self) -> List:
        """
        Return a list of attributes available in this object,
        including those from the backend.

        Returns
        -------
        list
            Sorted list of attributes.
        """
        base_attributes = []
        base_attributes.extend(dir(self.__class__))
        base_attributes.extend(self.__dict__.keys())
        base_attributes.extend(dir(self._backend))
        return sorted(base_attributes)

    def add_backend(self, backend_name: str, backend_class: type):
        """
        Adds a custom backend to the registry.

        Parameters
        ----------
        backend_name : str
            Name by which the backend can be referenced.
        backend_class : :py:class:`MatchingBackend`
            The backend class to be added.

        Raises
        ------
        ValueError
            If the provided backend_class does not inherit from MatchingBackend.
        """
        if not issubclass(backend_class, MatchingBackend):
            raise ValueError("backend_class needs to inherit from MatchingBackend.")
        self._BACKEND_REGISTRY[backend_name] = backend_class

    def change_backend(self, backend_name: str, **backend_kwargs: Dict) -> None:
        """
        Change the backend.

        Parameters
        ----------
        backend_name : str
            Name of the new backend that should be used.
        **backend_kwargs : Dict, optional
            Parameters passed to the __init__ method of the backend.

        Raises
        ------
        NotImplementedError
            If no backend is found with the provided name.
        """
        if backend_name not in self._BACKEND_REGISTRY:
            available_backends = ", ".join(
                [str(x) for x in self._BACKEND_REGISTRY.keys()]
            )
            raise NotImplementedError(
                f"Available backends are {available_backends} - not {backend_name}."
            )
        self._backend = self._BACKEND_REGISTRY[backend_name](**backend_kwargs)
        self._backend_name = backend_name
        self._backend_args = backend_kwargs


backend = BackendManager()
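For orientation, a minimal sketch of the extension points described in the docstring above. It assumes the published NumpyFFTWBackend derives from MatchingBackend (as the registry and attribute documentation suggest) and exposes a multiply method as in the Examples section; the LoggingBackend class and its registry name are hypothetical and only illustrate add_backend / change_backend.

import numpy as np

from tme.backends import backend
from tme.backends.npfftw_backend import NumpyFFTWBackend


class LoggingBackend(NumpyFFTWBackend):
    """Hypothetical backend that traces multiply calls; illustrative only."""

    def multiply(self, *args, **kwargs):
        print("multiply called")
        return super().multiply(*args, **kwargs)


# add_backend validates that the class inherits from MatchingBackend.
backend.add_backend("logging_cpu", LoggingBackend)

# change_backend instantiates the registered class with **backend_kwargs.
backend.change_backend("logging_cpu")
print(backend)  # <BackendManager: using logging_cpu>

# Attribute access on the manager is delegated to the active backend
# via __getattr__, as in the Examples section of the docstring above.
arr1 = np.ones((8, 8), dtype=np.float32)
arr2 = np.full((8, 8), 2.0, dtype=np.float32)
product = backend.multiply(arr1, arr2)

# Return to the default CPU backend.
backend.change_backend("cpu_backend")

Note that change_backend constructs a fresh instance of the registered class on every call, so any state held by the previously active backend instance is discarded.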
tme/backends/cupy_backend.py
ADDED
@@ -0,0 +1,309 @@
""" Backend using cupy and GPU acceleration for
    template matching.

    Copyright (c) 2023 European Molecular Biology Laboratory

    Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
"""

import warnings
from typing import Tuple, Dict, Callable
from contextlib import contextmanager

import numpy as np
from numpy.typing import NDArray

from .npfftw_backend import NumpyFFTWBackend
from ..types import CupyArray


class CupyBackend(NumpyFFTWBackend):
    """
    A CuPy-based backend for template matching.
    """

    def __init__(
        self, default_dtype=None, complex_dtype=None, default_dtype_int=None, **kwargs
    ):
        import cupy as cp
        from cupyx.scipy.fft import get_fft_plan
        from cupyx.scipy.ndimage import affine_transform
        from cupyx.scipy.ndimage import maximum_filter

        default_dtype = cp.float32 if default_dtype is None else default_dtype
        complex_dtype = cp.complex64 if complex_dtype is None else complex_dtype
        default_dtype_int = cp.int32 if default_dtype_int is None else default_dtype_int

        super().__init__(
            array_backend=cp,
            default_dtype=default_dtype,
            complex_dtype=complex_dtype,
            default_dtype_int=default_dtype_int,
        )
        self.get_fft_plan = get_fft_plan
        self.affine_transform = affine_transform
        self.maximum_filter = maximum_filter

    def to_backend_array(self, arr: NDArray) -> CupyArray:
        if isinstance(arr, self._array_backend.ndarray):
            return arr
        return self._array_backend.asarray(arr)

    def to_numpy_array(self, arr: CupyArray) -> NDArray:
        return self._array_backend.asnumpy(arr)

    def to_cpu_array(self, arr: NDArray) -> NDArray:
        return self.to_numpy_array(arr)

    def sharedarr_to_arr(
        self, shape: Tuple[int], dtype: str, shm: CupyArray
    ) -> CupyArray:
        return shm

    @staticmethod
    def arr_to_sharedarr(
        arr: CupyArray, shared_memory_handler: type = None
    ) -> CupyArray:
        return arr

    def preallocate_array(self, shape: Tuple[int], dtype: type) -> NDArray:
        """
        Returns a byte-aligned array of zeros with specified shape and dtype.

        Parameters
        ----------
        shape : Tuple[int]
            Desired shape for the array.
        dtype : type
            Desired data type for the array.

        Returns
        -------
        NDArray
            Byte-aligned array of zeros with specified shape and dtype.
        """
        arr = self._array_backend.zeros(shape, dtype=dtype)
        return arr

    def unravel_index(self, indices, shape):
        return self._array_backend.unravel_index(indices=indices, dims=shape)

    def unique(self, ar, axis=None, *args, **kwargs):
        if axis is None:
            return self._array_backend.unique(ar=ar, axis=axis, *args, **kwargs)
        warnings.warn("Axis argument not yet supported in CuPy, falling back to NumPy.")

        ret = np.unique(ar=self.to_numpy_array(ar), axis=axis, *args, **kwargs)
        if type(ret) != tuple:
            return self.to_backend_array(ret)
        return tuple(self.to_backend_array(k) for k in ret)

    def build_fft(
        self,
        fast_shape: Tuple[int],
        fast_ft_shape: Tuple[int],
        real_dtype: type,
        complex_dtype: type,
        fftargs: Dict = {},
        temp_real: NDArray = None,
        temp_fft: NDArray = None,
    ) -> Tuple[Callable, Callable]:
        """
        Build forward and inverse FFT functions.

        Parameters
        ----------
        fast_shape : tuple
            Tuple of integers corresponding to the fast convolution shape
            (see `compute_convolution_shapes`).
        fast_ft_shape : tuple
            Tuple of integers corresponding to the shape of the Fourier
            transform array (see `compute_convolution_shapes`).
        real_dtype : dtype
            Numpy dtype of the inverse Fourier transform.
        complex_dtype : dtype
            Numpy dtype of the Fourier transform.
        fftargs : dict, optional
            Dictionary of FFT construction arguments; not used by this backend.
        temp_real : NDArray, optional
            Temporary real numpy array, by default None.
        temp_fft : NDArray, optional
            Temporary fft numpy array, by default None.

        Returns
        -------
        tuple
            Tuple containing callable rfft and irfft objects.
        """

        if temp_real is None:
            temp_real = self.preallocate_array(fast_shape, real_dtype)
        if temp_fft is None:
            temp_fft = self.preallocate_array(fast_ft_shape, complex_dtype)

        cache = self._array_backend.fft.config.get_plan_cache()
        cache.set_size(2)

        def rfftn(arr: CupyArray, out: CupyArray) -> None:
            out[:] = self.fft.rfftn(arr)[:]

        def irfftn(arr: CupyArray, out: CupyArray) -> None:
            out[:] = self.fft.irfftn(arr)[:]

        return rfftn, irfftn

    def compute_convolution_shapes(
        self, arr1_shape: Tuple[int], arr2_shape: Tuple[int]
    ) -> Tuple[Tuple[int], Tuple[int], Tuple[int]]:
        ret = super().compute_convolution_shapes(arr1_shape, arr2_shape)
        convolution_shape, fast_shape, fast_ft_shape = ret

        # cuFFT plans do not support automatic padding yet.
        is_odd = fast_shape[-1] % 2
        fast_shape[-1] += is_odd
        fast_ft_shape[-1] += is_odd

        return convolution_shape, fast_shape, fast_ft_shape

    def max_filter_coordinates(self, score_space, min_distance: Tuple[int]):
        score_box = tuple(min_distance for _ in range(score_space.ndim))
        max_filter = self.maximum_filter(score_space, size=score_box, mode="constant")
        max_filter = max_filter == score_space

        peaks = self._array_backend.array(self._array_backend.nonzero(max_filter)).T
        return peaks

    def rotate_array(
        self,
        arr: CupyArray,
        rotation_matrix: CupyArray,
        arr_mask: CupyArray = None,
        translation: CupyArray = None,
        use_geometric_center: bool = False,
        out: CupyArray = None,
        out_mask: CupyArray = None,
        order: int = 3,
    ) -> None:
        """
        Rotates coordinates of arr according to rotation_matrix.

        If no output array is provided, this method will compute an array with
        sufficient space to hold all elements. If both `arr` and `arr_mask`
        are provided, `arr_mask` will be centered according to arr.

        Parameters
        ----------
        arr : CupyArray
            The input array to be rotated.
        arr_mask : CupyArray, optional
            The mask of `arr` that will be equivalently rotated.
        rotation_matrix : CupyArray
            The rotation matrix to apply [d x d].
        translation : CupyArray
            The translation to apply [d].
        use_geometric_center : bool, optional
            Whether the rotation should be centered around the geometric
            or mass center. Default is mass center.
        out : CupyArray, optional
            The output array to write the rotation of `arr` to.
        out_mask : CupyArray, optional
            The output array to write the rotation of `arr_mask` to.
        order : int, optional
            Spline interpolation order. Has to be in the range 0-5.

        Notes
        -----
        Only a box of size arr, arr_mask will be considered for interpolation
        in out, out_mask.
        """

        rotate_mask = arr_mask is not None
        return_type = (out is None) + 2 * rotate_mask * (out_mask is None)
        translation = self.zeros(arr.ndim) if translation is None else translation

        center = self.divide(self.to_backend_array(arr.shape), 2)
        if not use_geometric_center:
            center = self.center_of_mass(arr, cutoff=0)

        rotation_matrix_inverted = self.linalg.inv(rotation_matrix)
        transformed_center = rotation_matrix_inverted @ center.reshape(-1, 1)
        transformed_center = transformed_center.reshape(-1)
        base_offset = self.subtract(center, transformed_center)
        offset = self.subtract(base_offset, translation)

        out = self.zeros_like(arr) if out is None else out
        out_slice = tuple(slice(0, stop) for stop in arr.shape)

        # Applying the prefilter leads to the creation of artifacts in the mask.
        self.affine_transform(
            input=arr,
            matrix=rotation_matrix_inverted,
            offset=offset,
            mode="constant",
            output=out[out_slice],
            order=order,
            prefilter=True,
        )

        if rotate_mask:
            out_mask = self.zeros_like(arr_mask) if out_mask is None else out_mask
            out_mask_slice = tuple(slice(0, stop) for stop in arr_mask.shape)
            self.affine_transform(
                input=arr_mask,
                matrix=rotation_matrix_inverted,
                offset=offset,
                mode="constant",
                output=out_mask[out_mask_slice],
                order=order,
                prefilter=False,
            )

        match return_type:
            case 0:
                return None
            case 1:
                return out
            case 2:
                return out_mask
            case 3:
                return out, out_mask

    def get_available_memory(self) -> int:
        with self._array_backend.cuda.Device():
            (
                free_memory,
                available_memory,
            ) = self._array_backend.cuda.runtime.memGetInfo()
        return free_memory

    @contextmanager
    def set_device(self, device_index: int):
        """
        Set the active GPU device as a context.

        This method sets the active GPU device for operations within the context.

        Parameters
        ----------
        device_index : int
            Index of the GPU device to be set as active.

        Yields
        ------
        None
            Operates as a context manager, yielding None and providing
            the set GPU context for enclosed operations.
        """
        with self._array_backend.cuda.Device(device_index):
            yield

    def device_count(self) -> int:
        """
        Return the number of available GPU devices.

        Returns
        -------
        int
            Number of available GPU devices.
        """
        return self._array_backend.cuda.runtime.getDeviceCount()