pytme-0.2.0b0-cp311-cp311-macosx_14_0_arm64.whl → pytme-0.2.2-cp311-cp311-macosx_14_0_arm64.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- pytme-0.2.2.data/scripts/match_template.py +1187 -0
- {pytme-0.2.0b0.data → pytme-0.2.2.data}/scripts/postprocess.py +170 -71
- {pytme-0.2.0b0.data → pytme-0.2.2.data}/scripts/preprocessor_gui.py +179 -86
- pytme-0.2.2.dist-info/METADATA +91 -0
- pytme-0.2.2.dist-info/RECORD +74 -0
- {pytme-0.2.0b0.dist-info → pytme-0.2.2.dist-info}/WHEEL +1 -1
- scripts/extract_candidates.py +126 -87
- scripts/match_template.py +596 -209
- scripts/match_template_filters.py +571 -223
- scripts/postprocess.py +170 -71
- scripts/preprocessor_gui.py +179 -86
- scripts/refine_matches.py +567 -159
- tme/__init__.py +0 -1
- tme/__version__.py +1 -1
- tme/analyzer.py +627 -855
- tme/backends/__init__.py +41 -11
- tme/backends/_jax_utils.py +185 -0
- tme/backends/cupy_backend.py +120 -225
- tme/backends/jax_backend.py +282 -0
- tme/backends/matching_backend.py +464 -388
- tme/backends/mlx_backend.py +45 -68
- tme/backends/npfftw_backend.py +256 -514
- tme/backends/pytorch_backend.py +41 -154
- tme/density.py +312 -421
- tme/extensions.cpython-311-darwin.so +0 -0
- tme/matching_data.py +366 -303
- tme/matching_exhaustive.py +279 -1521
- tme/matching_optimization.py +234 -129
- tme/matching_scores.py +884 -0
- tme/matching_utils.py +281 -387
- tme/memory.py +377 -0
- tme/orientations.py +226 -66
- tme/parser.py +3 -4
- tme/preprocessing/__init__.py +2 -0
- tme/preprocessing/_utils.py +217 -0
- tme/preprocessing/composable_filter.py +31 -0
- tme/preprocessing/compose.py +55 -0
- tme/preprocessing/frequency_filters.py +388 -0
- tme/preprocessing/tilt_series.py +1011 -0
- tme/preprocessor.py +574 -530
- tme/structure.py +495 -189
- tme/types.py +5 -3
- pytme-0.2.0b0.data/scripts/match_template.py +0 -800
- pytme-0.2.0b0.dist-info/METADATA +0 -73
- pytme-0.2.0b0.dist-info/RECORD +0 -66
- tme/helpers.py +0 -881
- tme/matching_constrained.py +0 -195
- {pytme-0.2.0b0.data → pytme-0.2.2.data}/scripts/estimate_ram_usage.py +0 -0
- {pytme-0.2.0b0.data → pytme-0.2.2.data}/scripts/preprocess.py +0 -0
- {pytme-0.2.0b0.dist-info → pytme-0.2.2.dist-info}/LICENSE +0 -0
- {pytme-0.2.0b0.dist-info → pytme-0.2.2.dist-info}/entry_points.txt +0 -0
- {pytme-0.2.0b0.dist-info → pytme-0.2.2.dist-info}/top_level.txt +0 -0
tme/backends/pytorch_backend.py
CHANGED
@@ -13,36 +13,38 @@ from multiprocessing.managers import SharedMemoryManager
 
 import numpy as np
 from .npfftw_backend import NumpyFFTWBackend
-from ..types import NDArray, TorchTensor
+from ..types import NDArray, TorchTensor, shm_type
 
 
 class PytorchBackend(NumpyFFTWBackend):
     """
-    A pytorch
+    A pytorch-based matching backend.
     """
 
     def __init__(
         self,
         device="cuda",
-
+        float_dtype=None,
         complex_dtype=None,
-
+        int_dtype=None,
+        overflow_safe_dtype=None,
         **kwargs,
     ):
         import torch
         import torch.nn.functional as F
 
-
+        float_dtype = torch.float32 if float_dtype is None else float_dtype
         complex_dtype = torch.complex64 if complex_dtype is None else complex_dtype
-
-
-
+        int_dtype = torch.int32 if int_dtype is None else int_dtype
+        if overflow_safe_dtype is None:
+            overflow_safe_dtype = torch.float32
 
         super().__init__(
             array_backend=torch,
-
+            float_dtype=float_dtype,
             complex_dtype=complex_dtype,
-
+            int_dtype=int_dtype,
+            overflow_safe_dtype=overflow_safe_dtype,
         )
         self.device = device
         self.F = F
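The constructor hunk above replaces the old dtype handling with explicit `float_dtype`, `complex_dtype`, `int_dtype`, and `overflow_safe_dtype` keywords that resolve to torch.float32 / torch.complex64 / torch.int32 / torch.float32 when left as None. A minimal construction sketch, assuming only that pytme 0.2.2 and torch are installed; `device="cpu"` is an illustrative choice:

```python
# Sketch only: keyword names and defaults are taken from the hunk above.
import torch
from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend(
    device="cpu",                      # the diff shows "cuda" as the default
    float_dtype=torch.float32,         # same values the None-defaults resolve to
    complex_dtype=torch.complex64,
    int_dtype=torch.int32,
    overflow_safe_dtype=torch.float32,
)
```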
@@ -57,28 +59,26 @@ class PytorchBackend(NumpyFFTWBackend):
     def to_numpy_array(self, arr: TorchTensor) -> NDArray:
         if isinstance(arr, np.ndarray):
             return arr
-
+        elif isinstance(arr, self._array_backend.Tensor):
+            return arr.cpu().numpy()
+        return np.array(arr)
 
     def to_cpu_array(self, arr: TorchTensor) -> NDArray:
         return arr.cpu()
 
+    def get_fundamental_dtype(self, arr):
+        if self._array_backend.is_floating_point(arr):
+            return float
+        elif self._array_backend.is_complex(arr):
+            return complex
+        return int
+
     def free_cache(self):
         self._array_backend.cuda.empty_cache()
 
     def mod(self, x1, x2, *args, **kwargs):
-        x1 = self.to_backend_array(x1)
-        x2 = self.to_backend_array(x2)
         return self._array_backend.remainder(x1, x2, *args, **kwargs)
 
-    def sum(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.sum(*args, **kwargs)
-
-    def mean(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.mean(*args, **kwargs)
-
-    def std(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.std(*args, **kwargs)
-
     def max(self, *args, **kwargs) -> NDArray:
         ret = self._array_backend.amax(*args, **kwargs)
         if type(ret) == self._array_backend.Tensor:
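The hunk above adds `get_fundamental_dtype`, which maps a tensor to Python's `float`, `complex`, or `int` via torch's type predicates, and lets `to_numpy_array` fall through to `np.array` for non-tensor inputs. A self-contained sketch of the same dispatch, written against plain torch rather than the backend instance:

```python
# Standalone reimplementation of the dispatch added above (illustrative only).
import torch

def fundamental_dtype(arr: torch.Tensor) -> type:
    if torch.is_floating_point(arr):   # float16/32/64, bfloat16
        return float
    elif torch.is_complex(arr):        # complex64/128
        return complex
    return int                         # integer and bool tensors

assert fundamental_dtype(torch.zeros(3)) is float
assert fundamental_dtype(torch.zeros(3, dtype=torch.complex64)) is complex
assert fundamental_dtype(torch.zeros(3, dtype=torch.int64)) is int
```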
@@ -110,49 +110,28 @@ class PytorchBackend(NumpyFFTWBackend):
     def zeros(self, shape, dtype=None):
         return self._array_backend.zeros(shape, dtype=dtype, device=self.device)
 
-    def preallocate_array(self, shape: Tuple[int], dtype: type) -> NDArray:
-        """
-        Returns a byte-aligned array of zeros with specified shape and dtype.
-
-        Parameters
-        ----------
-        shape : Tuple[int]
-            Desired shape for the array.
-        dtype : type
-            Desired data type for the array.
-
-        Returns
-        -------
-        NDArray
-            Byte-aligned array of zeros with specified shape and dtype.
-        """
-        arr = self._array_backend.zeros(shape, dtype=dtype, device=self.device)
-        return arr
-
     def full(self, shape, fill_value, dtype=None):
         return self._array_backend.full(
             size=shape, dtype=dtype, fill_value=fill_value, device=self.device
         )
 
+    def arange(self, *args, **kwargs):
+        return self._array_backend.arange(*args, **kwargs, device=self.device)
+
     def datatype_bytes(self, dtype: type) -> int:
         temp = self.zeros(1, dtype=dtype)
         return temp.element_size()
 
-    def fill(self, arr: TorchTensor, value: float):
+    def fill(self, arr: TorchTensor, value: float) -> TorchTensor:
         arr.fill_(value)
+        return arr
 
-    def astype(self, arr, dtype):
+    def astype(self, arr: TorchTensor, dtype: type) -> TorchTensor:
         return arr.to(dtype)
 
     def flip(self, a, axis, **kwargs):
         return self._array_backend.flip(input=a, dims=axis, **kwargs)
 
-    def arange(self, *args, **kwargs):
-        return self._array_backend.arange(*args, **kwargs, device=self.device)
-
-    def stack(self, *args, **kwargs):
-        return self._array_backend.stack(*args, **kwargs)
-
     def topk_indices(self, arr, k):
         temp = arr.reshape(-1)
         values, indices = self._array_backend.topk(temp, k)
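Two behavioural details from the hunk above: `fill` now returns the tensor it modified in place, and `arange` (moved earlier in the file) allocates directly on the backend's device. A short usage sketch, assuming a CPU backend instance:

```python
# Usage sketch for the updated helpers; device="cpu" keeps it runnable without a GPU.
import torch
from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend(device="cpu")
buf = backend.zeros((4,), dtype=torch.float32)
buf = backend.fill(buf, 1.5)      # in-place fill, now also returned
idx = backend.arange(0, 10, 2)    # tensor([0, 2, 4, 6, 8]) on the backend device
```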
@@ -236,12 +215,11 @@ class PytorchBackend(NumpyFFTWBackend):
     def repeat(self, *args, **kwargs):
         return self._array_backend.repeat_interleave(*args, **kwargs)
 
-    def
-        self, shm: TorchTensor, shape: Tuple[int], dtype: str
-    ) -> TorchTensor:
+    def from_sharedarr(self, args) -> TorchTensor:
         if self.device == "cuda":
-            return
+            return args[0]
 
+        shm, shape, dtype = args
         required_size = int(self._array_backend.prod(self.to_backend_array(shape)))
 
         ret = self._array_backend.frombuffer(shm.buf, dtype=dtype)[
@@ -249,9 +227,9 @@ class PytorchBackend(NumpyFFTWBackend):
         ].reshape(shape)
         return ret
 
-    def
+    def to_sharedarr(
         self, arr: TorchTensor, shared_memory_handler: type = None
-    ) ->
+    ) -> shm_type:
         if self.device == "cuda":
             return arr
 
@@ -264,7 +242,7 @@ class PytorchBackend(NumpyFFTWBackend):
 
         shm.buf[:nbytes] = arr.numpy().tobytes()
 
-        return shm
+        return shm, arr.shape, arr.dtype
 
     def transpose(self, arr):
         return arr.permute(*self._array_backend.arange(arr.ndim - 1, -1, -1))
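Taken together, the three shared-memory hunks change the protocol from passing shape and dtype separately to passing a single `(shm, shape, dtype)` tuple: `to_sharedarr` returns it and `from_sharedarr` unpacks it, while on `device="cuda"` both calls simply pass the tensor through. A round-trip sketch, assuming a CPU backend and that `shared_memory_handler` accepts a `SharedMemoryManager` (the allocation code itself is not shown in this diff):

```python
# Round-trip sketch of the tuple-based shared-memory handoff (CPU path).
import torch
from multiprocessing.managers import SharedMemoryManager
from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend(device="cpu")
arr = torch.arange(12, dtype=torch.float32).reshape(3, 4)

with SharedMemoryManager() as smh:
    handle = backend.to_sharedarr(arr, shared_memory_handler=smh)  # (shm, shape, dtype)
    restored = backend.from_sharedarr(handle)                      # view over shm.buf
    assert torch.equal(restored, arr)
```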
@@ -272,16 +250,17 @@ class PytorchBackend(NumpyFFTWBackend):
     def power(self, *args, **kwargs):
         return self._array_backend.pow(*args, **kwargs)
 
-    def
+    def rigid_transform(
         self,
         arr: TorchTensor,
         rotation_matrix: TorchTensor,
         arr_mask: TorchTensor = None,
         translation: TorchTensor = None,
+        use_geometric_center: bool = False,
         out: TorchTensor = None,
         out_mask: TorchTensor = None,
         order: int = 1,
-
+        cache: bool = False,
     ):
         """
         Rotates the given tensor `arr` based on the provided `rotation_matrix`.
@@ -342,8 +321,6 @@ class PytorchBackend(NumpyFFTWBackend):
             raise ValueError(
                 f"Got {order} but supported interpolation orders are: {modes}."
             )
-        rotate_mask = arr_mask is not None
-        return_type = (out is None) + 2 * rotate_mask * (out_mask is None)
 
         out = self.zeros_like(arr) if out is None else out
         if translation is None:
@@ -363,7 +340,7 @@ class PytorchBackend(NumpyFFTWBackend):
             mode=mode,
         )
 
-        if
+        if arr_mask is not None:
             out_mask_slice = tuple(slice(0, x) for x in arr_mask.shape)
             if out_mask is None:
                 out_mask = self._array_backend.zeros_like(arr_mask)
@@ -374,15 +351,7 @@ class PytorchBackend(NumpyFFTWBackend):
                 mode=mode,
             )
 
-
-            case 0:
-                return None
-            case 1:
-                return out
-            case 2:
-                return out_mask
-            case 3:
-                return out, out_mask
+        return out, out_mask
 
     def build_fft(
         self,
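The `rigid_transform` hunks drop the old `return_type` bookkeeping and the match/case at the end: the method now always returns the `(out, out_mask)` pair, with `out_mask` left as `None` when no mask was supplied. A call sketch under that contract; the identity rotation and shapes are illustrative and assume the CPU path works without explicit output buffers:

```python
# Call sketch for the simplified return contract of rigid_transform.
import torch
from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend(device="cpu")
volume = torch.rand(16, 16, 16)
identity = torch.eye(3)

out, out_mask = backend.rigid_transform(
    arr=volume,
    rotation_matrix=identity,
    order=1,           # bilinear, per the supported interpolation orders
)
# out_mask is None here because no arr_mask was passed.
```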
@@ -391,38 +360,17 @@ class PytorchBackend(NumpyFFTWBackend):
         inverse_fast_shape: Tuple[int] = None,
         **kwargs,
     ) -> Tuple[Callable, Callable]:
-        """
-        Build fft builder functions.
-
-        Parameters
-        ----------
-        fast_shape : tuple
-            Tuple of integers corresponding to fast convolution shape
-            (see :py:meth:`PytorchBackend.compute_convolution_shapes`).
-        fast_ft_shape : tuple
-            Tuple of integers corresponding to the shape of the Fourier
-            transform array (see :py:meth:`PytorchBackend.compute_convolution_shapes`).
-        inverse_fast_shape : tuple, optional
-            Output shape of the inverse Fourier transform. By default fast_shape.
-        **kwargs : dict, optional
-            Unused keyword arguments.
-
-        Returns
-        -------
-        tuple
-            Tupple containing callable rfft and irfft object.
-        """
         if inverse_fast_shape is None:
             inverse_fast_shape = fast_shape
 
         def rfftn(
             arr: TorchTensor, out: TorchTensor, shape: Tuple[int] = fast_shape
-        ) ->
+        ) -> TorchTensor:
             return self._array_backend.fft.rfftn(arr, s=shape, out=out)
 
         def irfftn(
             arr: TorchTensor, out: TorchTensor, shape: Tuple[int] = inverse_fast_shape
-        ) ->
+        ) -> TorchTensor:
             return self._array_backend.fft.irfftn(arr, s=shape, out=out)
 
         return rfftn, irfftn
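`build_fft` keeps its behaviour but loses the docstring: it returns `rfftn`/`irfftn` callables closed over the fast shapes, forwarding to `torch.fft`. A usage sketch with illustrative shapes; in real use the shapes would come from `compute_convolution_shapes`, as the removed docstring noted:

```python
# Usage sketch for the rfftn/irfftn pair returned by build_fft.
import torch
from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend(device="cpu")
fast_shape, fast_ft_shape = (16, 16), (16, 9)          # illustrative only

rfftn, irfftn = backend.build_fft(fast_shape=fast_shape, fast_ft_shape=fast_ft_shape)

signal = torch.rand(*fast_shape)
spectrum = rfftn(signal, out=None)      # complex tensor of shape (16, 9)
recovered = irfftn(spectrum, out=None)  # real tensor of shape (16, 16)
```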
@@ -434,30 +382,6 @@ class PytorchBackend(NumpyFFTWBackend):
         translation: TorchTensor,
         mode,
     ) -> TorchTensor:
-        """
-        Performs an affine transformation on the given tensor.
-
-        The affine transformation is defined by the provided `rotation_matrix`
-        and the `translation` vector. The transformation is applied to the
-        input tensor `arr`.
-
-        Parameters
-        ----------
-        arr : TorchTensor
-            The input tensor on which the transformation will be applied.
-        rotation_matrix : TorchTensor
-            The matrix defining the rotation component of the transformation.
-        translation : TorchTensor
-            The vector defining the translation to be applied post rotation.
-        mode : str
-            Interpolation mode to use. Options are: 'nearest', 'bilinear', 'bicubic'.
-
-        Returns
-        -------
-        TorchTensor
-            The tensor after applying the affine transformation.
-        """
-
         transformation_matrix = self._array_backend.zeros(
             arr.ndim, arr.ndim + 1, device=arr.device, dtype=arr.dtype
         )
@@ -484,22 +408,6 @@ class PytorchBackend(NumpyFFTWBackend):
 
     @contextmanager
    def set_device(self, device_index: int):
-        """
-        Set the active GPU device as a context.
-
-        This method sets the active GPU device for operations within the context.
-
-        Parameters
-        ----------
-        device_index : int
-            Index of the GPU device to be set as active.
-
-        Yields
-        ------
-        None
-            Operates as a context manager, yielding None and providing
-            the set GPU context for enclosed operations.
-        """
         if self.device == "cuda":
             with self._array_backend.cuda.device(device_index):
                 yield
@@ -507,28 +415,7 @@ class PytorchBackend(NumpyFFTWBackend):
             yield None
 
     def device_count(self) -> int:
-        """
-        Return the number of available GPU devices.
-
-        Returns
-        -------
-        int
-            Number of available GPU devices.
-        """
         return self._array_backend.cuda.device_count()
 
     def reverse(self, arr: TorchTensor) -> TorchTensor:
-        """
-        Reverse the order of elements in a tensor along all its axes.
-
-        Parameters
-        ----------
-        tensor : TorchTensor
-            Input tensor.
-
-        Returns
-        -------
-        TorchTensor
-            Reversed tensor.
-        """
         return self._array_backend.flip(arr, [i for i in range(arr.ndim)])