pytme-0.2.1-cp311-cp311-macosx_14_0_arm64.whl → pytme-0.2.2-cp311-cp311-macosx_14_0_arm64.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (49)
  1. {pytme-0.2.1.data → pytme-0.2.2.data}/scripts/match_template.py +147 -93
  2. {pytme-0.2.1.data → pytme-0.2.2.data}/scripts/postprocess.py +67 -26
  3. {pytme-0.2.1.data → pytme-0.2.2.data}/scripts/preprocessor_gui.py +175 -85
  4. pytme-0.2.2.dist-info/METADATA +91 -0
  5. pytme-0.2.2.dist-info/RECORD +74 -0
  6. {pytme-0.2.1.dist-info → pytme-0.2.2.dist-info}/WHEEL +1 -1
  7. scripts/extract_candidates.py +20 -13
  8. scripts/match_template.py +147 -93
  9. scripts/match_template_filters.py +154 -95
  10. scripts/postprocess.py +67 -26
  11. scripts/preprocessor_gui.py +175 -85
  12. scripts/refine_matches.py +265 -61
  13. tme/__init__.py +0 -1
  14. tme/__version__.py +1 -1
  15. tme/analyzer.py +451 -809
  16. tme/backends/__init__.py +40 -11
  17. tme/backends/_jax_utils.py +185 -0
  18. tme/backends/cupy_backend.py +111 -223
  19. tme/backends/jax_backend.py +214 -150
  20. tme/backends/matching_backend.py +445 -384
  21. tme/backends/mlx_backend.py +32 -59
  22. tme/backends/npfftw_backend.py +239 -507
  23. tme/backends/pytorch_backend.py +21 -145
  24. tme/density.py +233 -363
  25. tme/extensions.cpython-311-darwin.so +0 -0
  26. tme/matching_data.py +322 -285
  27. tme/matching_exhaustive.py +172 -1493
  28. tme/matching_optimization.py +143 -106
  29. tme/matching_scores.py +884 -0
  30. tme/matching_utils.py +280 -386
  31. tme/memory.py +377 -0
  32. tme/orientations.py +52 -12
  33. tme/parser.py +3 -4
  34. tme/preprocessing/_utils.py +61 -32
  35. tme/preprocessing/compose.py +7 -3
  36. tme/preprocessing/frequency_filters.py +49 -39
  37. tme/preprocessing/tilt_series.py +34 -40
  38. tme/preprocessor.py +560 -526
  39. tme/structure.py +491 -188
  40. tme/types.py +5 -3
  41. pytme-0.2.1.dist-info/METADATA +0 -73
  42. pytme-0.2.1.dist-info/RECORD +0 -73
  43. tme/helpers.py +0 -881
  44. tme/matching_constrained.py +0 -195
  45. {pytme-0.2.1.data → pytme-0.2.2.data}/scripts/estimate_ram_usage.py +0 -0
  46. {pytme-0.2.1.data → pytme-0.2.2.data}/scripts/preprocess.py +0 -0
  47. {pytme-0.2.1.dist-info → pytme-0.2.2.dist-info}/LICENSE +0 -0
  48. {pytme-0.2.1.dist-info → pytme-0.2.2.dist-info}/entry_points.txt +0 -0
  49. {pytme-0.2.1.dist-info → pytme-0.2.2.dist-info}/top_level.txt +0 -0
@@ -13,12 +13,12 @@ from multiprocessing.managers import SharedMemoryManager
 
 import numpy as np
 from .npfftw_backend import NumpyFFTWBackend
-from ..types import NDArray, TorchTensor
+from ..types import NDArray, TorchTensor, shm_type
 
 
 class PytorchBackend(NumpyFFTWBackend):
     """
-    A pytorch based backend for template matching
+    A pytorch-based matching backend.
    """
 
    def __init__(
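The newly imported `shm_type` is used further down as the return annotation of `to_sharedarr`. Its actual definition lives in `tme/types.py` (also changed in this release) and is not part of this diff; judging purely from how `to_sharedarr` and `from_sharedarr` use it below, a plausible, purely illustrative alias could look like this:

```python
# Hypothetical sketch only; the real alias in tme/types.py is not shown in this diff.
from multiprocessing.shared_memory import SharedMemory
from typing import Tuple, Union

import torch

shm_type = Union[
    Tuple[SharedMemory, Tuple[int, ...], type],  # CPU path: (buffer, shape, dtype)
    torch.Tensor,                                # CUDA path: the tensor is passed through
]
```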
@@ -77,19 +77,8 @@ class PytorchBackend(NumpyFFTWBackend):
         self._array_backend.cuda.empty_cache()
 
     def mod(self, x1, x2, *args, **kwargs):
-        x1 = self.to_backend_array(x1)
-        x2 = self.to_backend_array(x2)
         return self._array_backend.remainder(x1, x2, *args, **kwargs)
 
-    def sum(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.sum(*args, **kwargs)
-
-    def mean(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.mean(*args, **kwargs)
-
-    def std(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.std(*args, **kwargs)
-
     def max(self, *args, **kwargs) -> NDArray:
         ret = self._array_backend.amax(*args, **kwargs)
         if type(ret) == self._array_backend.Tensor:
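Besides dropping the thin `sum`/`mean`/`std` wrappers, this hunk removes the explicit `to_backend_array` conversions in `mod`. A minimal sketch, not taken from the package, of why the plain forwarding call suffices: `torch.remainder` already accepts tensor/scalar and broadcastable tensor/tensor operands.

```python
import torch

# torch.remainder handles scalar and broadcastable tensor divisors directly,
# so a wrapper can forward its arguments without converting them first.
x = torch.arange(10)
print(torch.remainder(x, 3))                  # tensor([0, 1, 2, 0, 1, 2, 0, 1, 2, 0])
print(torch.remainder(x, torch.tensor([3])))  # same result via broadcasting
```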
@@ -121,49 +110,28 @@ class PytorchBackend(NumpyFFTWBackend):
     def zeros(self, shape, dtype=None):
         return self._array_backend.zeros(shape, dtype=dtype, device=self.device)
 
-    def preallocate_array(self, shape: Tuple[int], dtype: type) -> NDArray:
-        """
-        Returns a byte-aligned array of zeros with specified shape and dtype.
-
-        Parameters
-        ----------
-        shape : Tuple[int]
-            Desired shape for the array.
-        dtype : type
-            Desired data type for the array.
-
-        Returns
-        -------
-        NDArray
-            Byte-aligned array of zeros with specified shape and dtype.
-        """
-        arr = self._array_backend.zeros(shape, dtype=dtype, device=self.device)
-        return arr
-
     def full(self, shape, fill_value, dtype=None):
         return self._array_backend.full(
             size=shape, dtype=dtype, fill_value=fill_value, device=self.device
         )
 
+    def arange(self, *args, **kwargs):
+        return self._array_backend.arange(*args, **kwargs, device=self.device)
+
     def datatype_bytes(self, dtype: type) -> int:
         temp = self.zeros(1, dtype=dtype)
         return temp.element_size()
 
-    def fill(self, arr: TorchTensor, value: float):
+    def fill(self, arr: TorchTensor, value: float) -> TorchTensor:
         arr.fill_(value)
+        return arr
 
-    def astype(self, arr, dtype):
+    def astype(self, arr: TorchTensor, dtype: type) -> TorchTensor:
         return arr.to(dtype)
 
     def flip(self, a, axis, **kwargs):
         return self._array_backend.flip(input=a, dims=axis, **kwargs)
 
-    def arange(self, *args, **kwargs):
-        return self._array_backend.arange(*args, **kwargs, device=self.device)
-
-    def stack(self, *args, **kwargs):
-        return self._array_backend.stack(*args, **kwargs)
-
     def topk_indices(self, arr, k):
         temp = arr.reshape(-1)
         values, indices = self._array_backend.topk(temp, k)
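Two behavioural details in this hunk: `arange` now allocates directly on the backend's device, and `fill` returns the filled tensor instead of `None`. A minimal, self-contained illustration (the device selection is defensive and not from the package):

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

# Allocating the range on the target device avoids a host tensor plus a copy.
idx = torch.arange(8, device=device)

# Tensor.fill_ mutates in place and returns the same tensor object,
# which is what the updated fill() wrapper now hands back to callers.
buf = torch.empty(4, 4)
assert buf.fill_(0.0) is buf
```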
@@ -247,12 +215,11 @@ class PytorchBackend(NumpyFFTWBackend):
     def repeat(self, *args, **kwargs):
         return self._array_backend.repeat_interleave(*args, **kwargs)
 
-    def sharedarr_to_arr(
-        self, shm: TorchTensor, shape: Tuple[int], dtype: str
-    ) -> TorchTensor:
+    def from_sharedarr(self, args) -> TorchTensor:
         if self.device == "cuda":
-            return shm
+            return args[0]
 
+        shm, shape, dtype = args
         required_size = int(self._array_backend.prod(self.to_backend_array(shape)))
 
         ret = self._array_backend.frombuffer(shm.buf, dtype=dtype)[
@@ -260,9 +227,9 @@ class PytorchBackend(NumpyFFTWBackend):
         ].reshape(shape)
         return ret
 
-    def arr_to_sharedarr(
+    def to_sharedarr(
         self, arr: TorchTensor, shared_memory_handler: type = None
-    ) -> TorchTensor:
+    ) -> shm_type:
         if self.device == "cuda":
             return arr
 
@@ -275,7 +242,7 @@ class PytorchBackend(NumpyFFTWBackend):
 
         shm.buf[:nbytes] = arr.numpy().tobytes()
 
-        return shm
+        return shm, arr.shape, arr.dtype
 
     def transpose(self, arr):
         return arr.permute(*self._array_backend.arange(arr.ndim - 1, -1, -1))
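Taken together, the renames above change the shared-memory protocol: `to_sharedarr` now returns a `(shm, shape, dtype)` tuple instead of the bare buffer, and `from_sharedarr` consumes exactly that tuple (on CUDA both remain pass-throughs). A minimal sketch of the CPU round trip outside the backend class; the SharedMemory lifetime management is assumed, not taken from the package:

```python
from multiprocessing import shared_memory

import torch

arr = torch.arange(12, dtype=torch.float32).reshape(3, 4)

# to_sharedarr-style packing: copy the tensor bytes into a shared segment and
# hand out (buffer, shape, dtype) rather than the bare buffer.
nbytes = arr.numel() * arr.element_size()
shm = shared_memory.SharedMemory(create=True, size=nbytes)
shm.buf[:nbytes] = arr.numpy().tobytes()
handle = (shm, tuple(arr.shape), arr.dtype)

# from_sharedarr-style unpacking, e.g. in a worker process.
shm_recv, shape, dtype = handle
required_size = int(torch.prod(torch.tensor(shape)))
restored = torch.frombuffer(shm_recv.buf, dtype=dtype)[:required_size].reshape(shape)

assert torch.equal(arr, restored)
# shm.close() / shm.unlink() once every view into the segment has been released.
```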
@@ -283,16 +250,17 @@ class PytorchBackend(NumpyFFTWBackend):
     def power(self, *args, **kwargs):
         return self._array_backend.pow(*args, **kwargs)
 
-    def rotate_array(
+    def rigid_transform(
         self,
         arr: TorchTensor,
         rotation_matrix: TorchTensor,
         arr_mask: TorchTensor = None,
         translation: TorchTensor = None,
+        use_geometric_center: bool = False,
         out: TorchTensor = None,
         out_mask: TorchTensor = None,
         order: int = 1,
-        **kwargs,
+        cache: bool = False,
     ):
         """
         Rotates the given tensor `arr` based on the provided `rotation_matrix`.
@@ -353,8 +321,6 @@ class PytorchBackend(NumpyFFTWBackend):
             raise ValueError(
                 f"Got {order} but supported interpolation orders are: {modes}."
             )
-        rotate_mask = arr_mask is not None
-        return_type = (out is None) + 2 * rotate_mask * (out_mask is None)
 
         out = self.zeros_like(arr) if out is None else out
         if translation is None:
@@ -374,7 +340,7 @@ class PytorchBackend(NumpyFFTWBackend):
             mode=mode,
         )
 
-        if rotate_mask:
+        if arr_mask is not None:
             out_mask_slice = tuple(slice(0, x) for x in arr_mask.shape)
             if out_mask is None:
                 out_mask = self._array_backend.zeros_like(arr_mask)
@@ -385,15 +351,7 @@ class PytorchBackend(NumpyFFTWBackend):
                 mode=mode,
             )
 
-        match return_type:
-            case 0:
-                return None
-            case 1:
-                return out
-            case 2:
-                return out_mask
-            case 3:
-                return out, out_mask
+        return out, out_mask
 
     def build_fft(
         self,
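Because the `match return_type` dispatch is gone, the renamed `rigid_transform` now always returns a pair, with the mask slot simply `None` when no mask was rotated. A tiny, purely hypothetical stand-in to show the call-site effect (not the package's code):

```python
def rigid_transform_stub(arr, arr_mask=None):
    """Hypothetical stand-in mimicking the new return convention."""
    out = [x for x in arr]                      # pretend transform of the array
    out_mask = None if arr_mask is None else [x for x in arr_mask]
    return out, out_mask                        # always a two-tuple

# Callers unpack two values unconditionally instead of branching on
# which combination of out, out_mask and arr_mask was supplied.
out, out_mask = rigid_transform_stub([1, 2, 3])
assert out_mask is None
```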
@@ -402,38 +360,17 @@ class PytorchBackend(NumpyFFTWBackend):
         inverse_fast_shape: Tuple[int] = None,
         **kwargs,
     ) -> Tuple[Callable, Callable]:
-        """
-        Build fft builder functions.
-
-        Parameters
-        ----------
-        fast_shape : tuple
-            Tuple of integers corresponding to fast convolution shape
-            (see :py:meth:`PytorchBackend.compute_convolution_shapes`).
-        fast_ft_shape : tuple
-            Tuple of integers corresponding to the shape of the Fourier
-            transform array (see :py:meth:`PytorchBackend.compute_convolution_shapes`).
-        inverse_fast_shape : tuple, optional
-            Output shape of the inverse Fourier transform. By default fast_shape.
-        **kwargs : dict, optional
-            Unused keyword arguments.
-
-        Returns
-        -------
-        tuple
-            Tupple containing callable rfft and irfft object.
-        """
         if inverse_fast_shape is None:
             inverse_fast_shape = fast_shape
 
         def rfftn(
             arr: TorchTensor, out: TorchTensor, shape: Tuple[int] = fast_shape
-        ) -> None:
+        ) -> TorchTensor:
             return self._array_backend.fft.rfftn(arr, s=shape, out=out)
 
         def irfftn(
             arr: TorchTensor, out: TorchTensor, shape: Tuple[int] = inverse_fast_shape
-        ) -> None:
+        ) -> TorchTensor:
             return self._array_backend.fft.irfftn(arr, s=shape, out=out)
 
         return rfftn, irfftn
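The `rfftn`/`irfftn` closures built here now return the transformed tensor instead of `None`, so results can be consumed functionally as well as through the `out` argument. A self-contained sketch of the underlying torch calls; the shapes are illustrative, not from the package:

```python
import torch

fast_shape = (16, 16, 16)                     # real-space convolution shape
volume = torch.rand(*fast_shape)

# torch.fft.rfftn/irfftn return the transformed tensor, which the updated
# closures now propagate to their callers.
ft = torch.fft.rfftn(volume, s=fast_shape)    # half-spectrum, shape (16, 16, 9)
back = torch.fft.irfftn(ft, s=fast_shape)

assert torch.allclose(volume, back, atol=1e-6)
```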
@@ -445,30 +382,6 @@ class PytorchBackend(NumpyFFTWBackend):
         translation: TorchTensor,
         mode,
     ) -> TorchTensor:
-        """
-        Performs an affine transformation on the given tensor.
-
-        The affine transformation is defined by the provided `rotation_matrix`
-        and the `translation` vector. The transformation is applied to the
-        input tensor `arr`.
-
-        Parameters
-        ----------
-        arr : TorchTensor
-            The input tensor on which the transformation will be applied.
-        rotation_matrix : TorchTensor
-            The matrix defining the rotation component of the transformation.
-        translation : TorchTensor
-            The vector defining the translation to be applied post rotation.
-        mode : str
-            Interpolation mode to use. Options are: 'nearest', 'bilinear', 'bicubic'.
-
-        Returns
-        -------
-        TorchTensor
-            The tensor after applying the affine transformation.
-        """
-
         transformation_matrix = self._array_backend.zeros(
             arr.ndim, arr.ndim + 1, device=arr.device, dtype=arr.dtype
         )
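The docstring removed here described `_affine_transform`. The `arr.ndim × (arr.ndim + 1)` matrix assembled just below is the augmented [rotation | translation] layout used by torch's grid-sampling primitives, and the interpolation modes the old docstring listed ('nearest', 'bilinear', 'bicubic') are `grid_sample` modes. A self-contained 2D illustration with plain torch, not code from the package:

```python
import torch
import torch.nn.functional as F

# Apply a 2 x 3 augmented [rotation | translation] matrix via grid resampling.
image = torch.zeros(1, 1, 8, 8)               # (batch, channel, H, W)
image[0, 0, 2, 3] = 1.0

theta = torch.tensor([[[0.0, -1.0, 0.0],      # 90 degree rotation,
                       [1.0,  0.0, 0.0]]])    # zero translation (normalized coords)
grid = F.affine_grid(theta, size=image.shape, align_corners=False)
rotated = F.grid_sample(image, grid, mode="bilinear", align_corners=False)
```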
@@ -495,22 +408,6 @@ class PytorchBackend(NumpyFFTWBackend):
 
     @contextmanager
     def set_device(self, device_index: int):
-        """
-        Set the active GPU device as a context.
-
-        This method sets the active GPU device for operations within the context.
-
-        Parameters
-        ----------
-        device_index : int
-            Index of the GPU device to be set as active.
-
-        Yields
-        ------
-        None
-            Operates as a context manager, yielding None and providing
-            the set GPU context for enclosed operations.
-        """
         if self.device == "cuda":
             with self._array_backend.cuda.device(device_index):
                 yield
@@ -518,28 +415,7 @@ class PytorchBackend(NumpyFFTWBackend):
             yield None
 
     def device_count(self) -> int:
-        """
-        Return the number of available GPU devices.
-
-        Returns
-        -------
-        int
-            Number of available GPU devices.
-        """
         return self._array_backend.cuda.device_count()
 
     def reverse(self, arr: TorchTensor) -> TorchTensor:
-        """
-        Reverse the order of elements in a tensor along all its axes.
-
-        Parameters
-        ----------
-        tensor : TorchTensor
-            Input tensor.
-
-        Returns
-        -------
-        TorchTensor
-            Reversed tensor.
-        """
         return self._array_backend.flip(arr, [i for i in range(arr.ndim)])