pytme 0.2.1__cp311-cp311-macosx_14_0_arm64.whl → 0.2.3__cp311-cp311-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/match_template.py +219 -216
  2. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/postprocess.py +86 -54
  3. pytme-0.2.3.data/scripts/preprocess.py +132 -0
  4. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/preprocessor_gui.py +181 -94
  5. pytme-0.2.3.dist-info/METADATA +92 -0
  6. pytme-0.2.3.dist-info/RECORD +75 -0
  7. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/WHEEL +1 -1
  8. pytme-0.2.1.data/scripts/preprocess.py → scripts/eval.py +1 -1
  9. scripts/extract_candidates.py +20 -13
  10. scripts/match_template.py +219 -216
  11. scripts/match_template_filters.py +154 -95
  12. scripts/postprocess.py +86 -54
  13. scripts/preprocess.py +95 -56
  14. scripts/preprocessor_gui.py +181 -94
  15. scripts/refine_matches.py +265 -61
  16. tme/__init__.py +0 -1
  17. tme/__version__.py +1 -1
  18. tme/analyzer.py +458 -813
  19. tme/backends/__init__.py +40 -11
  20. tme/backends/_jax_utils.py +187 -0
  21. tme/backends/cupy_backend.py +109 -226
  22. tme/backends/jax_backend.py +230 -152
  23. tme/backends/matching_backend.py +445 -384
  24. tme/backends/mlx_backend.py +32 -59
  25. tme/backends/npfftw_backend.py +240 -507
  26. tme/backends/pytorch_backend.py +30 -151
  27. tme/density.py +248 -371
  28. tme/extensions.cpython-311-darwin.so +0 -0
  29. tme/matching_data.py +328 -284
  30. tme/matching_exhaustive.py +195 -1499
  31. tme/matching_optimization.py +143 -106
  32. tme/matching_scores.py +887 -0
  33. tme/matching_utils.py +287 -388
  34. tme/memory.py +377 -0
  35. tme/orientations.py +78 -21
  36. tme/parser.py +3 -4
  37. tme/preprocessing/_utils.py +61 -32
  38. tme/preprocessing/composable_filter.py +7 -4
  39. tme/preprocessing/compose.py +7 -3
  40. tme/preprocessing/frequency_filters.py +49 -39
  41. tme/preprocessing/tilt_series.py +44 -72
  42. tme/preprocessor.py +560 -526
  43. tme/structure.py +491 -188
  44. tme/types.py +5 -3
  45. pytme-0.2.1.dist-info/METADATA +0 -73
  46. pytme-0.2.1.dist-info/RECORD +0 -73
  47. tme/helpers.py +0 -881
  48. tme/matching_constrained.py +0 -195
  49. {pytme-0.2.1.data → pytme-0.2.3.data}/scripts/estimate_ram_usage.py +0 -0
  50. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/LICENSE +0 -0
  51. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/entry_points.txt +0 -0
  52. {pytme-0.2.1.dist-info → pytme-0.2.3.dist-info}/top_level.txt +0 -0
@@ -13,12 +13,12 @@ from multiprocessing.managers import SharedMemoryManager
 
 import numpy as np
 from .npfftw_backend import NumpyFFTWBackend
-from ..types import NDArray, TorchTensor
+from ..types import NDArray, TorchTensor, shm_type
 
 
 class PytorchBackend(NumpyFFTWBackend):
     """
-    A pytorch based backend for template matching
+    A pytorch-based matching backend.
     """
 
     def __init__(
@@ -77,28 +77,17 @@ class PytorchBackend(NumpyFFTWBackend):
         self._array_backend.cuda.empty_cache()
 
     def mod(self, x1, x2, *args, **kwargs):
-        x1 = self.to_backend_array(x1)
-        x2 = self.to_backend_array(x2)
         return self._array_backend.remainder(x1, x2, *args, **kwargs)
 
-    def sum(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.sum(*args, **kwargs)
-
-    def mean(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.mean(*args, **kwargs)
-
-    def std(self, *args, **kwargs) -> NDArray:
-        return self._array_backend.std(*args, **kwargs)
-
     def max(self, *args, **kwargs) -> NDArray:
         ret = self._array_backend.amax(*args, **kwargs)
-        if type(ret) == self._array_backend.Tensor:
+        if isinstance(ret, self._array_backend.Tensor):
             return ret
         return ret[0]
 
     def min(self, *args, **kwargs) -> NDArray:
         ret = self._array_backend.amin(*args, **kwargs)
-        if type(ret) == self._array_backend.Tensor:
+        if isinstance(ret, self._array_backend.Tensor):
             return ret
         return ret[0]
 
@@ -121,49 +110,28 @@ class PytorchBackend(NumpyFFTWBackend):
     def zeros(self, shape, dtype=None):
         return self._array_backend.zeros(shape, dtype=dtype, device=self.device)
 
-    def preallocate_array(self, shape: Tuple[int], dtype: type) -> NDArray:
-        """
-        Returns a byte-aligned array of zeros with specified shape and dtype.
-
-        Parameters
-        ----------
-        shape : Tuple[int]
-            Desired shape for the array.
-        dtype : type
-            Desired data type for the array.
-
-        Returns
-        -------
-        NDArray
-            Byte-aligned array of zeros with specified shape and dtype.
-        """
-        arr = self._array_backend.zeros(shape, dtype=dtype, device=self.device)
-        return arr
-
     def full(self, shape, fill_value, dtype=None):
         return self._array_backend.full(
             size=shape, dtype=dtype, fill_value=fill_value, device=self.device
         )
 
+    def arange(self, *args, **kwargs):
+        return self._array_backend.arange(*args, **kwargs, device=self.device)
+
     def datatype_bytes(self, dtype: type) -> int:
         temp = self.zeros(1, dtype=dtype)
         return temp.element_size()
 
-    def fill(self, arr: TorchTensor, value: float):
+    def fill(self, arr: TorchTensor, value: float) -> TorchTensor:
         arr.fill_(value)
+        return arr
 
-    def astype(self, arr, dtype):
+    def astype(self, arr: TorchTensor, dtype: type) -> TorchTensor:
         return arr.to(dtype)
 
     def flip(self, a, axis, **kwargs):
         return self._array_backend.flip(input=a, dims=axis, **kwargs)
 
-    def arange(self, *args, **kwargs):
-        return self._array_backend.arange(*args, **kwargs, device=self.device)
-
-    def stack(self, *args, **kwargs):
-        return self._array_backend.stack(*args, **kwargs)
-
     def topk_indices(self, arr, k):
         temp = arr.reshape(-1)
         values, indices = self._array_backend.topk(temp, k)
@@ -186,7 +154,7 @@ class PytorchBackend(NumpyFFTWBackend):
             1, -1
         )
         if unraveled_coords.size(0) == 1:
-            return tuple(unraveled_coords[0, :].tolist())
+            return (unraveled_coords[0, :],)
 
         else:
             return tuple(unraveled_coords.T)
@@ -238,7 +206,9 @@ class PytorchBackend(NumpyFFTWBackend):
         else:
             raise NotImplementedError("Operation only implemented for 2 and 3D inputs.")
 
-        pool = func(kernel_size=min_distance, return_indices=True)
+        pool = func(
+            kernel_size=min_distance, padding=min_distance // 2, return_indices=True
+        )
         _, indices = pool(score_space.reshape(1, 1, *score_space.shape))
         coordinates = self.unravel_index(indices.reshape(-1), score_space.shape)
         coordinates = self.transpose(self.stack(coordinates))
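Note on the hunk above: the pooling operator used for peak calling now receives a padding of half the kernel size, so windows are also anchored at the array borders. The following is a minimal, self-contained sketch of the same pooling-based peak-picking idea in plain PyTorch; it is illustrative only (the score map and distance value are made up, and it does not use pytme's API).

import torch

# Illustrative score map and minimum peak separation (not pytme data).
score_space = torch.rand(64, 64)
min_distance = 5

# Max pooling with return_indices yields, per window, the flat index of the
# window maximum; the padding keeps border windows in play.
pool = torch.nn.MaxPool2d(
    kernel_size=min_distance, padding=min_distance // 2, return_indices=True
)
_, indices = pool(score_space.reshape(1, 1, *score_space.shape))

# Convert the unique flat indices back into 2D coordinates.
flat = torch.unique(indices.reshape(-1))
rows, cols = flat // score_space.shape[1], flat % score_space.shape[1]
coordinates = torch.stack((rows, cols), dim=-1)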
@@ -247,12 +217,11 @@ class PytorchBackend(NumpyFFTWBackend):
     def repeat(self, *args, **kwargs):
         return self._array_backend.repeat_interleave(*args, **kwargs)
 
-    def sharedarr_to_arr(
-        self, shm: TorchTensor, shape: Tuple[int], dtype: str
-    ) -> TorchTensor:
+    def from_sharedarr(self, args) -> TorchTensor:
         if self.device == "cuda":
-            return shm
+            return args
 
+        shm, shape, dtype = args
         required_size = int(self._array_backend.prod(self.to_backend_array(shape)))
 
         ret = self._array_backend.frombuffer(shm.buf, dtype=dtype)[
@@ -260,22 +229,21 @@ class PytorchBackend(NumpyFFTWBackend):
         ].reshape(shape)
         return ret
 
-    def arr_to_sharedarr(
+    def to_sharedarr(
         self, arr: TorchTensor, shared_memory_handler: type = None
-    ) -> TorchTensor:
+    ) -> shm_type:
         if self.device == "cuda":
             return arr
 
         nbytes = arr.numel() * arr.element_size()
 
-        if type(shared_memory_handler) == SharedMemoryManager:
+        if isinstance(shared_memory_handler, SharedMemoryManager):
            shm = shared_memory_handler.SharedMemory(size=nbytes)
        else:
            shm = shared_memory.SharedMemory(create=True, size=nbytes)
 
        shm.buf[:nbytes] = arr.numpy().tobytes()
-
-        return shm
+        return shm, arr.shape, arr.dtype
 
     def transpose(self, arr):
         return arr.permute(*self._array_backend.arange(arr.ndim - 1, -1, -1))
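The two hunks above rename sharedarr_to_arr/arr_to_sharedarr to from_sharedarr/to_sharedarr and change the exchange format: to_sharedarr now returns a (shm, shape, dtype) tuple (typed by the newly imported shm_type) that from_sharedarr unpacks, instead of a bare shared-memory handle whose shape and dtype had to travel separately. A minimal CPU round-trip sketch follows; it assumes PytorchBackend can be constructed with its defaults on the CPU device, which the diff itself does not show.

import torch
from multiprocessing.managers import SharedMemoryManager

from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend()  # assumption: default construction selects the CPU device
arr = torch.arange(12, dtype=torch.float32).reshape(3, 4)

with SharedMemoryManager() as manager:
    # to_sharedarr copies the tensor into shared memory and returns (shm, shape, dtype).
    handle = backend.to_sharedarr(arr, shared_memory_handler=manager)
    # from_sharedarr rebuilds a tensor from that tuple.
    restored = backend.from_sharedarr(handle)
    assert torch.equal(restored, arr)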
@@ -283,16 +251,17 @@ class PytorchBackend(NumpyFFTWBackend):
     def power(self, *args, **kwargs):
         return self._array_backend.pow(*args, **kwargs)
 
-    def rotate_array(
+    def rigid_transform(
         self,
         arr: TorchTensor,
         rotation_matrix: TorchTensor,
         arr_mask: TorchTensor = None,
         translation: TorchTensor = None,
+        use_geometric_center: bool = False,
         out: TorchTensor = None,
         out_mask: TorchTensor = None,
         order: int = 1,
-        **kwargs,
+        cache: bool = False,
     ):
         """
         Rotates the given tensor `arr` based on the provided `rotation_matrix`.
@@ -353,8 +322,6 @@ class PytorchBackend(NumpyFFTWBackend):
             raise ValueError(
                 f"Got {order} but supported interpolation orders are: {modes}."
            )
-        rotate_mask = arr_mask is not None
-        return_type = (out is None) + 2 * rotate_mask * (out_mask is None)
 
         out = self.zeros_like(arr) if out is None else out
         if translation is None:
@@ -374,7 +341,7 @@ class PytorchBackend(NumpyFFTWBackend):
             mode=mode,
         )
 
-        if rotate_mask:
+        if arr_mask is not None:
             out_mask_slice = tuple(slice(0, x) for x in arr_mask.shape)
             if out_mask is None:
                 out_mask = self._array_backend.zeros_like(arr_mask)
@@ -385,15 +352,7 @@ class PytorchBackend(NumpyFFTWBackend):
                 mode=mode,
             )
 
-        match return_type:
-            case 0:
-                return None
-            case 1:
-                return out
-            case 2:
-                return out_mask
-            case 3:
-                return out, out_mask
+        return out, out_mask
 
     def build_fft(
         self,
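With rotate_array renamed to rigid_transform and the match-based return block removed, the method now unconditionally returns an (out, out_mask) pair; callers always unpack two values, and out_mask is simply None when no arr_mask was supplied. A hedged usage sketch (tensor contents and the default backend construction are assumptions, not taken from the diff):

import torch

from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend()  # assumption: CPU device by default
arr = torch.rand(16, 16, 16)
identity = torch.eye(3)

# No arr_mask is given, so the second return value is expected to be None.
out, out_mask = backend.rigid_transform(arr, rotation_matrix=identity, order=1)
print(out.shape, out_mask)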
@@ -402,38 +361,17 @@ class PytorchBackend(NumpyFFTWBackend):
         inverse_fast_shape: Tuple[int] = None,
         **kwargs,
     ) -> Tuple[Callable, Callable]:
-        """
-        Build fft builder functions.
-
-        Parameters
-        ----------
-        fast_shape : tuple
-            Tuple of integers corresponding to fast convolution shape
-            (see :py:meth:`PytorchBackend.compute_convolution_shapes`).
-        fast_ft_shape : tuple
-            Tuple of integers corresponding to the shape of the Fourier
-            transform array (see :py:meth:`PytorchBackend.compute_convolution_shapes`).
-        inverse_fast_shape : tuple, optional
-            Output shape of the inverse Fourier transform. By default fast_shape.
-        **kwargs : dict, optional
-            Unused keyword arguments.
-
-        Returns
-        -------
-        tuple
-            Tupple containing callable rfft and irfft object.
-        """
         if inverse_fast_shape is None:
             inverse_fast_shape = fast_shape
 
         def rfftn(
             arr: TorchTensor, out: TorchTensor, shape: Tuple[int] = fast_shape
-        ) -> None:
+        ) -> TorchTensor:
             return self._array_backend.fft.rfftn(arr, s=shape, out=out)
 
         def irfftn(
             arr: TorchTensor, out: TorchTensor, shape: Tuple[int] = inverse_fast_shape
-        ) -> None:
+        ) -> TorchTensor:
             return self._array_backend.fft.irfftn(arr, s=shape, out=out)
 
         return rfftn, irfftn
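build_fft keeps its closure-based design, but the returned rfftn/irfftn callables are now annotated as returning the transformed tensor rather than None, matching what torch.fft.rfftn/irfftn with out= actually do. A small usage sketch with made-up shapes follows; the backend construction is again an assumption.

import torch

from tme.backends.pytorch_backend import PytorchBackend

backend = PytorchBackend()  # assumption: CPU device by default
fast_shape, fast_ft_shape = (32, 32), (32, 17)  # rfft halves the last axis: 32 // 2 + 1

rfftn, irfftn = backend.build_fft(fast_shape=fast_shape, fast_ft_shape=fast_ft_shape)

arr = torch.rand(fast_shape)
ft = rfftn(arr, torch.empty(fast_ft_shape, dtype=torch.complex64))
rec = irfftn(ft, torch.empty(fast_shape))
assert torch.allclose(arr, rec, atol=1e-5)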
@@ -445,30 +383,6 @@ class PytorchBackend(NumpyFFTWBackend):
         translation: TorchTensor,
         mode,
     ) -> TorchTensor:
-        """
-        Performs an affine transformation on the given tensor.
-
-        The affine transformation is defined by the provided `rotation_matrix`
-        and the `translation` vector. The transformation is applied to the
-        input tensor `arr`.
-
-        Parameters
-        ----------
-        arr : TorchTensor
-            The input tensor on which the transformation will be applied.
-        rotation_matrix : TorchTensor
-            The matrix defining the rotation component of the transformation.
-        translation : TorchTensor
-            The vector defining the translation to be applied post rotation.
-        mode : str
-            Interpolation mode to use. Options are: 'nearest', 'bilinear', 'bicubic'.
-
-        Returns
-        -------
-        TorchTensor
-            The tensor after applying the affine transformation.
-        """
-
         transformation_matrix = self._array_backend.zeros(
             arr.ndim, arr.ndim + 1, device=arr.device, dtype=arr.dtype
         )
@@ -495,22 +409,6 @@ class PytorchBackend(NumpyFFTWBackend):
 
     @contextmanager
     def set_device(self, device_index: int):
-        """
-        Set the active GPU device as a context.
-
-        This method sets the active GPU device for operations within the context.
-
-        Parameters
-        ----------
-        device_index : int
-            Index of the GPU device to be set as active.
-
-        Yields
-        ------
-        None
-            Operates as a context manager, yielding None and providing
-            the set GPU context for enclosed operations.
-        """
         if self.device == "cuda":
             with self._array_backend.cuda.device(device_index):
                 yield
@@ -518,28 +416,9 @@ class PytorchBackend(NumpyFFTWBackend):
             yield None
 
     def device_count(self) -> int:
-        """
-        Return the number of available GPU devices.
-
-        Returns
-        -------
-        int
-            Number of available GPU devices.
-        """
+        if self.device == "cpu":
+            return 1
         return self._array_backend.cuda.device_count()
 
     def reverse(self, arr: TorchTensor) -> TorchTensor:
-        """
-        Reverse the order of elements in a tensor along all its axes.
-
-        Parameters
-        ----------
-        tensor : TorchTensor
-            Input tensor.
-
-        Returns
-        -------
-        TorchTensor
-            Reversed tensor.
-        """
         return self._array_backend.flip(arr, [i for i in range(arr.ndim)])