pytme-0.1.9-cp311-cp311-macosx_14_0_arm64.whl → pytme-0.2.0-cp311-cp311-macosx_14_0_arm64.whl
This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the versions as they appear in their respective public registries.
- pytme-0.2.0.data/scripts/match_template.py +1019 -0
- pytme-0.2.0.data/scripts/postprocess.py +570 -0
- {pytme-0.1.9.data → pytme-0.2.0.data}/scripts/preprocessor_gui.py +244 -60
- {pytme-0.1.9.dist-info → pytme-0.2.0.dist-info}/METADATA +3 -1
- pytme-0.2.0.dist-info/RECORD +72 -0
- {pytme-0.1.9.dist-info → pytme-0.2.0.dist-info}/WHEEL +1 -1
- scripts/extract_candidates.py +218 -0
- scripts/match_template.py +459 -218
- pytme-0.1.9.data/scripts/match_template.py → scripts/match_template_filters.py +459 -218
- scripts/postprocess.py +380 -435
- scripts/preprocessor_gui.py +244 -60
- scripts/refine_matches.py +218 -0
- tme/__init__.py +2 -1
- tme/__version__.py +1 -1
- tme/analyzer.py +533 -78
- tme/backends/cupy_backend.py +80 -15
- tme/backends/npfftw_backend.py +35 -6
- tme/backends/pytorch_backend.py +15 -7
- tme/density.py +173 -78
- tme/extensions.cpython-311-darwin.so +0 -0
- tme/matching_constrained.py +195 -0
- tme/matching_data.py +76 -33
- tme/matching_exhaustive.py +354 -225
- tme/matching_memory.py +1 -0
- tme/matching_optimization.py +753 -649
- tme/matching_utils.py +152 -8
- tme/orientations.py +561 -0
- tme/preprocessing/__init__.py +2 -0
- tme/preprocessing/_utils.py +176 -0
- tme/preprocessing/composable_filter.py +30 -0
- tme/preprocessing/compose.py +52 -0
- tme/preprocessing/frequency_filters.py +322 -0
- tme/preprocessing/tilt_series.py +967 -0
- tme/preprocessor.py +35 -25
- tme/structure.py +2 -37
- pytme-0.1.9.data/scripts/postprocess.py +0 -625
- pytme-0.1.9.dist-info/RECORD +0 -61
- {pytme-0.1.9.data → pytme-0.2.0.data}/scripts/estimate_ram_usage.py +0 -0
- {pytme-0.1.9.data → pytme-0.2.0.data}/scripts/preprocess.py +0 -0
- {pytme-0.1.9.dist-info → pytme-0.2.0.dist-info}/LICENSE +0 -0
- {pytme-0.1.9.dist-info → pytme-0.2.0.dist-info}/entry_points.txt +0 -0
- {pytme-0.1.9.dist-info → pytme-0.2.0.dist-info}/top_level.txt +0 -0
pytme-0.1.9.data/scripts/postprocess.py
DELETED
@@ -1,625 +0,0 @@
-#!python
-""" CLI to simplify analysing the output of match_template.py.
-
-    Copyright (c) 2023 European Molecular Biology Laboratory
-
-    Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
-"""
-from os import getcwd
-from os.path import join
-import argparse
-from sys import exit
-from typing import List, Tuple
-from os.path import splitext
-from dataclasses import dataclass
-
-import numpy as np
-from scipy.spatial.transform import Rotation
-from numpy.typing import NDArray
-
-from tme import Density, Structure
-from tme.analyzer import (
-    PeakCallerSort,
-    PeakCallerMaximumFilter,
-    PeakCallerFast,
-    PeakCallerRecursiveMasking,
-    PeakCallerScipy,
-)
-from tme.matching_utils import (
-    load_pickle,
-    euler_to_rotationmatrix,
-    euler_from_rotationmatrix,
-    centered_mask,
-)
-
-PEAK_CALLERS = {
-    "PeakCallerSort": PeakCallerSort,
-    "PeakCallerMaximumFilter": PeakCallerMaximumFilter,
-    "PeakCallerFast": PeakCallerFast,
-    "PeakCallerRecursiveMasking": PeakCallerRecursiveMasking,
-    "PeakCallerScipy": PeakCallerScipy,
-}
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(
-        description="Peak Calling for Template Matching Outputs"
-    )
-    parser.add_argument(
-        "--input_file",
-        required=True,
-        help="Path to the output of match_template.py.",
-    )
-    parser.add_argument(
-        "--output_prefix",
-        required=True,
-        help="Prefix for the output file name. Extension depends on output_format.",
-    )
-    parser.add_argument(
-        "--number_of_peaks",
-        type=int,
-        default=1000,
-        help="Number of peaks to consider. Note, this is the number of called peaks, "
-        "subject to min_distance and min_boundary_distance filtering. Therefore, the "
-        "returned number of peaks will be at most equal to number_of_peaks. "
-        "Ignored when --orientations is provided.",
-    )
-    parser.add_argument(
-        "--min_distance",
-        type=int,
-        default=5,
-        help="Minimum distance between peaks. Ignored when --orientations is provided.",
-    )
-    parser.add_argument(
-        "--min_boundary_distance",
-        type=int,
-        default=0,
-        help="Minimum distance from target boundaries. Ignored when --orientations "
-        "is provided.",
-    )
-    parser.add_argument(
-        "--mask_edges",
-        action="store_true",
-        default=False,
-        help="Whether to mask edges of the input score array according to the template shape. "
-        "Uses twice the value of --min_boundary_distance if both are provided.",
-    )
-    parser.add_argument(
-        "--wedge_mask",
-        type=str,
-        default=None,
-        help="Path to Fourier space mask. Only considered if output_format is relion.",
-    )
-    parser.add_argument(
-        "--peak_caller",
-        choices=list(PEAK_CALLERS.keys()),
-        default="PeakCallerScipy",
-        help="Peak caller to use for analysis. Ignored if input_file contains peaks or when "
-        "--orientations is provided.",
-    )
-    parser.add_argument(
-        "--orientations",
-        default=None,
-        help="Path to orientations file to overwrite orientations computed from"
-        " match_template.py output.",
-    )
-    parser.add_argument(
-        "--output_format",
-        choices=["orientations", "alignment", "extraction", "relion"],
-        default="orientations",
-        help="Choose the output format. Available formats are: "
-        "orientations (translation, rotation, and score), "
-        "alignment (aligned template to target based on orientations), "
-        "extraction (extract regions around peaks from targets, i.e. subtomograms), "
-        "relion (perform extraction step and generate corresponding star files).",
-    )
-    args = parser.parse_args()
-
-    return args
-
-
-@dataclass
-class Orientations:
-    #: Return a numpy array with translations of each orientation (n x d).
-    translations: np.ndarray
-
-    #: Return a numpy array with euler angles of each orientation in zyx format (n x d).
-    rotations: np.ndarray
-
-    #: Return a numpy array with the score of each orientation (n, ).
-    scores: np.ndarray
-
-    #: Return a numpy array with additional orientation details (n, ).
-    details: np.ndarray
-
-    def __iter__(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-        """
-        Iterate over the current class instance. Each iteration returns an orientation
-        defined by its translation, rotation, score and additional detail.
-
-        Yields
-        ------
-        Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
-            A tuple of arrays defining the given orientation.
-        """
-        yield from zip(self.translations, self.rotations, self.scores, self.details)
-
-    def __getitem__(self, indices: List[int]) -> "Orientations":
-        """
-        Retrieve a subset of orientations based on the provided indices.
-
-        Parameters
-        ----------
-        indices : List[int]
-            A list of indices specifying the orientations to be retrieved.
-
-        Returns
-        -------
-        :py:class:`Orientations`
-            A new :py:class:`Orientations` instance containing only the selected orientations.
-        """
-        indices = np.asarray(indices)
-        attributes = (
-            "translations",
-            "rotations",
-            "scores",
-            "details",
-        )
-        kwargs = {attr: getattr(self, attr)[indices] for attr in attributes}
-        return self.__class__(**kwargs)
-
-    def to_file(self, filename: str, file_format: type, **kwargs) -> None:
-        """
-        Save the current class instance to a file in the specified format.
-
-        Parameters
-        ----------
-        filename : str
-            The name of the file where the orientations will be saved.
-        file_format : type
-            The format in which to save the orientations. Supported formats are 'text' and 'relion'.
-        **kwargs : dict
-            Additional keyword arguments specific to the file format.
-
-        Raises
-        ------
-        ValueError
-            If an unsupported file format is specified.
-        """
-        mapping = {
-            "text": self._to_text,
-            "relion": self._to_relion_star,
-        }
-
-        func = mapping.get(file_format, None)
-        if func is None:
-            raise ValueError(
-                f"{file_format} not implemented. Supported are {','.join(mapping.keys())}."
-            )
-
-        return func(filename=filename, **kwargs)
-
-    def _to_text(self, filename: str) -> None:
-        """
-        Save orientations in a text file format.
-
-        Parameters
-        ----------
-        filename : str
-            The name of the file to save the orientations.
-
-        Notes
-        -----
-        The file is saved with a header specifying each column: z, y, x, euler_z,
-        euler_y, euler_x, score, detail. Each row in the file corresponds to an orientation.
-        """
-        header = "\t".join(
-            ["z", "y", "x", "euler_z", "euler_y", "euler_x", "score", "detail"]
-        )
-        with open(filename, mode="w", encoding="utf-8") as ofile:
-            _ = ofile.write(f"{header}\n")
-            for translation, angles, score, detail in self:
-                translation_string = "\t".join([str(x) for x in translation])
-                angle_string = "\t".join([str(x) for x in angles])
-                _ = ofile.write(
-                    f"{translation_string}\t{angle_string}\t{score}\t{detail}\n"
-                )
-        return None
-
-    def _to_relion_star(
-        self,
-        filename: str,
-        name_prefix: str = None,
-        ctf_image: str = None,
-        sampling_rate: float = 1.0,
-        subtomogram_size: int = 0,
-    ) -> None:
-        """
-        Save orientations in RELION's STAR file format.
-
-        Parameters
-        ----------
-        filename : str
-            The name of the file to save the orientations.
-        name_prefix : str, optional
-            A prefix to add to the image names in the STAR file.
-        ctf_image : str, optional
-            Path to a CTF or wedge mask for RELION.
-        sampling_rate : float, optional
-            Subtomogram sampling rate in angstrom per voxel.
-        subtomogram_size : int, optional
-            Size of the square shaped subtomogram.
-
-        Notes
-        -----
-        The file is saved with a standard header used in RELION STAR files.
-        Each row in the file corresponds to an orientation.
-        """
-        optics_header = [
-            "# version 30001",
-            "data_optics",
-            "",
-            "loop_",
-            "_rlnOpticsGroup",
-            "_rlnOpticsGroupName",
-            "_rlnSphericalAberration",
-            "_rlnVoltage",
-            "_rlnImageSize",
-            "_rlnImageDimensionality",
-            "_rlnImagePixelSize",
-        ]
-        optics_data = [
-            "1",
-            "opticsGroup1",
-            "2.700000",
-            "300.000000",
-            str(int(subtomogram_size)),
-            "3",
-            str(float(sampling_rate)),
-        ]
-        optics_header = "\n".join(optics_header)
-        optics_data = "\t".join(optics_data)
-
-        header = [
-            "data_particles",
-            "",
-            "loop_",
-            "_rlnCoordinateX",
-            "_rlnCoordinateY",
-            "_rlnCoordinateZ",
-            "_rlnImageName",
-            "_rlnAngleRot",
-            "_rlnAngleTilt",
-            "_rlnAnglePsi",
-            "_rlnOpticsGroup",
-        ]
-        if ctf_image is not None:
-            header.append("_rlnCtfImage")
-
-        ctf_image = "" if ctf_image is None else f"\t{ctf_image}"
-
-        header = "\n".join(header)
-        name_prefix = "" if name_prefix is None else name_prefix
-
-        with open(filename, mode="w", encoding="utf-8") as ofile:
-            _ = ofile.write(f"{optics_header}\n")
-            _ = ofile.write(f"{optics_data}\n")
-
-            _ = ofile.write("\n# version 30001\n")
-            _ = ofile.write(f"{header}\n")
-
-            # pyTME uses a zyx data layout
-            for index, (translation, rotation, score, detail) in enumerate(self):
-                rotation = Rotation.from_euler("zyx", rotation, degrees=True)
-                rotation = rotation.as_euler(seq="xyx", degrees=True)
-
-                translation_string = "\t".join([str(x) for x in translation][::-1])
-                angle_string = "\t".join([str(x) for x in rotation])
-                name = f"{name_prefix}_{index}.mrc"
-                _ = ofile.write(
-                    f"{translation_string}\t{name}\t{angle_string}\t1{ctf_image}\n"
-                )
-
-        return None
-
-    @classmethod
-    def from_file(cls, filename: str, file_format: type, **kwargs) -> "Orientations":
-        """
-        Create an instance of :py:class:`Orientations` from a file.
-
-        Parameters
-        ----------
-        filename : str
-            The name of the file from which to read the orientations.
-        file_format : type
-            The format of the file. Currently, only 'text' format is supported.
-        **kwargs : dict
-            Additional keyword arguments specific to the file format.
-
-        Returns
-        -------
-        :py:class:`Orientations`
-            An instance of :py:class:`Orientations` populated with data from the file.
-
-        Raises
-        ------
-        ValueError
-            If an unsupported file format is specified.
-        """
-        mapping = {
-            "text": cls._from_text,
-        }
-
-        func = mapping.get(file_format, None)
-        if func is None:
-            raise ValueError(
-                f"{file_format} not implemented. Supported are {','.join(mapping.keys())}."
-            )
-
-        translations, rotations, scores, details, *_ = func(filename=filename, **kwargs)
-        return cls(
-            translations=translations,
-            rotations=rotations,
-            scores=scores,
-            details=details,
-        )
-
-    @staticmethod
-    def _from_text(
-        filename: str,
-    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-        """
-        Read orientations from a text file.
-
-        Parameters
-        ----------
-        filename : str
-            The name of the file from which to read the orientations.
-
-        Returns
-        -------
-        Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
-            A tuple containing numpy arrays for translations, rotations, scores,
-            and details.
-
-        Notes
-        -----
-        The text file is expected to have a header and data in columns corresponding to
-        z, y, x, euler_z, euler_y, euler_x, score, detail.
-        """
-        with open(filename, mode="r", encoding="utf-8") as infile:
-            data = [x.strip().split("\t") for x in infile.read().split("\n")]
-        _ = data.pop(0)
-
-        translation, rotation, score, detail = [], [], [], []
-        for candidate in data:
-            if len(candidate) <= 1:
-                continue
-            if len(candidate) != 8:
-                candidate.append(-1)
-
-            candidate = [float(x) for x in candidate]
-            translation.append((candidate[0], candidate[1], candidate[2]))
-            rotation.append((candidate[3], candidate[4], candidate[5]))
-            score.append(candidate[6])
-            detail.append(candidate[7])
-
-        translation = np.vstack(translation).astype(int)
-        rotation = np.vstack(rotation).astype(float)
-        score = np.array(score).astype(float)
-        detail = np.array(detail).astype(float)
-
-        return translation, rotation, score, detail
-
-
-def load_template(filepath: str, sampling_rate: NDArray) -> "Density":
-    try:
-        template = Density.from_file(filepath)
-        template, _ = template.centered(0)
-        center_of_mass = template.center_of_mass(template.data)
-    except ValueError:
-        template = Structure.from_file(filepath)
-        center_of_mass = template.center_of_mass()[::-1]
-        template = Density.from_structure(template, sampling_rate=sampling_rate)
-
-    return template, center_of_mass
-
-
-def main():
-    args = parse_args()
-    data = load_pickle(args.input_file)
-
-    meta = data[-1]
-    target_origin, _, sampling_rate, cli_args = meta
-
-    if args.orientations is not None:
-        orientations = Orientations.from_file(
-            filename=args.orientations, file_format="text"
-        )
-
-    else:
-        translations, rotations, scores, details = [], [], [], []
-        # Output is MaxScoreOverRotations
-        if data[0].ndim == data[2].ndim:
-            scores, offset, rotation_array, rotation_mapping, meta = data
-            if args.mask_edges:
-                template, center_of_mass = load_template(
-                    cli_args.template, sampling_rate=sampling_rate
-                )
-                if not cli_args.no_centering:
-                    template, *_ = template.centered(0)
-                mask_size = template.shape
-                if args.min_boundary_distance > 0:
-                    mask_size = 2 * args.min_boundary_distance
-                scores = centered_mask(scores, np.subtract(scores.shape, mask_size) + 1)
-
-            peak_caller = PEAK_CALLERS[args.peak_caller](
-                number_of_peaks=args.number_of_peaks,
-                min_distance=args.min_distance,
-                min_boundary_distance=args.min_boundary_distance,
-            )
-            peak_caller(scores, rotation_matrix=np.eye(3))
-            candidates = peak_caller.merge(
-                candidates=[tuple(peak_caller)],
-                number_of_peaks=args.number_of_peaks,
-                min_distance=args.min_distance,
-                min_boundary_distance=args.min_boundary_distance,
-            )
-            if len(candidates) == 0:
-                exit(
-                    "Found no peaks. Try reducing min_distance or min_boundary_distance."
-                )
-
-            for translation, _, score, detail in zip(*candidates):
-                rotations.append(rotation_mapping[rotation_array[tuple(translation)]])
-
-        else:
-            candidates = data
-            translation, rotation, score, detail, *_ = data
-            for i in range(translation.shape[0]):
-                rotations.append(euler_from_rotationmatrix(rotation[i]))
-
-        rotations = np.vstack(rotations).astype(float)
-        translations, scores, details = candidates[0], candidates[2], candidates[3]
-        orientations = Orientations(
-            translations=translations,
-            rotations=rotations,
-            scores=scores,
-            details=details,
-        )
-
-    if args.output_format == "orientations":
-        orientations.to_file(filename=f"{args.output_prefix}.tsv", file_format="text")
-        exit(0)
-
-    _, template_extension = splitext(cli_args.template)
-    template, center_of_mass = load_template(
-        filepath=cli_args.template, sampling_rate=sampling_rate
-    )
-    template_is_density, index = isinstance(template, Density), 0
-
-    if args.output_format == "relion":
-        new_shape = np.add(template.shape, np.mod(template.shape, 2))
-        new_shape = np.repeat(new_shape.max(), new_shape.size).astype(int)
-        print(f"Padding template from {template.shape} to {new_shape} for RELION.")
-        template.pad(new_shape)
-
-    if args.output_format in ("extraction", "relion"):
-        target = Density.from_file(cli_args.target)
-
-        if not np.all(np.divide(target.shape, template.shape) > 2):
-            print(
-                "Target might be too small relative to template to extract"
-                " meaningful particles."
-                f" Target : {target.shape}, template : {template.shape}."
-            )
-
-        peaks = orientations.translations.astype(int)
-        max_shape = np.max(template.shape).astype(int)
-        half_shape = max_shape // 2
-
-        left_pad = half_shape
-        right_pad = np.add(half_shape, max_shape % 2)
-        starts = np.subtract(peaks, left_pad)
-        stops = np.add(peaks, right_pad)
-
-        candidate_starts = np.maximum(starts, 0).astype(int)
-        candidate_stops = np.minimum(stops, target.shape).astype(int)
-        keep_peaks = (
-            np.sum(
-                np.multiply(starts == candidate_starts, stops == candidate_stops),
-                axis=1,
-            )
-            == peaks.shape[1]
-        )
-
-        orientations = orientations[keep_peaks]
-        working_directory = getcwd()
-        if args.output_format == "relion":
-            orientations.to_file(
-                filename=f"{args.output_prefix}.star",
-                file_format="relion",
-                name_prefix=join(working_directory, args.output_prefix),
-                ctf_image=args.wedge_mask,
-                sampling_rate=target.sampling_rate.max(),
-                subtomogram_size=template.shape[0],
-            )
-
-        peaks = peaks[keep_peaks,]
-        starts = starts[keep_peaks,]
-        stops = stops[keep_peaks,]
-        candidate_starts = candidate_starts[keep_peaks,]
-        candidate_stops = candidate_stops[keep_peaks,]
-
-        if not len(peaks):
-            print(
-                "No peak remaining after filtering. Started with"
-                f" {orientations.translations.shape[0]} filtered to {peaks.shape[0]}."
-                " Consider reducing min_distance, increasing number_of_peaks, or using"
-                " a different peak caller."
-            )
-            exit(-1)
-
-        observation_starts = np.subtract(candidate_starts, starts).astype(int)
-        observation_stops = np.subtract(np.add(max_shape, candidate_stops), stops)
-        observation_stops = observation_stops.astype(int)
-
-        candidate_slices = [
-            tuple(slice(s, e) for s, e in zip(start_row, stop_row))
-            for start_row, stop_row in zip(candidate_starts, candidate_stops)
-        ]
-
-        observation_slices = [
-            tuple(slice(s, e) for s, e in zip(start_row, stop_row))
-            for start_row, stop_row in zip(observation_starts, observation_stops)
-        ]
-        observations = np.zeros(
-            (len(candidate_slices), max_shape, max_shape, max_shape)
-        )
-
-        slices = zip(candidate_slices, observation_slices)
-        for idx, (cand_slice, obs_slice) in enumerate(slices):
-            observations[idx][:] = np.mean(target.data[cand_slice])
-            observations[idx][obs_slice] = target.data[cand_slice]
-
-        for index in range(observations.shape[0]):
-            out_density = Density(
-                data=observations[index],
-                sampling_rate=sampling_rate,
-                origin=candidate_starts[index] * sampling_rate,
-            )
-            # out_density.data = out_density.data * template_mask.data
-            out_density.to_file(
-                join(working_directory, f"{args.output_prefix}_{index}.mrc")
-            )
-
-        exit(0)
-
-    for translation, angles, *_ in orientations:
-        rotation_matrix = euler_to_rotationmatrix(angles)
-
-        if template_is_density:
-            translation = np.subtract(translation, center_of_mass)
-            transformed_template = template.rigid_transform(
-                rotation_matrix=rotation_matrix
-            )
-            new_origin = np.add(target_origin / sampling_rate, translation)
-            transformed_template.origin = np.multiply(new_origin, sampling_rate)
-        else:
-            new_center_of_mass = np.add(
-                np.multiply(translation, sampling_rate), target_origin
-            )
-            translation = np.subtract(new_center_of_mass, center_of_mass)
-            transformed_template = template.rigid_transform(
-                translation=translation[::-1],
-                rotation_matrix=rotation_matrix[::-1, ::-1],
-            )
-        # template_extension should contain the extension '.'
-        transformed_template.to_file(
-            f"{args.output_prefix}_{index}{template_extension}"
-        )
-        index += 1
-
-
-if __name__ == "__main__":
-    main()
pytme-0.1.9.dist-info/RECORD
DELETED
@@ -1,61 +0,0 @@
|
|
1
|
-
pytme-0.1.9.data/scripts/estimate_ram_usage.py,sha256=R1NDpFajcF-MonJ4a43SfDlA-nxBYwK7D2quzCdsVFM,2767
|
2
|
-
pytme-0.1.9.data/scripts/match_template.py,sha256=moMpH8mLk42ZkW0vXzd_TMCWsTUKXohxuWrqHLh8lks,25743
|
3
|
-
pytme-0.1.9.data/scripts/postprocess.py,sha256=MKyIgsXWRlO6cU8qGqe_tSSKIF2pNTRSuaDoyXZ22G8,21938
|
4
|
-
pytme-0.1.9.data/scripts/preprocess.py,sha256=zog-l2Je-GeouJ6SnamOMuHgTn7fFPiGnO5X03y5qSY,2527
|
5
|
-
pytme-0.1.9.data/scripts/preprocessor_gui.py,sha256=AKT_ovrdbPiOJ_ampRK3s0jslUKwdVQRF6XUuJ-6GDo,29000
|
6
|
-
scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
7
|
-
scripts/estimate_ram_usage.py,sha256=rN7haobnHg3YcgGJIp81FNiCzy8-saJGeEurQlmQmNQ,2768
|
8
|
-
scripts/match_template.py,sha256=dEvw9KDqgR7vZcIVn5Bld6FcRt_rp7nblybVuJhLYbs,25744
|
9
|
-
scripts/postprocess.py,sha256=NDTo9qbnAzhxo_tbnR8oWlDrsflUGu-Sj4ULqOPix1U,21939
|
10
|
-
scripts/preprocess.py,sha256=ebJVLxbRlB6TI5YHNr0VavZ4lmaRdf8QVafyiDhh_oU,2528
|
11
|
-
scripts/preprocessor_gui.py,sha256=E0F6quZnu32yZsBle38KDel5naHpC3Aci0uMCyo8DzY,29001
|
12
|
-
tme/__init__.py,sha256=NiPnYfrwb8e04ETubqwd3yzCGBliLBXRvCylD-T59jY,212
|
13
|
-
tme/__version__.py,sha256=XIaxbMbyiP-L3kguR1GhxirFblTXiHR1lMfDVITvHUI,22
|
14
|
-
tme/analyzer.py,sha256=Yh1o4kpjlRSq6ki5KiRsipF3tBwtSrL3KYfEm87NNOc,39188
|
15
|
-
tme/density.py,sha256=AwHhi5sHvOzXeXCnYHB_Mpw1kCG0x0cBg8NmwDalwKA,84326
|
16
|
-
tme/extensions.cpython-311-darwin.so,sha256=vSNHvFMev0jvKCEwE4lzqgg9520Ucxf27gy3qqixYns,395056
|
17
|
-
tme/helpers.py,sha256=TMtBuJoZk6q4M_rfkr8yPpGzJD74ycqyN3yXMuU9mr4,23625
|
18
|
-
tme/matching_data.py,sha256=XBt-HquJlB9uONFxrbTkX0X_PD7eOltNArrEJtaNIOQ,22243
|
19
|
-
tme/matching_exhaustive.py,sha256=TeJ4cPxXq2c7ywi6be-U8uWTCk2JXPuyZjACSiUmMNw,54874
|
20
|
-
tme/matching_memory.py,sha256=GB71b7m-h0fF1A5HCUlAYQh62-Aq2lHgIoRGPYEzZrU,11245
|
21
|
-
tme/matching_optimization.py,sha256=oo9HJV65Ji1oL2JumxgmtrHSMUQsKrpDhdOO5o-fAdM,37388
|
22
|
-
tme/matching_utils.py,sha256=-cs3Nq8d9snaZ0mlgkSqdEc0c9A1TkX1sz93YY7TE_k,37227
|
23
|
-
tme/parser.py,sha256=tA9ABeV95cZn8lJCukVUaocQ9RguR6ZZzQsMXf_-ud0,13887
|
24
|
-
tme/preprocessor.py,sha256=UkWKBR9Z7FU-xUZ0ER_4c6lgLW9-MGmm-YN5Cs2mWD0,50631
|
25
|
-
tme/structure.py,sha256=AKGZML8bnsqZOpAOQADU1XDXZ-ITPhr3Dc0udQUMP6E,54134
|
26
|
-
tme/types.py,sha256=2Tyh_xnMLxIWYb3aJDAUb6GWpaL6gcYMUm2YNbJlAPI,295
|
27
|
-
tme/backends/__init__.py,sha256=xB2GBUFRskppvEs6S74VH0Pi-nXnIvu9_QFhESlcl3Y,4366
|
28
|
-
tme/backends/cupy_backend.py,sha256=CRwGqtycawTIfSfEDWpCQh4mcG03wad9mEODqzy75yQ,10648
|
29
|
-
tme/backends/matching_backend.py,sha256=E3cMXnMEazYJUr9RP5Q5rMEAf3vbkiOwzWrx5amt_nI,29311
|
30
|
-
tme/backends/mlx_backend.py,sha256=MrwICZpUiAcpZXON70r4SH-KsWxfhq1PdHUe80WbT-k,8467
|
31
|
-
tme/backends/npfftw_backend.py,sha256=VsoKwMmusUxW81b2YSa6KO0_pfUw-51fdv9O6BkN0cQ,26200
|
32
|
-
tme/backends/pytorch_backend.py,sha256=lKv_7q0gDrCfbwHR38tfj5ixpD1lfCpqpfqS1KnOqdU,17978
|
33
|
-
tme/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
34
|
-
tme/data/c48n309.npy,sha256=NwH64mOEbm3tStq5c98o81fY1vMOoq4nvXDAh7Z7iZg,296768
|
35
|
-
tme/data/c48n527.npy,sha256=saSUMTa1R0MisPvgFL02a7IHQSwEZ-mJu0v3qJjg5AU,506048
|
36
|
-
tme/data/c48n9.npy,sha256=bDVLV6mWjZHSQfeDc-MOCKKarfc1jaNeVvpoe2xMUy4,8768
|
37
|
-
tme/data/c48u1.npy,sha256=JeXMFzFITs2ezdc3x5lp3jo1cHHHHVADSA1Tpf77kXs,1088
|
38
|
-
tme/data/c48u1153.npy,sha256=ECiEximtYDWtIux3Fwe_EJlyn08gUqP85DN9gjkT9_k,1107008
|
39
|
-
tme/data/c48u1201.npy,sha256=aceC_Jeienz_81X4520nPpZcg5tnRhbW795EqbpWkrg,1153088
|
40
|
-
tme/data/c48u1641.npy,sha256=p4LwW3LzdTjrUUpA7H53RfNWxYfPX0XjeSwZ39Ac78Q,1575488
|
41
|
-
tme/data/c48u181.npy,sha256=mLYXrv1YHLH6DsBp5MkxHkxlxgMnj1mw_KKI0udH-FY,173888
|
42
|
-
tme/data/c48u2219.npy,sha256=p8TQeX8YHu4pdxnwJjEAlQWAPa66W7kpK96iZKZr9JE,2130368
|
43
|
-
tme/data/c48u27.npy,sha256=k03ZNEsoPwBKCy8IeIa5G0WRZqjGZMtX6Ibu7EpJHvU,26048
|
44
|
-
tme/data/c48u2947.npy,sha256=icI97ED6ct66y7FIaJAugmjzrIWk7CINCxtO3wDTnrU,2829248
|
45
|
-
tme/data/c48u3733.npy,sha256=tla-__Pf-hpN6h04vtFIfkkFdCLple11VO06kr1dXkM,3583808
|
46
|
-
tme/data/c48u4749.npy,sha256=tItOA4oV7SiqCCREwz3fyEpZoxM0lCq_jfEo5_-fp2s,4559168
|
47
|
-
tme/data/c48u5879.npy,sha256=bFk89MllIFCX_sLXTYWFquSyN1NuahH4wwnEsPJLxzA,5643968
|
48
|
-
tme/data/c48u7111.npy,sha256=CMy9kI2edH-q9eTIVdgUtXurplYNI7Uqp4dXfkkVdf8,6826688
|
49
|
-
tme/data/c48u815.npy,sha256=bCuJxLtm0Sjg3GGxtyjGzRYZ1G0Gz79XHI-71GvqQnI,782528
|
50
|
-
tme/data/c48u83.npy,sha256=7ODJYnsiuDjGbgd9GFopsyIW2IjrYI0J2X2f-cK868U,79808
|
51
|
-
tme/data/c48u8649.npy,sha256=-IPlpR4zrPQZWhhSPu4zEulFdrCEVgTMFffCB5d-huE,8303168
|
52
|
-
tme/data/c600v.npy,sha256=JqSu3ALoL1A9iguehc0YGUMFPsh2fprHHp76VXeFXIw,2528
|
53
|
-
tme/data/c600vc.npy,sha256=Yht-GFXDSjjGvsjFBvyxxEZAI-ODADPd5gEgFNZQVTA,14528
|
54
|
-
tme/data/metadata.yaml,sha256=fAgX-mEzB0QMHTEtYDG4cSMbJhYxBbDJH3sdvJvL7a8,750
|
55
|
-
tme/data/quat_to_numpy.py,sha256=-gkDZb10fKBxwfYrSLCUWvMB76TzZWELCeKsYProwws,1333
|
56
|
-
pytme-0.1.9.dist-info/LICENSE,sha256=K1IUNSVAz8BXbpH5EA8y5FpaHdvFXnAF2zeK95Lr2bY,18467
|
57
|
-
pytme-0.1.9.dist-info/METADATA,sha256=Pi-HvITCkfSZK8W5Od_Vkv-lvML10ZzB2VdaSgQWJlo,2121
|
58
|
-
pytme-0.1.9.dist-info/WHEEL,sha256=-A_a4qtPtE7THspi2apYwdQWhv5IW90gRqlNhh7cwR4,110
|
59
|
-
pytme-0.1.9.dist-info/entry_points.txt,sha256=ff3LQL3FCWfCYOwFiP9zatm7laUbnwCkuPELkQVyUO4,241
|
60
|
-
pytme-0.1.9.dist-info/top_level.txt,sha256=J8FUkazOb2fZ0n_KexnqCGyNOtie2bwisFSUBiM5-0w,12
|
61
|
-
pytme-0.1.9.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|