pytme 0.1.8__cp311-cp311-macosx_14_0_arm64.whl → 0.2.0__cp311-cp311-macosx_14_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pytme-0.2.0.data/scripts/match_template.py +1019 -0
- pytme-0.2.0.data/scripts/postprocess.py +570 -0
- {pytme-0.1.8.data → pytme-0.2.0.data}/scripts/preprocessor_gui.py +244 -60
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/METADATA +3 -1
- pytme-0.2.0.dist-info/RECORD +72 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/WHEEL +1 -1
- scripts/extract_candidates.py +218 -0
- scripts/match_template.py +459 -218
- pytme-0.1.8.data/scripts/match_template.py → scripts/match_template_filters.py +459 -218
- scripts/postprocess.py +380 -435
- scripts/preprocessor_gui.py +244 -60
- scripts/refine_matches.py +218 -0
- tme/__init__.py +2 -1
- tme/__version__.py +1 -1
- tme/analyzer.py +533 -78
- tme/backends/cupy_backend.py +80 -15
- tme/backends/npfftw_backend.py +35 -6
- tme/backends/pytorch_backend.py +15 -7
- tme/density.py +173 -78
- tme/extensions.cpython-311-darwin.so +0 -0
- tme/matching_constrained.py +195 -0
- tme/matching_data.py +78 -32
- tme/matching_exhaustive.py +369 -221
- tme/matching_memory.py +1 -0
- tme/matching_optimization.py +753 -649
- tme/matching_utils.py +152 -8
- tme/orientations.py +561 -0
- tme/preprocessing/__init__.py +2 -0
- tme/preprocessing/_utils.py +176 -0
- tme/preprocessing/composable_filter.py +30 -0
- tme/preprocessing/compose.py +52 -0
- tme/preprocessing/frequency_filters.py +322 -0
- tme/preprocessing/tilt_series.py +967 -0
- tme/preprocessor.py +35 -25
- tme/structure.py +2 -37
- pytme-0.1.8.data/scripts/postprocess.py +0 -625
- pytme-0.1.8.dist-info/RECORD +0 -61
- {pytme-0.1.8.data → pytme-0.2.0.data}/scripts/estimate_ram_usage.py +0 -0
- {pytme-0.1.8.data → pytme-0.2.0.data}/scripts/preprocess.py +0 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/LICENSE +0 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/entry_points.txt +0 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/top_level.txt +0 -0
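The hunk below is the new `pytme-0.2.0.data/scripts/match_template.py` (+1019 lines). It builds on the composable filters added in `tme/preprocessing/` (`Compose`, `BandPassFilter`, `LinearWhiteningFilter`, and the tilt-series tools). As a minimal sketch of how these pieces compose, using only the constructor signatures visible in the `setup_filter` function below — the voxel size and resolution cutoffs are illustrative placeholders, not values from this release:

```python
from tme.preprocessing import Compose, LinearWhiteningFilter, BandPassFilter

# Band-pass with smooth (Gaussian) edges followed by spectral whitening,
# mirroring what setup_filter() assembles from --lowpass/--highpass and
# --whiten_spectrum.
bandpass = BandPassFilter(
    use_gaussian=True,   # a hard edge corresponds to passing --no_pass_smooth
    lowpass=10.0,        # resolutions in units of the sampling rate (here Å)
    highpass=40.0,
    sampling_rate=4.0,   # assumed voxel size; placeholder
)
template_filter = Compose([bandpass, LinearWhiteningFilter()])
```

As in the script, the composed filter is assigned to `MatchingData.template_filter` rather than applied by hand.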
@@ -0,0 +1,1019 @@
+#!python
+""" CLI interface for basic pyTME template matching functions.
+
+Copyright (c) 2023 European Molecular Biology Laboratory
+
+Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
+"""
+import os
+import argparse
+import warnings
+import importlib.util
+from sys import exit
+from time import time
+from typing import Tuple
+from copy import deepcopy
+from os.path import abspath, exists
+
+import numpy as np
+
+from tme import Density, __version__
+from tme.matching_utils import (
+    get_rotation_matrices,
+    get_rotations_around_vector,
+    compute_parallelization_schedule,
+    euler_from_rotationmatrix,
+    scramble_phases,
+    generate_tempfile_name,
+    write_pickle,
+)
+from tme.matching_exhaustive import scan_subsets, MATCHING_EXHAUSTIVE_REGISTER
+from tme.matching_data import MatchingData
+from tme.analyzer import (
+    MaxScoreOverRotations,
+    PeakCallerMaximumFilter,
+)
+from tme.backends import backend
+from tme.preprocessing import Compose
+
+
+def get_func_fullname(func) -> str:
+    """Returns the full name of the given function, including its module."""
+    return f"<function '{func.__module__}.{func.__name__}'>"
+
+
+def print_block(name: str, data: dict, label_width=20) -> None:
+    """Prints a formatted block of information."""
+    print(f"\n> {name}")
+    for key, value in data.items():
+        formatted_value = str(value)
+        print(f"  - {key + ':':<{label_width}} {formatted_value}")
+
+
+def print_entry() -> None:
+    width = 80
+    text = f" pyTME v{__version__} "
+    padding_total = width - len(text) - 2
+    padding_left = padding_total // 2
+    padding_right = padding_total - padding_left
+
+    print("*" * width)
+    print(f"*{ ' ' * padding_left }{text}{ ' ' * padding_right }*")
+    print("*" * width)
+
+
+def check_positive(value):
+    ivalue = float(value)
+    if ivalue <= 0:
+        raise argparse.ArgumentTypeError("%s is an invalid positive float." % value)
+    return ivalue
+
+
+def load_and_validate_mask(mask_target: "Density", mask_path: str, **kwargs):
+    """
+    Loads a mask in CCP4/MRC format and assesses whether its sampling_rate
+    and shape match the target.
+
+    Parameters
+    ----------
+    mask_target : Density
+        Object the mask should be applied to.
+    mask_path : str
+        Path to the mask in CCP4/MRC format.
+    kwargs : dict, optional
+        Keyword arguments passed to :py:meth:`tme.density.Density.from_file`.
+    Raises
+    ------
+    ValueError
+        If shape or sampling rate do not match between mask_target and mask.
+
+    Returns
+    -------
+    Density
+        A density instance if the mask was validated and loaded, otherwise None.
+    """
+    mask = mask_path
+    if mask is not None:
+        mask = Density.from_file(mask, **kwargs)
+        mask.origin = deepcopy(mask_target.origin)
+        if not np.allclose(mask.shape, mask_target.shape):
+            raise ValueError(
+                f"Expected shape of {mask_path} was {mask_target.shape},"
+                f" got {mask.shape}"
+            )
+        if not np.allclose(mask.sampling_rate, mask_target.sampling_rate):
+            raise ValueError(
+                f"Expected sampling_rate of {mask_path} was {mask_target.sampling_rate}"
+                f", got {mask.sampling_rate}"
+            )
+    return mask
+
+
+def crop_data(data: Density, cutoff: float, data_mask: Density = None) -> bool:
+    """
+    Crop the provided data and mask to a smaller box based on a cutoff value.
+
+    Parameters
+    ----------
+    data : Density
+        The data that should be cropped.
+    cutoff : float
+        The threshold value to determine which parts of the data should be kept.
+    data_mask : Density, optional
+        A mask for the data that should be cropped.
+
+    Returns
+    -------
+    bool
+        Returns True if the data was adjusted (cropped), otherwise returns False.
+
+    Notes
+    -----
+    Cropping is performed in place.
+    """
+    if cutoff is None:
+        return False
+
+    box = data.trim_box(cutoff=cutoff)
+    box_mask = box
+    if data_mask is not None:
+        box_mask = data_mask.trim_box(cutoff=cutoff)
+    box = tuple(
+        slice(min(arr.start, mask.start), max(arr.stop, mask.stop))
+        for arr, mask in zip(box, box_mask)
+    )
+    if box == tuple(slice(0, x) for x in data.shape):
+        return False
+
+    data.adjust_box(box)
+
+    if data_mask:
+        data_mask.adjust_box(box)
+
+    return True
+
+
+def parse_rotation_logic(args, ndim):
+    if args.angular_sampling is not None:
+        rotations = get_rotation_matrices(
+            angular_sampling=args.angular_sampling,
+            dim=ndim,
+            use_optimized_set=not args.no_use_optimized_set,
+        )
+        if args.angular_sampling >= 180:
+            rotations = np.eye(ndim).reshape(1, ndim, ndim)
+        return rotations
+
+    if args.axis_sampling is None:
+        args.axis_sampling = args.cone_sampling
+
+    rotations = get_rotations_around_vector(
+        cone_angle=args.cone_angle,
+        cone_sampling=args.cone_sampling,
+        axis_angle=args.axis_angle,
+        axis_sampling=args.axis_sampling,
+        n_symmetry=args.axis_symmetry,
+    )
+    return rotations
+
+
+# TODO: Think about whether wedge mask should also be added to target
+# For now leave it at the cost of incorrect upper bound on the scores
+def setup_filter(args, template: Density, target: Density) -> Tuple[Compose, Compose]:
+    from tme.preprocessing import LinearWhiteningFilter, BandPassFilter
+    from tme.preprocessing.tilt_series import (
+        Wedge,
+        WedgeReconstructed,
+        ReconstructFromTilt,
+    )
+
+    template_filter, target_filter = [], []
+    if args.tilt_angles is not None:
+        try:
+            wedge = Wedge.from_file(args.tilt_angles)
+            wedge.weight_type = args.tilt_weighting
+            if args.tilt_weighting in ("angle", None) and args.ctf_file is None:
+                wedge = WedgeReconstructed(
+                    angles=wedge.angles, weight_wedge=args.tilt_weighting == "angle"
+                )
+        except FileNotFoundError:
+            tilt_step, create_continuous_wedge = None, True
+            tilt_start, tilt_stop = args.tilt_angles.split(",")
+            if ":" in tilt_stop:
+                create_continuous_wedge = False
+                tilt_stop, tilt_step = tilt_stop.split(":")
+            tilt_start, tilt_stop = float(tilt_start), float(tilt_stop)
+            tilt_angles = (tilt_start, tilt_stop)
+            if tilt_step is not None:
+                tilt_step = float(tilt_step)
+                tilt_angles = np.arange(
+                    -tilt_start, tilt_stop + tilt_step, tilt_step
+                ).tolist()
+
+            if args.tilt_weighting is not None and tilt_step is None:
+                raise ValueError(
+                    "Tilt weighting is not supported for continuous wedges."
+                )
+            if args.tilt_weighting not in ("angle", None):
+                raise ValueError(
+                    "Tilt weighting schemes other than 'angle' or 'None' require "
+                    "a specification of electron doses."
+                )
+
+            wedge = Wedge(
+                angles=tilt_angles,
+                opening_axis=args.wedge_axes[0],
+                tilt_axis=args.wedge_axes[1],
+                shape=None,
+                weight_type=None,
+                weights=np.ones_like(tilt_angles),
+            )
+            if args.tilt_weighting in ("angle", None) and args.ctf_file is None:
+                wedge = WedgeReconstructed(
+                    angles=tilt_angles,
+                    weight_wedge=args.tilt_weighting == "angle",
+                    create_continuous_wedge=create_continuous_wedge,
+                )
+
+        wedge.opening_axis = args.wedge_axes[0]
+        wedge.tilt_axis = args.wedge_axes[1]
+        wedge.sampling_rate = template.sampling_rate
+        template_filter.append(wedge)
+        if not isinstance(wedge, WedgeReconstructed):
+            template_filter.append(ReconstructFromTilt(
+                reconstruction_filter=args.reconstruction_filter
+            ))
+
+    if args.ctf_file is not None:
+        from tme.preprocessing.tilt_series import CTF
+
+        ctf = CTF.from_file(args.ctf_file)
+        n_tilts_ctfs, n_tilt_angles = len(ctf.defocus_x), len(wedge.angles)
+        if n_tilts_ctfs != n_tilt_angles:
+            raise ValueError(
+                f"CTF file contains {n_tilts_ctfs} micrographs, but match_template "
+                f"received {n_tilt_angles} tilt angles. Expected one angle "
+                "per micrograph."
+            )
+        ctf.angles = wedge.angles
+        ctf.opening_axis, ctf.tilt_axis = args.wedge_axes
+
+        if isinstance(template_filter[-1], ReconstructFromTilt):
+            template_filter.insert(-1, ctf)
+        else:
+            template_filter.insert(0, ctf)
+            template_filter.insert(1, ReconstructFromTilt(
+                reconstruction_filter=args.reconstruction_filter
+            ))
+
+    if args.lowpass is not None or args.highpass is not None:
+        lowpass, highpass = args.lowpass, args.highpass
+        if args.pass_format == "voxel":
+            if lowpass is not None:
+                lowpass = np.max(np.multiply(lowpass, template.sampling_rate))
+            if highpass is not None:
+                highpass = np.max(np.multiply(highpass, template.sampling_rate))
+        elif args.pass_format == "frequency":
+            if lowpass is not None:
+                lowpass = np.max(np.divide(template.sampling_rate, lowpass))
+            if highpass is not None:
+                highpass = np.max(np.divide(template.sampling_rate, highpass))
+
+        bandpass = BandPassFilter(
+            use_gaussian=args.no_pass_smooth,
+            lowpass=lowpass,
+            highpass=highpass,
+            sampling_rate=template.sampling_rate,
+        )
+        template_filter.append(bandpass)
+        target_filter.append(bandpass)
+
+    if args.whiten_spectrum:
+        whitening_filter = LinearWhiteningFilter()
+        template_filter.append(whitening_filter)
+        target_filter.append(whitening_filter)
+
+    template_filter = Compose(template_filter) if len(template_filter) else None
+    target_filter = Compose(target_filter) if len(target_filter) else None
+
+    return template_filter, target_filter
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Perform template matching.")
+
+    io_group = parser.add_argument_group("Input / Output")
+    io_group.add_argument(
+        "-m",
+        "--target",
+        dest="target",
+        type=str,
+        required=True,
+        help="Path to a target in CCP4/MRC, EM, H5 or another format supported by "
+        "tme.density.Density.from_file "
+        "https://kosinskilab.github.io/pyTME/reference/api/tme.density.Density.from_file.html",
+    )
+    io_group.add_argument(
+        "--target_mask",
+        dest="target_mask",
+        type=str,
+        required=False,
+        help="Path to a mask for the target in a supported format (see target).",
+    )
+    io_group.add_argument(
+        "-i",
+        "--template",
+        dest="template",
+        type=str,
+        required=True,
+        help="Path to a template in PDB/MMCIF or other supported formats (see target).",
+    )
+    io_group.add_argument(
+        "--template_mask",
+        dest="template_mask",
+        type=str,
+        required=False,
+        help="Path to a mask for the template in a supported format (see target).",
+    )
+    io_group.add_argument(
+        "-o",
+        "--output",
+        dest="output",
+        type=str,
+        required=False,
+        default="output.pickle",
+        help="Path to the output pickle file.",
+    )
+    io_group.add_argument(
+        "--invert_target_contrast",
+        dest="invert_target_contrast",
+        action="store_true",
+        default=False,
+        help="Invert the target's contrast and rescale linearly between zero and one. "
+        "This option is intended for targets where the templates to be matched have "
+        "negative values, e.g. tomograms.",
+    )
+    io_group.add_argument(
+        "--scramble_phases",
+        dest="scramble_phases",
+        action="store_true",
+        default=False,
+        help="Phase scramble the template to generate a noise score background.",
+    )
+
+    scoring_group = parser.add_argument_group("Scoring")
+    scoring_group.add_argument(
+        "-s",
+        dest="score",
+        type=str,
+        default="FLCSphericalMask",
+        choices=list(MATCHING_EXHAUSTIVE_REGISTER.keys()),
+        help="Template matching scoring function.",
+    )
+    scoring_group.add_argument(
+        "-p",
+        dest="peak_calling",
+        action="store_true",
+        default=False,
+        help="Perform peak calling instead of score aggregation.",
+    )
+
+    angular_group = parser.add_argument_group("Angular Sampling")
+    angular_exclusive = angular_group.add_mutually_exclusive_group(required=True)
+
+    angular_exclusive.add_argument(
+        "-a",
+        dest="angular_sampling",
+        type=check_positive,
+        default=None,
+        help="Angular sampling rate using optimized rotational sets. "
+        "A lower number yields more rotations. Values >= 180 sample only the identity.",
+    )
+    angular_exclusive.add_argument(
+        "--cone_angle",
+        dest="cone_angle",
+        type=check_positive,
+        default=None,
+        help="Half-angle of the cone to be sampled in degrees. Allows sampling a "
+        "narrow interval around a known orientation, e.g. for surface oversampling.",
+    )
+    angular_group.add_argument(
+        "--cone_sampling",
+        dest="cone_sampling",
+        type=check_positive,
+        default=None,
+        help="Sampling rate of the cone in degrees.",
+    )
+    angular_group.add_argument(
+        "--axis_angle",
+        dest="axis_angle",
+        type=check_positive,
+        default=360.0,
+        required=False,
+        help="Sampling angle along the z-axis of the cone. Defaults to 360.",
+    )
+    angular_group.add_argument(
+        "--axis_sampling",
+        dest="axis_sampling",
+        type=check_positive,
+        default=None,
+        required=False,
+        help="Sampling rate along the z-axis of the cone. Defaults to --cone_sampling.",
+    )
+    angular_group.add_argument(
+        "--axis_symmetry",
+        dest="axis_symmetry",
+        type=check_positive,
+        default=1,
+        required=False,
+        help="N-fold symmetry around z-axis of the cone.",
+    )
+    angular_group.add_argument(
+        "--no_use_optimized_set",
+        dest="no_use_optimized_set",
+        action="store_true",
+        default=False,
+        required=False,
+        help="Whether to use random uniform instead of optimized rotation sets.",
+    )
+
+    computation_group = parser.add_argument_group("Computation")
+    computation_group.add_argument(
+        "-n",
+        dest="cores",
+        required=False,
+        type=int,
+        default=4,
+        help="Number of cores used for template matching.",
+    )
+    computation_group.add_argument(
+        "--use_gpu",
+        dest="use_gpu",
+        action="store_true",
+        default=False,
+        help="Whether to perform computations on the GPU.",
+    )
+    computation_group.add_argument(
+        "--gpu_indices",
+        dest="gpu_indices",
+        type=str,
+        default=None,
+        help="Comma-separated list of GPU indices to use. For example,"
+        " 0,1 for the first and second GPU. Only used if --use_gpu is set."
+        " If not provided but --use_gpu is set, CUDA_VISIBLE_DEVICES will"
+        " be respected.",
+    )
+    computation_group.add_argument(
+        "-r",
+        "--ram",
+        dest="memory",
+        required=False,
+        type=int,
+        default=None,
+        help="Amount of memory that can be used in bytes.",
+    )
+    computation_group.add_argument(
+        "--memory_scaling",
+        dest="memory_scaling",
+        required=False,
+        type=float,
+        default=0.85,
+        help="Fraction of available memory that can be used. Defaults to 0.85 and is "
+        "ignored if --ram is set.",
+    )
+    computation_group.add_argument(
+        "--temp_directory",
+        dest="temp_directory",
+        default=None,
+        help="Directory for temporary objects. Faster I/O improves runtime.",
+    )
+
+    filter_group = parser.add_argument_group("Filters")
+    filter_group.add_argument(
+        "--lowpass",
+        dest="lowpass",
+        type=float,
+        required=False,
+        help="Resolution to lowpass filter template and target to in the same unit "
+        "as the sampling rate of template and target (typically Ångstrom).",
+    )
+    filter_group.add_argument(
+        "--highpass",
+        dest="highpass",
+        type=float,
+        required=False,
+        help="Resolution to highpass filter template and target to in the same unit "
+        "as the sampling rate of template and target (typically Ångstrom).",
+    )
+    filter_group.add_argument(
+        "--no_pass_smooth",
+        dest="no_pass_smooth",
+        action="store_false",
+        default=True,
+        help="Whether a hard edge filter should be used for --lowpass and --highpass.",
+    )
+    filter_group.add_argument(
+        "--pass_format",
+        dest="pass_format",
+        type=str,
+        required=False,
+        choices=["sampling_rate", "voxel", "frequency"],
+        help="How values passed to --lowpass and --highpass should be interpreted. "
+        "By default, they are assumed to be in units of sampling rate, e.g. Ångstrom.",
+    )
+    filter_group.add_argument(
+        "--whiten_spectrum",
+        dest="whiten_spectrum",
+        action="store_true",
+        default=None,
+        help="Apply spectral whitening to template and target based on target spectrum.",
+    )
+    filter_group.add_argument(
+        "--wedge_axes",
+        dest="wedge_axes",
+        type=str,
+        required=False,
+        default=None,
+        help="Indices of wedge opening and tilt axis, e.g. 0,2 for a wedge that is open "
+        "in z-direction and tilted over the x axis.",
+    )
+    filter_group.add_argument(
+        "--tilt_angles",
+        dest="tilt_angles",
+        type=str,
+        required=False,
+        default=None,
+        help="Path to a tab-separated file containing the column angles and optionally "
+        "weights, or comma-separated start and stop stage tilt angle, e.g. 50,45, which "
+        "yields a continuous wedge mask. Alternatively, a tilt step size can be "
+        "specified like 50,45:5.0 to sample 5.0 degree tilt angle steps.",
+    )
+    filter_group.add_argument(
+        "--tilt_weighting",
+        dest="tilt_weighting",
+        type=str,
+        required=False,
+        choices=["angle", "relion", "grigorieff"],
+        default=None,
+        help="Weighting scheme used to reweight individual tilts. Available options: "
+        "angle (cosine based weighting), "
+        "relion (relion formalism for wedge weighting), "
+        "grigorieff (exposure filter as defined in Grant and Grigorieff 2015). "
+        "relion and grigorieff require electron doses in the --tilt_angles weights column.",
+    )
+    # filter_group.add_argument(
+    #     "--ctf_file",
+    #     dest="ctf_file",
+    #     type=str,
+    #     required=False,
+    #     default=None,
+    #     help="Path to a file with CTF parameters from CTFFIND4.",
+    # )
+    filter_group.add_argument(
+        "--reconstruction_filter",
+        dest="reconstruction_filter",
+        type=str,
+        required=False,
+        choices=["ram-lak", "ramp", "shepp-logan", "cosine", "hamming"],
+        default=None,
+        help="Filter applied when reconstructing (N+1)-D from N-D filters.",
+    )
+
+    performance_group = parser.add_argument_group("Performance")
+    performance_group.add_argument(
+        "--cutoff_target",
+        dest="cutoff_target",
+        type=float,
+        required=False,
+        default=None,
+        help="Target contour level (used for cropping).",
+    )
+    performance_group.add_argument(
+        "--cutoff_template",
+        dest="cutoff_template",
+        type=float,
+        required=False,
+        default=None,
+        help="Template contour level (used for cropping).",
+    )
+    performance_group.add_argument(
+        "--no_centering",
+        dest="no_centering",
+        action="store_true",
+        help="Assumes the template is already centered and omits centering.",
+    )
+    performance_group.add_argument(
+        "--no_edge_padding",
+        dest="no_edge_padding",
+        action="store_true",
+        default=False,
+        help="Skip padding the edges of the target. Can be set if the target"
+        " has a well-defined bounding box, e.g. a masked reconstruction.",
+    )
+    performance_group.add_argument(
+        "--no_fourier_padding",
+        dest="no_fourier_padding",
+        action="store_true",
+        default=False,
+        help="Skip zero-padding of input arrays to the full convolution shape, which "
+        "is otherwise done for numerical stability. For very large targets, e.g. "
+        "tomograms, it is safe to set this flag and benefit from the performance gain.",
+    )
+    performance_group.add_argument(
+        "--interpolation_order",
+        dest="interpolation_order",
+        required=False,
+        type=int,
+        default=3,
+        help="Spline interpolation used for template rotations. If less than zero "
+        "no interpolation is performed.",
+    )
+    performance_group.add_argument(
+        "--use_mixed_precision",
+        dest="use_mixed_precision",
+        action="store_true",
+        default=False,
+        help="Use float16 for real-valued operations where possible.",
+    )
+    performance_group.add_argument(
+        "--use_memmap",
+        dest="use_memmap",
+        action="store_true",
+        default=False,
+        help="Use memmaps to offload large data objects to disk. "
+        "Particularly useful for large inputs in combination with --use_gpu.",
+    )
+
+    analyzer_group = parser.add_argument_group("Analyzer")
+    analyzer_group.add_argument(
+        "--score_threshold",
+        dest="score_threshold",
+        required=False,
+        type=float,
+        default=0,
+        help="Minimum template matching score to consider for analysis.",
+    )
+
+    args = parser.parse_args()
+
+    if args.interpolation_order < 0:
+        args.interpolation_order = None
+
+    args.ctf_file = None
+
+    if args.temp_directory is None:
+        default = abspath(".")
+        if os.environ.get("TMPDIR", None) is not None:
+            default = os.environ.get("TMPDIR")
+        args.temp_directory = default
+
+    os.environ["TMPDIR"] = args.temp_directory
+
+    args.pad_target_edges = not args.no_edge_padding
+    args.pad_fourier = not args.no_fourier_padding
+
+    if args.score not in MATCHING_EXHAUSTIVE_REGISTER:
+        raise ValueError(
+            f"score has to be one of {', '.join(MATCHING_EXHAUSTIVE_REGISTER.keys())}"
+        )
+
+    gpu_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
+    if args.gpu_indices is not None:
+        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_indices
+
+    if args.use_gpu:
+        gpu_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
+        if gpu_devices is None:
+            print(
+                "No GPU indices provided and CUDA_VISIBLE_DEVICES is not set.",
+                "Assuming device 0.",
+            )
+            os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+        args.gpu_indices = [
+            int(x) for x in os.environ["CUDA_VISIBLE_DEVICES"].split(",")
+        ]
+
+    if args.tilt_angles is not None:
+        if args.wedge_axes is None:
+            raise ValueError("Need to specify --wedge_axes when --tilt_angles is set.")
+        if not exists(args.tilt_angles):
+            try:
+                float(args.tilt_angles.split(",")[0])
+            except ValueError:
+                raise ValueError(f"{args.tilt_angles} is not a file nor a range.")
+
+    if args.ctf_file is not None and args.tilt_angles is None:
+        raise ValueError("Need to specify --tilt_angles when --ctf_file is set.")
+
+    if args.wedge_axes is not None:
+        args.wedge_axes = tuple(int(i) for i in args.wedge_axes.split(","))
+
+    return args
+
+
+def main():
+    args = parse_args()
+    print_entry()
+
+    target = Density.from_file(args.target, use_memmap=True)
+
+    try:
+        template = Density.from_file(args.template)
+    except Exception:
+        template = Density.from_structure(
+            filename_or_structure=args.template,
+            sampling_rate=target.sampling_rate,
+        )
+
+    if not np.allclose(target.sampling_rate, template.sampling_rate):
+        print(
+            f"Resampling template to {target.sampling_rate}. "
+            "Consider providing a template with the same sampling rate as the target."
+        )
+        template = template.resample(target.sampling_rate, order=3)
+
+    template_mask = load_and_validate_mask(
+        mask_target=template, mask_path=args.template_mask
+    )
+    target_mask = load_and_validate_mask(
+        mask_target=target, mask_path=args.target_mask, use_memmap=True
+    )
+
+    initial_shape = target.shape
+    is_cropped = crop_data(
+        data=target, data_mask=target_mask, cutoff=args.cutoff_target
+    )
+    print_block(
+        name="Target",
+        data={
+            "Initial Shape": initial_shape,
+            "Sampling Rate": tuple(np.round(target.sampling_rate, 2)),
+            "Final Shape": target.shape,
+        },
+    )
+    if is_cropped:
+        args.target = generate_tempfile_name(suffix=".mrc")
+        target.to_file(args.target)
+
+        if target_mask:
+            args.target_mask = generate_tempfile_name(suffix=".mrc")
+            target_mask.to_file(args.target_mask)
+
+    if target_mask:
+        print_block(
+            name="Target Mask",
+            data={
+                "Initial Shape": initial_shape,
+                "Sampling Rate": tuple(np.round(target_mask.sampling_rate, 2)),
+                "Final Shape": target_mask.shape,
+            },
+        )
+
+    initial_shape = template.shape
+    _ = crop_data(data=template, data_mask=template_mask, cutoff=args.cutoff_template)
+
+    translation = np.zeros(len(template.shape), dtype=np.float32)
+    if not args.no_centering:
+        template, translation = template.centered(0)
+    print_block(
+        name="Template",
+        data={
+            "Initial Shape": initial_shape,
+            "Sampling Rate": tuple(np.round(template.sampling_rate, 2)),
+            "Final Shape": template.shape,
+        },
+    )
+
+    if template_mask is None:
+        template_mask = template.empty
+        if not args.no_centering:
+            enclosing_box = template.minimum_enclosing_box(
+                0, use_geometric_center=False
+            )
+            template_mask.adjust_box(enclosing_box)
+
+        template_mask.data[:] = 1
+        translation = np.zeros_like(translation)
+
+    template_mask.pad(template.shape, center=False)
+    origin_translation = np.divide(
+        np.subtract(template.origin, template_mask.origin), template.sampling_rate
+    )
+    translation = np.add(translation, origin_translation)
+
+    template_mask = template_mask.rigid_transform(
+        rotation_matrix=np.eye(template_mask.data.ndim),
+        translation=-translation,
+        order=1,
+    )
+    template_mask.origin = template.origin.copy()
+    print_block(
+        name="Template Mask",
+        data={
+            "Initial Shape": initial_shape,
+            "Sampling Rate": tuple(np.round(template_mask.sampling_rate, 2)),
+            "Final Shape": template_mask.shape,
+        },
+    )
+    print("\n" + "-" * 80)
+
+    if args.scramble_phases:
+        template.data = scramble_phases(
+            template.data, noise_proportion=1.0, normalize_power=True
+        )
+
+    available_memory = backend.get_available_memory()
+    if args.use_gpu:
+        args.cores = len(args.gpu_indices)
+        has_torch = importlib.util.find_spec("torch") is not None
+        has_cupy = importlib.util.find_spec("cupy") is not None
+
+        if not has_torch and not has_cupy:
+            raise ValueError(
+                "Found neither a CuPy nor a PyTorch installation. You need to install"
+                " one of them to enable GPU support."
+            )
+
+        if args.peak_calling:
+            preferred_backend = "pytorch"
+            if not has_torch:
+                preferred_backend = "cupy"
+            backend.change_backend(backend_name=preferred_backend, device="cuda")
+        else:
+            preferred_backend = "cupy"
+            if not has_cupy:
+                preferred_backend = "pytorch"
+            backend.change_backend(backend_name=preferred_backend, device="cuda")
+            if args.use_mixed_precision and preferred_backend == "pytorch":
+                raise NotImplementedError(
+                    "pytorch backend does not yet support mixed precision."
+                    " Consider installing CuPy to enable this feature."
+                )
+            elif args.use_mixed_precision:
+                backend.change_backend(
+                    backend_name="cupy",
+                    default_dtype=backend._array_backend.float16,
+                    complex_dtype=backend._array_backend.complex64,
+                    default_dtype_int=backend._array_backend.int16,
+                )
+        available_memory = backend.get_available_memory() * args.cores
+        if preferred_backend == "pytorch" and args.interpolation_order == 3:
+            args.interpolation_order = 1
+
+    if args.memory is None:
+        args.memory = int(args.memory_scaling * available_memory)
+
+    target_padding = np.zeros_like(template.shape)
+    if args.pad_target_edges:
+        target_padding = template.shape
+
+    template_box = template.shape
+    if not args.pad_fourier:
+        template_box = np.ones(len(template_box), dtype=int)
+
+    callback_class = MaxScoreOverRotations
+    if args.peak_calling:
+        callback_class = PeakCallerMaximumFilter
+
+    splits, schedule = compute_parallelization_schedule(
+        shape1=target.shape,
+        shape2=template_box,
+        shape1_padding=target_padding,
+        max_cores=args.cores,
+        max_ram=args.memory,
+        split_only_outer=args.use_gpu,
+        matching_method=args.score,
+        analyzer_method=callback_class.__name__,
+        backend=backend._backend_name,
+        float_nbytes=backend.datatype_bytes(backend._default_dtype),
+        complex_nbytes=backend.datatype_bytes(backend._complex_dtype),
+        integer_nbytes=backend.datatype_bytes(backend._default_dtype_int),
+    )
+
+    if splits is None:
+        print(
+            "Found no suitable parallelization schedule. Consider increasing"
+            " available RAM or decreasing the number of cores."
+        )
+        exit(-1)
+
+    matching_setup, matching_score = MATCHING_EXHAUSTIVE_REGISTER[args.score]
+    matching_data = MatchingData(target=target, template=template.data)
+    matching_data.rotations = parse_rotation_logic(args=args, ndim=target.data.ndim)
+
+    template_filter, target_filter = setup_filter(args, template, target)
+    matching_data.template_filter = template_filter
+    matching_data.target_filter = target_filter
+
+    matching_data.template_filter = template_filter
+    matching_data._invert_target = args.invert_target_contrast
+    if target_mask is not None:
+        matching_data.target_mask = target_mask
+    if template_mask is not None:
+        matching_data.template_mask = template_mask.data
+
+    n_splits = np.prod(list(splits.values()))
+    target_split = ", ".join(
+        [":".join([str(x) for x in axis]) for axis in splits.items()]
+    )
+    gpus_used = 0 if args.gpu_indices is None else len(args.gpu_indices)
+    options = {
+        "CPU Cores": args.cores,
+        "Run on GPU": f"{args.use_gpu} [N={gpus_used}]",
+        "Use Mixed Precision": args.use_mixed_precision,
+        "Assigned Memory [MB]": f"{args.memory // 1e6} [out of {available_memory // 1e6}]",
+        "Temporary Directory": args.temp_directory,
+        "Extend Fourier Grid": not args.no_fourier_padding,
+        "Extend Target Edges": not args.no_edge_padding,
+        "Interpolation Order": args.interpolation_order,
+        "Score": f"{args.score}",
+        "Setup Function": f"{get_func_fullname(matching_setup)}",
+        "Scoring Function": f"{get_func_fullname(matching_score)}",
+        "Angular Sampling": f"{args.angular_sampling}"
+        f" [{matching_data.rotations.shape[0]} rotations]",
+        "Scramble Template": args.scramble_phases,
+        "Target Splits": f"{target_split} [N={n_splits}]",
+    }
+
+    print_block(
+        name="Template Matching Options",
+        data=options,
+        label_width=max(len(key) for key in options.keys()) + 2,
+    )
+
+    filter_args = {
+        "Lowpass": args.lowpass,
+        "Highpass": args.highpass,
+        "Smooth Pass": args.no_pass_smooth,
+        "Pass Format": args.pass_format,
+        "Spectral Whitening": args.whiten_spectrum,
+        "Wedge Axes": args.wedge_axes,
+        "Tilt Angles": args.tilt_angles,
+        "Tilt Weighting": args.tilt_weighting,
+        "CTF": args.ctf_file,
+    }
+    filter_args = {k: v for k, v in filter_args.items() if v is not None}
+    if len(filter_args):
+        print_block(
+            name="Filters",
+            data=filter_args,
+            label_width=max(len(key) for key in options.keys()) + 2,
+        )
+
+    analyzer_args = {
+        "score_threshold": args.score_threshold,
+        "number_of_peaks": 1000,
+        "convolution_mode": "valid",
+        "use_memmap": args.use_memmap,
+    }
+    analyzer_args = {"Analyzer": callback_class, **analyzer_args}
+    print_block(
+        name="Score Analysis Options",
+        data=analyzer_args,
+        label_width=max(len(key) for key in options.keys()) + 2,
+    )
+    print("\n" + "-" * 80)
+
+    outer_jobs = f"{schedule[0]} job{'s' if schedule[0] > 1 else ''}"
+    inner_jobs = f"{schedule[1]} core{'s' if schedule[1] > 1 else ''}"
+    n_splits = f"{n_splits} split{'s' if n_splits > 1 else ''}"
+    print(f"\nDistributing {n_splits} on {outer_jobs} each using {inner_jobs}.")
+
+    start = time()
+    print("Running Template Matching. This might take a while ...")
+    candidates = scan_subsets(
+        matching_data=matching_data,
+        job_schedule=schedule,
+        matching_score=matching_score,
+        matching_setup=matching_setup,
+        callback_class=callback_class,
+        callback_class_args=analyzer_args,
+        target_splits=splits,
+        pad_target_edges=args.pad_target_edges,
+        pad_fourier=args.pad_fourier,
+        interpolation_order=args.interpolation_order,
+    )
+
+    candidates = list(candidates) if candidates is not None else []
+    if callback_class == MaxScoreOverRotations:
+        if target_mask is not None and args.score != "MCC":
+            candidates[0] *= target_mask.data
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=UserWarning)
+            candidates[3] = {
+                x: euler_from_rotationmatrix(
+                    np.frombuffer(i, dtype=matching_data.rotations.dtype).reshape(
+                        candidates[0].ndim, candidates[0].ndim
+                    )
+                )
+                for i, x in candidates[3].items()
+            }
+    candidates.append((target.origin, template.origin, target.sampling_rate, args))
+    write_pickle(data=candidates, filename=args.output)
+
+    runtime = time() - start
+    print(f"\nRuntime real: {runtime:.3f}s user: {(runtime * args.cores):.3f}s.")
+
+
+if __name__ == "__main__":
+    main()
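The script serializes its results with `write_pickle`; the new `postprocess.py` is the intended consumer, but the file can also be inspected directly. A minimal sketch, assuming `write_pickle` emits a standard pickle and following the `MaxScoreOverRotations` layout assembled in `main()` above (scores first, the appended origin/sampling-rate/args tuple last):

```python
import pickle

# Load the candidates list written by match_template.py (default output name).
with open("output.pickle", "rb") as handle:
    candidates = pickle.load(handle)

scores = candidates[0]     # per-voxel maximum score over all rotations
rotations = candidates[3]  # rotation index -> Euler angles
target_origin, template_origin, sampling_rate, cli_args = candidates[-1]
print(scores.shape, float(scores.max()))
```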