pytme 0.1.8__cp311-cp311-macosx_14_0_arm64.whl → 0.2.0__cp311-cp311-macosx_14_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pytme-0.2.0.data/scripts/match_template.py +1019 -0
- pytme-0.2.0.data/scripts/postprocess.py +570 -0
- {pytme-0.1.8.data → pytme-0.2.0.data}/scripts/preprocessor_gui.py +244 -60
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/METADATA +3 -1
- pytme-0.2.0.dist-info/RECORD +72 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/WHEEL +1 -1
- scripts/extract_candidates.py +218 -0
- scripts/match_template.py +459 -218
- pytme-0.1.8.data/scripts/match_template.py → scripts/match_template_filters.py +459 -218
- scripts/postprocess.py +380 -435
- scripts/preprocessor_gui.py +244 -60
- scripts/refine_matches.py +218 -0
- tme/__init__.py +2 -1
- tme/__version__.py +1 -1
- tme/analyzer.py +533 -78
- tme/backends/cupy_backend.py +80 -15
- tme/backends/npfftw_backend.py +35 -6
- tme/backends/pytorch_backend.py +15 -7
- tme/density.py +173 -78
- tme/extensions.cpython-311-darwin.so +0 -0
- tme/matching_constrained.py +195 -0
- tme/matching_data.py +78 -32
- tme/matching_exhaustive.py +369 -221
- tme/matching_memory.py +1 -0
- tme/matching_optimization.py +753 -649
- tme/matching_utils.py +152 -8
- tme/orientations.py +561 -0
- tme/preprocessing/__init__.py +2 -0
- tme/preprocessing/_utils.py +176 -0
- tme/preprocessing/composable_filter.py +30 -0
- tme/preprocessing/compose.py +52 -0
- tme/preprocessing/frequency_filters.py +322 -0
- tme/preprocessing/tilt_series.py +967 -0
- tme/preprocessor.py +35 -25
- tme/structure.py +2 -37
- pytme-0.1.8.data/scripts/postprocess.py +0 -625
- pytme-0.1.8.dist-info/RECORD +0 -61
- {pytme-0.1.8.data → pytme-0.2.0.data}/scripts/estimate_ram_usage.py +0 -0
- {pytme-0.1.8.data → pytme-0.2.0.data}/scripts/preprocess.py +0 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/LICENSE +0 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/entry_points.txt +0 -0
- {pytme-0.1.8.dist-info → pytme-0.2.0.dist-info}/top_level.txt +0 -0
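The new tme/preprocessing package listed above provides the composable filters that scripts/match_template.py now assembles in setup_filter() (see the diff below). The following is a minimal sketch of that wiring, using only the classes and keyword arguments visible in this diff; the sampling rate and cutoff values are illustrative, not package defaults:

from tme.preprocessing import Compose, BandPassFilter, LinearWhiteningFilter

sampling_rate = 4.0  # hypothetical voxel size in Ångstrom
filters = [
    BandPassFilter(
        use_gaussian=True,            # smooth band edges (mirrors the --no_pass_smooth default)
        lowpass=40.0,                 # resolution cutoff in the same unit as sampling_rate
        highpass=None,
        sampling_rate=sampling_rate,
    ),
    LinearWhiteningFilter(),          # spectral whitening, toggled by --whiten_spectrum
]
template_filter = Compose(filters) if len(filters) else None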
scripts/match_template.py
CHANGED
@@ -11,14 +11,16 @@ import warnings
 import importlib.util
 from sys import exit
 from time import time
+from typing import Tuple
 from copy import deepcopy
-from os.path import abspath
+from os.path import abspath, exists

 import numpy as np

-from tme import Density,
+from tme import Density, __version__
 from tme.matching_utils import (
     get_rotation_matrices,
+    get_rotations_around_vector,
     compute_parallelization_schedule,
     euler_from_rotationmatrix,
     scramble_phases,
@@ -32,6 +34,7 @@ from tme.analyzer import (
     PeakCallerMaximumFilter,
 )
 from tme.backends import backend
+from tme.preprocessing import Compose


 def get_func_fullname(func) -> str:
@@ -150,77 +153,292 @@ def crop_data(data: Density, cutoff: float, data_mask: Density = None) -> bool:
     return True


+def parse_rotation_logic(args, ndim):
+    if args.angular_sampling is not None:
+        rotations = get_rotation_matrices(
+            angular_sampling=args.angular_sampling,
+            dim=ndim,
+            use_optimized_set=not args.no_use_optimized_set,
+        )
+        if args.angular_sampling >= 180:
+            rotations = np.eye(ndim).reshape(1, ndim, ndim)
+        return rotations
+
+    if args.axis_sampling is None:
+        args.axis_sampling = args.cone_sampling
+
+    rotations = get_rotations_around_vector(
+        cone_angle=args.cone_angle,
+        cone_sampling=args.cone_sampling,
+        axis_angle=args.axis_angle,
+        axis_sampling=args.axis_sampling,
+        n_symmetry=args.axis_symmetry,
+    )
+    return rotations
+
+
+# TODO: Think about whether wedge mask should also be added to target
+# For now leave it at the cost of incorrect upper bound on the scores
+def setup_filter(args, template: Density, target: Density) -> Tuple[Compose, Compose]:
+    from tme.preprocessing import LinearWhiteningFilter, BandPassFilter
+    from tme.preprocessing.tilt_series import (
+        Wedge,
+        WedgeReconstructed,
+        ReconstructFromTilt,
+    )
+
+    template_filter, target_filter = [], []
+    if args.tilt_angles is not None:
+        try:
+            wedge = Wedge.from_file(args.tilt_angles)
+            wedge.weight_type = args.tilt_weighting
+            if args.tilt_weighting in ("angle", None) and args.ctf_file is None:
+                wedge = WedgeReconstructed(
+                    angles=wedge.angles, weight_wedge=args.tilt_weighting == "angle"
+                )
+        except FileNotFoundError:
+            tilt_step, create_continuous_wedge = None, True
+            tilt_start, tilt_stop = args.tilt_angles.split(",")
+            if ":" in tilt_stop:
+                create_continuous_wedge = False
+                tilt_stop, tilt_step = tilt_stop.split(":")
+            tilt_start, tilt_stop = float(tilt_start), float(tilt_stop)
+            tilt_angles = (tilt_start, tilt_stop)
+            if tilt_step is not None:
+                tilt_step = float(tilt_step)
+                tilt_angles = np.arange(
+                    -tilt_start, tilt_stop + tilt_step, tilt_step
+                ).tolist()
+
+            if args.tilt_weighting is not None and tilt_step is None:
+                raise ValueError(
+                    "Tilt weighting is not supported for continuous wedges."
+                )
+            if args.tilt_weighting not in ("angle", None):
+                raise ValueError(
+                    "Tilt weighting schemes other than 'angle' or 'None' require "
+                    "a specification of electron doses."
+                )
+
+            wedge = Wedge(
+                angles=tilt_angles,
+                opening_axis=args.wedge_axes[0],
+                tilt_axis=args.wedge_axes[1],
+                shape=None,
+                weight_type=None,
+                weights=np.ones_like(tilt_angles),
+            )
+            if args.tilt_weighting in ("angle", None) and args.ctf_file is None:
+                wedge = WedgeReconstructed(
+                    angles=tilt_angles,
+                    weight_wedge=args.tilt_weighting == "angle",
+                    create_continuous_wedge=create_continuous_wedge,
+                )
+
+        wedge.opening_axis = args.wedge_axes[0]
+        wedge.tilt_axis = args.wedge_axes[1]
+        wedge.sampling_rate = template.sampling_rate
+        template_filter.append(wedge)
+        if not isinstance(wedge, WedgeReconstructed):
+            template_filter.append(ReconstructFromTilt(
+                reconstruction_filter = args.reconstruction_filter
+            ))
+
+    if args.ctf_file is not None:
+        from tme.preprocessing.tilt_series import CTF
+
+        ctf = CTF.from_file(args.ctf_file)
+        n_tilts_ctfs, n_tils_angles = len(ctf.defocus_x), len(wedge.angles)
+        if n_tilts_ctfs != n_tils_angles:
+            raise ValueError(
+                f"CTF file contains {n_tilts_ctfs} micrographs, but match_template "
+                f"recieved {n_tils_angles} tilt angles. Expected one angle "
+                "per micrograph."
+            )
+        ctf.angles = wedge.angles
+        ctf.opening_axis, ctf.tilt_axis = args.wedge_axes
+
+        if isinstance(template_filter[-1], ReconstructFromTilt):
+            template_filter.insert(-1, ctf)
+        else:
+            template_filter.insert(0, ctf)
+            template_filter.insert(1, ReconstructFromTilt(
+                reconstruction_filter = args.reconstruction_filter
+            ))
+
+    if args.lowpass or args.highpass is not None:
+        lowpass, highpass = args.lowpass, args.highpass
+        if args.pass_format == "voxel":
+            if lowpass is not None:
+                lowpass = np.max(np.multiply(lowpass, template.sampling_rate))
+            if highpass is not None:
+                highpass = np.max(np.multiply(highpass, template.sampling_rate))
+        elif args.pass_format == "frequency":
+            if lowpass is not None:
+                lowpass = np.max(np.divide(template.sampling_rate, lowpass))
+            if highpass is not None:
+                highpass = np.max(np.divide(template.sampling_rate, highpass))
+
+        bandpass = BandPassFilter(
+            use_gaussian=args.no_pass_smooth,
+            lowpass=lowpass,
+            highpass=highpass,
+            sampling_rate=template.sampling_rate,
+        )
+        template_filter.append(bandpass)
+        target_filter.append(bandpass)
+
+    if args.whiten_spectrum:
+        whitening_filter = LinearWhiteningFilter()
+        template_filter.append(whitening_filter)
+        target_filter.append(whitening_filter)
+
+    template_filter = Compose(template_filter) if len(template_filter) else None
+    target_filter = Compose(target_filter) if len(target_filter) else None
+
+    return template_filter, target_filter
+
+
 def parse_args():
     parser = argparse.ArgumentParser(description="Perform template matching.")
-
+
+    io_group = parser.add_argument_group("Input / Output")
+    io_group.add_argument(
         "-m",
         "--target",
         dest="target",
         type=str,
         required=True,
-        help="Path to a target in CCP4/MRC format
+        help="Path to a target in CCP4/MRC, EM, H5 or another format supported by "
+        "tme.density.Density.from_file "
+        "https://kosinskilab.github.io/pyTME/reference/api/tme.density.Density.from_file.html",
     )
-
+    io_group.add_argument(
         "--target_mask",
         dest="target_mask",
         type=str,
         required=False,
-        help="Path to a mask for the target
-    )
-    parser.add_argument(
-        "--cutoff_target",
-        dest="cutoff_target",
-        type=float,
-        required=False,
-        help="Target contour level (used for cropping).",
-        default=None,
+        help="Path to a mask for the target in a supported format (see target).",
     )
-
-        "--cutoff_template",
-        dest="cutoff_template",
-        type=float,
-        required=False,
-        help="Template contour level (used for cropping).",
-        default=None,
-    )
-    parser.add_argument(
-        "--no_centering",
-        dest="no_centering",
-        action="store_true",
-        help="If set, assumes the template is centered and omits centering.",
-    )
-    parser.add_argument(
+    io_group.add_argument(
         "-i",
         "--template",
         dest="template",
         type=str,
         required=True,
-        help="Path to a template in PDB/MMCIF or
+        help="Path to a template in PDB/MMCIF or other supported formats (see target).",
     )
-
+    io_group.add_argument(
         "--template_mask",
         dest="template_mask",
         type=str,
         required=False,
-        help="Path to a mask for the template in
+        help="Path to a mask for the template in a supported format (see target).",
     )
-
+    io_group.add_argument(
         "-o",
+        "--output",
         dest="output",
         type=str,
         required=False,
         default="output.pickle",
-        help="Path to output pickle file.",
+        help="Path to the output pickle file.",
+    )
+    io_group.add_argument(
+        "--invert_target_contrast",
+        dest="invert_target_contrast",
+        action="store_true",
+        default=False,
+        help="Invert the target's contrast and rescale linearly between zero and one. "
+        "This option is intended for targets where templates to-be-matched have "
+        "negative values, e.g. tomograms.",
+    )
+    io_group.add_argument(
+        "--scramble_phases",
+        dest="scramble_phases",
+        action="store_true",
+        default=False,
+        help="Phase scramble the template to generate a noise score background.",
     )
-
+
+    scoring_group = parser.add_argument_group("Scoring")
+    scoring_group.add_argument(
         "-s",
         dest="score",
         type=str,
         default="FLCSphericalMask",
+        choices=list(MATCHING_EXHAUSTIVE_REGISTER.keys()),
         help="Template matching scoring function.",
-        choices=MATCHING_EXHAUSTIVE_REGISTER.keys(),
     )
-
+    scoring_group.add_argument(
+        "-p",
+        dest="peak_calling",
+        action="store_true",
+        default=False,
+        help="Perform peak calling instead of score aggregation.",
+    )
+
+    angular_group = parser.add_argument_group("Angular Sampling")
+    angular_exclusive = angular_group.add_mutually_exclusive_group(required=True)
+
+    angular_exclusive.add_argument(
+        "-a",
+        dest="angular_sampling",
+        type=check_positive,
+        default=None,
+        help="Angular sampling rate using optimized rotational sets."
+        "A lower number yields more rotations. Values >= 180 sample only the identity.",
+    )
+    angular_exclusive.add_argument(
+        "--cone_angle",
+        dest="cone_angle",
+        type=check_positive,
+        default=None,
+        help="Half-angle of the cone to be sampled in degrees. Allows to sample a "
+        "narrow interval around a known orientation, e.g. for surface oversampling.",
+    )
+    angular_group.add_argument(
+        "--cone_sampling",
+        dest="cone_sampling",
+        type=check_positive,
+        default=None,
+        help="Sampling rate of the cone in degrees.",
+    )
+    angular_group.add_argument(
+        "--axis_angle",
+        dest="axis_angle",
+        type=check_positive,
+        default=360.0,
+        required=False,
+        help="Sampling angle along the z-axis of the cone. Defaults to 360.",
+    )
+    angular_group.add_argument(
+        "--axis_sampling",
+        dest="axis_sampling",
+        type=check_positive,
+        default=None,
+        required=False,
+        help="Sampling rate along the z-axis of the cone. Defaults to --cone_sampling.",
+    )
+    angular_group.add_argument(
+        "--axis_symmetry",
+        dest="axis_symmetry",
+        type=check_positive,
+        default=1,
+        required=False,
+        help="N-fold symmetry around z-axis of the cone.",
+    )
+    angular_group.add_argument(
+        "--no_use_optimized_set",
+        dest="no_use_optimized_set",
+        action="store_true",
+        default=False,
+        required=False,
+        help="Whether to use random uniform instead of optimized rotation sets.",
+    )
+
+    computation_group = parser.add_argument_group("Computation")
+    computation_group.add_argument(
         "-n",
         dest="cores",
         required=False,
@@ -228,7 +446,24 @@ def parse_args():
         default=4,
         help="Number of cores used for template matching.",
     )
-
+    computation_group.add_argument(
+        "--use_gpu",
+        dest="use_gpu",
+        action="store_true",
+        default=False,
+        help="Whether to perform computations on the GPU.",
+    )
+    computation_group.add_argument(
+        "--gpu_indices",
+        dest="gpu_indices",
+        type=str,
+        default=None,
+        help="Comma-separated list of GPU indices to use. For example,"
+        " 0,1 for the first and second GPU. Only used if --use_gpu is set."
+        " If not provided but --use_gpu is set, CUDA_VISIBLE_DEVICES will"
+        " be respected.",
+    )
+    computation_group.add_argument(
         "-r",
         "--ram",
         dest="memory",
@@ -237,168 +472,186 @@ def parse_args():
         default=None,
         help="Amount of memory that can be used in bytes.",
     )
-
+    computation_group.add_argument(
         "--memory_scaling",
         dest="memory_scaling",
         required=False,
-        type=
+        type=float,
         default=0.85,
-        help="Fraction of available memory that can be used."
-        "
+        help="Fraction of available memory that can be used. Defaults to 0.85 and is "
+        "ignored if --ram is set",
     )
-
-        "
-        dest="
-
-
-        help="Angular sampling rate for template matching. "
-        "A lower number yields more rotations. Values >= 180 sample only the identity.",
+    computation_group.add_argument(
+        "--temp_directory",
+        dest="temp_directory",
+        default=None,
+        help="Directory for temporary objects. Faster I/O improves runtime.",
     )
-
-
-
-
-
-
+
+    filter_group = parser.add_argument_group("Filters")
+    filter_group.add_argument(
+        "--lowpass",
+        dest="lowpass",
+        type=float,
+        required=False,
+        help="Resolution to lowpass filter template and target to in the same unit "
+        "as the sampling rate of template and target (typically Ångstrom).",
     )
-
-        "--
-        dest="
+    filter_group.add_argument(
+        "--highpass",
+        dest="highpass",
+        type=float,
+        required=False,
+        help="Resolution to highpass filter template and target to in the same unit "
+        "as the sampling rate of template and target (typically Ångstrom).",
+    )
+    filter_group.add_argument(
+        "--no_pass_smooth",
+        dest="no_pass_smooth",
+        action="store_false",
+        default=True,
+        help="Whether a hard edge filter should be used for --lowpass and --highpass."
+    )
+    filter_group.add_argument(
+        "--pass_format",
+        dest="pass_format",
+        type=str,
+        required=False,
+        choices=["sampling_rate", "voxel", "frequency"],
+        help="How values passed to --lowpass and --highpass should be interpreted. "
+        "By default, they are assumed to be in units of sampling rate, e.g. Ångstrom."
+    )
+    filter_group.add_argument(
+        "--whiten_spectrum",
+        dest="whiten_spectrum",
         action="store_true",
-        default=
-        help="
+        default=None,
+        help="Apply spectral whitening to template and target based on target spectrum.",
     )
-
-        "--
-        dest="
+    filter_group.add_argument(
+        "--wedge_axes",
+        dest="wedge_axes",
         type=str,
+        required=False,
         default=None,
-        help="
-        "
-        " If not provided but --use_gpu is set, CUDA_VISIBLE_DEVICES will"
-        " be respected.",
+        help="Indices of wedge opening and tilt axis, e.g. 0,2 for a wedge that is open "
+        "in z-direction and tilted over the x axis.",
     )
-
-        "--
-        dest="
+    filter_group.add_argument(
+        "--tilt_angles",
+        dest="tilt_angles",
+        type=str,
+        required=False,
+        default=None,
+        help="Path to a tab-separated file containing the column angles and optionally "
+        " weights, or comma separated start and stop stage tilt angle, e.g. 50,45, which "
+        " yields a continuous wedge mask. Alternatively, a tilt step size can be "
+        "specified like 50,45:5.0 to sample 5.0 degree tilt angle steps.",
+    )
+    filter_group.add_argument(
+        "--tilt_weighting",
+        dest="tilt_weighting",
+        type=str,
+        required=False,
+        choices=["angle", "relion", "grigorieff"],
+        default=None,
+        help="Weighting scheme used to reweight individual tilts. Available options: "
+        "angle (cosine based weighting), "
+        "relion (relion formalism for wedge weighting) requires,"
+        "grigorieff (exposure filter as defined in Grant and Grigorieff 2015)."
+        "relion and grigorieff require electron doses in --tilt_angles weights column.",
+    )
+    # filter_group.add_argument(
+    #     "--ctf_file",
+    #     dest="ctf_file",
+    #     type=str,
+    #     required=False,
+    #     default=None,
+    #     help="Path to a file with CTF parameters from CTFFIND4.",
+    # )
+    filter_group.add_argument(
+        "--reconstruction_filter",
+        dest="reconstruction_filter",
+        type=str,
+        required=False,
+        choices = ["ram-lak", "ramp", "shepp-logan", "cosine", "hamming"],
+        default=None,
+        help="Filter applied when reconstructing (N+1)-D from N-D filters.",
+    )
+
+    performance_group = parser.add_argument_group("Performance")
+    performance_group.add_argument(
+        "--cutoff_target",
+        dest="cutoff_target",
+        type=float,
+        required=False,
+        default=None,
+        help="Target contour level (used for cropping).",
+    )
+    performance_group.add_argument(
+        "--cutoff_template",
+        dest="cutoff_template",
+        type=float,
+        required=False,
+        default=None,
+        help="Template contour level (used for cropping).",
+    )
+    performance_group.add_argument(
+        "--no_centering",
+        dest="no_centering",
         action="store_true",
-
-        help="Invert the target contrast via multiplication with negative one and"
-        " linear rescaling between zero and one. Note that this might lead to"
-        " different baseline scores of individual target splits when using"
-        " unnormalized scores. This option is intended for targets, where the"
-        " object to-be-matched has negative values, i.e. tomograms.",
+        help="Assumes the template is already centered and omits centering.",
     )
-
+    performance_group.add_argument(
         "--no_edge_padding",
         dest="no_edge_padding",
         action="store_true",
         default=False,
-        help="Whether to pad the edges of the target.
-        " has a well defined bounding box, e.g. a
+        help="Whether to not pad the edges of the target. Can be set if the target"
+        " has a well defined bounding box, e.g. a masked reconstruction.",
     )
-
+    performance_group.add_argument(
         "--no_fourier_padding",
         dest="no_fourier_padding",
         action="store_true",
         default=False,
-        help="Whether input arrays should be zero-padded to
-        "
-        "
-    )
-    parser.add_argument(
-        "--scramble_phases",
-        dest="scramble_phases",
-        action="store_true",
-        default=False,
-        help="Whether to phase scramble the template for subsequent normalization.",
+        help="Whether input arrays should not be zero-padded to full convolution shape "
+        "for numerical stability. When working with very large targets, e.g. tomograms, "
+        "it is safe to use this flag and benefit from the performance gain.",
     )
-
+    performance_group.add_argument(
         "--interpolation_order",
         dest="interpolation_order",
         required=False,
         type=int,
         default=3,
-        help="Spline interpolation used
-        "
+        help="Spline interpolation used for template rotations. If less than zero "
+        "no interpolation is performed.",
    )
-
+    performance_group.add_argument(
         "--use_mixed_precision",
         dest="use_mixed_precision",
         action="store_true",
         default=False,
         help="Use float16 for real values operations where possible.",
     )
-
+    performance_group.add_argument(
         "--use_memmap",
         dest="use_memmap",
         action="store_true",
         default=False,
-        help="Use memmaps to offload large data objects to disk.
-        "
+        help="Use memmaps to offload large data objects to disk. "
+        "Particularly useful for large inputs in combination with --use_gpu.",
     )
-
-
-
-
-
-    )
-    parser.add_argument(
-        "--gaussian_sigma",
-        dest="gaussian_sigma",
-        type=float,
-        required=False,
-        help="Sigma parameter for Gaussian filtering the template.",
-    )
-    parser.add_argument(
-        "--bandpass_band",
-        dest="bandpass_band",
-        type=str,
-        required=False,
-        help="Comma separated start and stop frequency for bandpass filtering the"
-        " template, e.g. 0.1, 0.5",
-    )
-    parser.add_argument(
-        "--bandpass_smooth",
-        dest="bandpass_smooth",
-        type=float,
-        required=False,
-        default=None,
-        help="Smooth parameter for the bandpass filter.",
-    )
-    parser.add_argument(
-        "--tilt_range",
-        dest="tilt_range",
-        type=str,
-        required=False,
-        help="Comma separated start and stop stage tilt angle, e.g. '50,45'. Used"
-        " to create a wedge mask to be applied to the template.",
-    )
-    parser.add_argument(
-        "--tilt_step",
-        dest="tilt_step",
-        type=float,
-        required=False,
-        default=None,
-        help="Step size between tilts, e.g. '5'. When set the wedge mask"
-        " reflects the individual tilts, otherwise a continuous mask is used.",
-    )
-    parser.add_argument(
-        "--wedge_axes",
-        dest="wedge_axes",
-        type=str,
+
+    analyzer_group = parser.add_argument_group("Analyzer")
+    analyzer_group.add_argument(
+        "--score_threshold",
+        dest="score_threshold",
         required=False,
-        default="0,2",
-        help="Axis index of wedge opening and tilt axis, e.g. 0,2 for a wedge that is open in"
-        " z and tilted over x.",
-    )
-    parser.add_argument(
-        "--wedge_smooth",
-        dest="wedge_smooth",
         type=float,
-
-
-        help="Gaussian sigma used to smooth the wedge mask.",
+        default=0,
+        help="Minimum template matching scores to consider for analysis.",
     )

     args = parser.parse_args()
@@ -406,6 +659,8 @@ def parse_args():
     if args.interpolation_order < 0:
         args.interpolation_order = None

+    args.ctf_file = None
+
     if args.temp_directory is None:
         default = abspath(".")
         if os.environ.get("TMPDIR", None) is not None:
@@ -438,6 +693,21 @@ def parse_args():
                 int(x) for x in os.environ["CUDA_VISIBLE_DEVICES"].split(",")
             ]

+    if args.tilt_angles is not None:
+        if args.wedge_axes is None:
+            raise ValueError("Need to specify --wedge_axes when --tilt_angles is set.")
+        if not exists(args.tilt_angles):
+            try:
+                float(args.tilt_angles.split(",")[0])
+            except ValueError:
+                raise ValueError(f"{args.tilt_angles} is not a file nor a range.")
+
+    if args.ctf_file is not None and args.tilt_angles is None:
+        raise ValueError("Need to specify --tilt_angles when --ctf_file is set.")
+
+    if args.wedge_axes is not None:
+        args.wedge_axes = tuple(int(i) for i in args.wedge_axes.split(","))
+
     return args


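The --tilt_angles value validated above is either a file path or an angle specification; the two range forms ("50,45" for a continuous wedge, "50,45:5.0" for discrete tilts) are parsed in setup_filter() earlier in this diff. Below is a standalone sketch that restates that parsing for clarity; the helper name and example values are illustrative and not part of the package:

import numpy as np

def parse_tilt_spec(spec: str):
    # Returns a (start, stop) pair for a continuous wedge or a list of tilt angles.
    tilt_start, tilt_stop = spec.split(",")
    tilt_step = None
    if ":" in tilt_stop:
        tilt_stop, tilt_step = tilt_stop.split(":")
    tilt_start, tilt_stop = float(tilt_start), float(tilt_stop)
    if tilt_step is None:
        return (tilt_start, tilt_stop)
    tilt_step = float(tilt_step)
    return np.arange(-tilt_start, tilt_stop + tilt_step, tilt_step).tolist()

print(parse_tilt_spec("50,45"))      # (50.0, 45.0) -> continuous wedge mask
print(parse_tilt_spec("50,45:5.0"))  # [-50.0, -45.0, ..., 45.0] -> one angle per tilt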
@@ -514,51 +784,6 @@ def main():
         },
     )

-    template_filter = {}
-    if args.gaussian_sigma is not None:
-        template.data = Preprocessor().gaussian_filter(
-            sigma=args.gaussian_sigma, template=template.data
-        )
-
-    if args.bandpass_band is not None:
-        bandpass_start, bandpass_stop = [
-            float(x) for x in args.bandpass_band.split(",")
-        ]
-        if args.bandpass_smooth is None:
-            args.bandpass_smooth = 0
-
-        template_filter["bandpass_mask"] = {
-            "minimum_frequency": bandpass_start,
-            "maximum_frequency": bandpass_stop,
-            "gaussian_sigma": args.bandpass_smooth,
-        }
-
-    if args.tilt_range is not None:
-        args.wedge_smooth if args.wedge_smooth is not None else 0
-        tilt_start, tilt_stop = [float(x) for x in args.tilt_range.split(",")]
-        opening_axis, tilt_axis = [int(x) for x in args.wedge_axes.split(",")]
-
-        if args.tilt_step is not None:
-            template_filter["step_wedge_mask"] = {
-                "start_tilt": tilt_start,
-                "stop_tilt": tilt_stop,
-                "tilt_step": args.tilt_step,
-                "sigma": args.wedge_smooth,
-                "opening_axis": opening_axis,
-                "tilt_axis": tilt_axis,
-                "omit_negative_frequencies": True,
-            }
-        else:
-            template_filter["continuous_wedge_mask"] = {
-                "start_tilt": tilt_start,
-                "stop_tilt": tilt_stop,
-                "tilt_axis": tilt_axis,
-                "opening_axis": opening_axis,
-                "infinite_plane": True,
-                "sigma": args.wedge_smooth,
-                "omit_negative_frequencies": True,
-            }
-
     if template_mask is None:
         template_mask = template.empty
         if not args.no_centering:
@@ -672,21 +897,13 @@ def main():
         )
         exit(-1)

-    analyzer_args = {
-        "score_threshold": 0,
-        "number_of_peaks": 1000,
-        "convolution_mode": "valid",
-        "use_memmap": args.use_memmap,
-    }
-
     matching_setup, matching_score = MATCHING_EXHAUSTIVE_REGISTER[args.score]
     matching_data = MatchingData(target=target, template=template.data)
-    matching_data.rotations =
-
-    )
-
-
-        matching_data.rotations = np.eye(ndim).reshape(1, ndim, ndim)
+    matching_data.rotations = parse_rotation_logic(args=args, ndim=target.data.ndim)
+
+    template_filter, target_filter = setup_filter(args, template, target)
+    matching_data.template_filter = template_filter
+    matching_data.target_filter = target_filter

     matching_data.template_filter = template_filter
     matching_data._invert_target = args.invert_target_contrast
@@ -724,10 +941,35 @@ def main():
         label_width=max(len(key) for key in options.keys()) + 2,
     )

-
+    filter_args = {
+        "Lowpass": args.lowpass,
+        "Highpass": args.highpass,
+        "Smooth Pass": args.no_pass_smooth,
+        "Pass Format" : args.pass_format,
+        "Spectral Whitening": args.whiten_spectrum,
+        "Wedge Axes": args.wedge_axes,
+        "Tilt Angles": args.tilt_angles,
+        "Tilt Weighting": args.tilt_weighting,
+        "CTF": args.ctf_file,
+    }
+    filter_args = {k: v for k, v in filter_args.items() if v is not None}
+    if len(filter_args):
+        print_block(
+            name="Filters",
+            data=filter_args,
+            label_width=max(len(key) for key in options.keys()) + 2,
+        )
+
+    analyzer_args = {
+        "score_threshold": args.score_threshold,
+        "number_of_peaks": 1000,
+        "convolution_mode": "valid",
+        "use_memmap": args.use_memmap,
+    }
+    analyzer_args = {"Analyzer": callback_class, **analyzer_args}
     print_block(
         name="Score Analysis Options",
-        data=
+        data=analyzer_args,
         label_width=max(len(key) for key in options.keys()) + 2,
     )
     print("\n" + "-" * 80)
@@ -766,7 +1008,6 @@ def main():
             )
         for i, x in candidates[3].items()
     }
-
     candidates.append((target.origin, template.origin, target.sampling_rate, args))
     write_pickle(data=candidates, filename=args.output)

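main() now derives rotations via parse_rotation_logic(), which selects between optimized angular sets (-a) and cone-restricted sampling (--cone_angle / --cone_sampling). The following is a minimal sketch of the same selection outside the CLI, assuming only that get_rotation_matrices and get_rotations_around_vector accept the keyword arguments shown in the diff; the Namespace values are made up:

from argparse import Namespace

import numpy as np
from tme.matching_utils import get_rotation_matrices, get_rotations_around_vector

# Hypothetical settings exercising the cone-sampling branch.
args = Namespace(
    angular_sampling=None, no_use_optimized_set=False,
    cone_angle=15.0, cone_sampling=2.5,
    axis_angle=360.0, axis_sampling=None, axis_symmetry=1,
)

if args.angular_sampling is not None:
    rotations = get_rotation_matrices(
        angular_sampling=args.angular_sampling,
        dim=3,
        use_optimized_set=not args.no_use_optimized_set,
    )
    if args.angular_sampling >= 180:
        rotations = np.eye(3).reshape(1, 3, 3)  # identity only, as in parse_rotation_logic
else:
    rotations = get_rotations_around_vector(
        cone_angle=args.cone_angle,
        cone_sampling=args.cone_sampling,
        axis_angle=args.axis_angle,
        axis_sampling=args.axis_sampling or args.cone_sampling,
        n_symmetry=args.axis_symmetry,
    )

# The resulting stack of rotation matrices is what gets assigned to MatchingData.rotations.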