pytme 0.2.9.post1__cp311-cp311-macosx_15_0_arm64.whl → 0.3.0__cp311-cp311-macosx_15_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. pytme-0.3.0.data/scripts/estimate_memory_usage.py +76 -0
  2. pytme-0.3.0.data/scripts/match_template.py +1106 -0
  3. {pytme-0.2.9.post1.data → pytme-0.3.0.data}/scripts/postprocess.py +320 -190
  4. {pytme-0.2.9.post1.data → pytme-0.3.0.data}/scripts/preprocess.py +21 -31
  5. {pytme-0.2.9.post1.data → pytme-0.3.0.data}/scripts/preprocessor_gui.py +85 -19
  6. pytme-0.3.0.data/scripts/pytme_runner.py +771 -0
  7. {pytme-0.2.9.post1.dist-info → pytme-0.3.0.dist-info}/METADATA +21 -20
  8. pytme-0.3.0.dist-info/RECORD +126 -0
  9. {pytme-0.2.9.post1.dist-info → pytme-0.3.0.dist-info}/entry_points.txt +2 -1
  10. pytme-0.3.0.dist-info/licenses/LICENSE +339 -0
  11. scripts/estimate_memory_usage.py +76 -0
  12. scripts/eval.py +93 -0
  13. scripts/extract_candidates.py +224 -0
  14. scripts/match_template.py +349 -378
  15. pytme-0.2.9.post1.data/scripts/match_template.py → scripts/match_template_filters.py +213 -148
  16. scripts/postprocess.py +320 -190
  17. scripts/preprocess.py +21 -31
  18. scripts/preprocessor_gui.py +85 -19
  19. scripts/pytme_runner.py +771 -0
  20. scripts/refine_matches.py +625 -0
  21. tests/preprocessing/test_frequency_filters.py +28 -14
  22. tests/test_analyzer.py +41 -36
  23. tests/test_backends.py +1 -0
  24. tests/test_matching_cli.py +109 -54
  25. tests/test_matching_data.py +5 -5
  26. tests/test_matching_exhaustive.py +1 -2
  27. tests/test_matching_optimization.py +4 -9
  28. tests/test_matching_utils.py +1 -1
  29. tests/test_orientations.py +0 -1
  30. tme/__version__.py +1 -1
  31. tme/analyzer/__init__.py +2 -0
  32. tme/analyzer/_utils.py +26 -21
  33. tme/analyzer/aggregation.py +395 -222
  34. tme/analyzer/base.py +127 -0
  35. tme/analyzer/peaks.py +189 -204
  36. tme/analyzer/proxy.py +123 -0
  37. tme/backends/__init__.py +4 -3
  38. tme/backends/_cupy_utils.py +25 -24
  39. tme/backends/_jax_utils.py +20 -18
  40. tme/backends/cupy_backend.py +13 -26
  41. tme/backends/jax_backend.py +24 -23
  42. tme/backends/matching_backend.py +4 -3
  43. tme/backends/mlx_backend.py +4 -3
  44. tme/backends/npfftw_backend.py +34 -30
  45. tme/backends/pytorch_backend.py +18 -4
  46. tme/cli.py +126 -0
  47. tme/density.py +9 -7
  48. tme/extensions.cpython-311-darwin.so +0 -0
  49. tme/filters/__init__.py +3 -3
  50. tme/filters/_utils.py +36 -10
  51. tme/filters/bandpass.py +229 -188
  52. tme/filters/compose.py +5 -4
  53. tme/filters/ctf.py +516 -254
  54. tme/filters/reconstruction.py +91 -32
  55. tme/filters/wedge.py +196 -135
  56. tme/filters/whitening.py +37 -42
  57. tme/matching_data.py +28 -39
  58. tme/matching_exhaustive.py +31 -27
  59. tme/matching_optimization.py +5 -4
  60. tme/matching_scores.py +25 -15
  61. tme/matching_utils.py +193 -27
  62. tme/memory.py +4 -3
  63. tme/orientations.py +22 -9
  64. tme/parser.py +114 -33
  65. tme/preprocessor.py +6 -5
  66. tme/rotations.py +10 -7
  67. tme/structure.py +4 -3
  68. pytme-0.2.9.post1.data/scripts/estimate_ram_usage.py +0 -97
  69. pytme-0.2.9.post1.dist-info/RECORD +0 -119
  70. pytme-0.2.9.post1.dist-info/licenses/LICENSE +0 -153
  71. scripts/estimate_ram_usage.py +0 -97
  72. tests/data/Maps/.DS_Store +0 -0
  73. tests/data/Structures/.DS_Store +0 -0
  74. {pytme-0.2.9.post1.dist-info → pytme-0.3.0.dist-info}/WHEEL +0 -0
  75. {pytme-0.2.9.post1.dist-info → pytme-0.3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,625 @@
1
+ #!python3
2
+ """ Iterative template matching parameter tuning.
3
+
4
+ Copyright (c) 2024 European Molecular Biology Laboratory
5
+
6
+ Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
7
+ """
8
+ import argparse
9
+ import subprocess
10
+ from sys import exit
11
+ from time import time
12
+ from shutil import copyfile
13
+ from typing import Tuple, List, Dict
14
+
15
+ import numpy as np
16
+ from scipy import optimize
17
+ from sklearn.metrics import roc_auc_score
18
+
19
+ from tme import Orientations, Density
20
+ from tme.matching_utils import generate_tempfile_name, load_pickle, write_pickle, create_mask
21
+ from tme.matching_exhaustive import MATCHING_EXHAUSTIVE_REGISTER
22
+
23
def parse_range(x: str):
    """Convert a ``start:stop:step`` string into a :class:`range` object."""
    parts = x.split(":")
    start, stop, step = (int(part) for part in parts)
    return range(start, stop, step)
26
+
27
def parse_args():
    """Parse and validate command-line arguments.

    Returns
    -------
    argparse.Namespace
        Parsed arguments. ``lowpass_range`` and ``highpass_range`` are
        converted from ``start:stop:step`` strings to iterables of cutoff
        values, or ``(None,)`` when given as the literal string "None".

    Raises
    ------
    ValueError
        If neither (or both) of ``--input_file`` and the
        ``--target``/``--template`` pair are specified.
    """
    parser = argparse.ArgumentParser(
        description="Refine template matching candidates using deep matching.",
    )
    io_group = parser.add_argument_group("Input / Output")
    io_group.add_argument(
        "--orientations",
        required=True,
        type=str,
        help="Path to an orientations file in a supported format. See "
        "https://kosinskilab.github.io/pyTME/reference/api/tme.orientations.Orientations.from_file.html"
        " for available options.",
    )
    io_group.add_argument(
        "--output_prefix", required=True, type=str, help="Path to write output to."
    )
    io_group.add_argument(
        "--iterations",
        required=False,
        default=0,
        type=int,
        help="Number of refinement iterations to perform.",
    )
    io_group.add_argument(
        "--verbose",
        action="store_true",
        default=False,
        help="More verbose and more files written to disk.",
    )
    matching_group = parser.add_argument_group("Template Matching")
    matching_group.add_argument(
        "--input_file",
        required=False,
        type=str,
        help="Path to the output of match_template.py.",
    )
    matching_group.add_argument(
        "-m",
        "--target",
        dest="target",
        type=str,
        required=False,
        help="Path to a target in CCP4/MRC, EM, H5 or another format supported by "
        "tme.density.Density.from_file "
        "https://kosinskilab.github.io/pyTME/reference/api/tme.density.Density.from_file.html",
    )
    matching_group.add_argument(
        "--target_mask",
        dest="target_mask",
        type=str,
        required=False,
        help="Path to a mask for the target in a supported format (see target).",
    )
    matching_group.add_argument(
        "-i",
        "--template",
        dest="template",
        type=str,
        required=False,
        help="Path to a template in PDB/MMCIF or other supported formats (see target).",
    )
    matching_group.add_argument(
        "--template_mask",
        dest="template_mask",
        type=str,
        required=False,
        help="Path to a mask for the template in a supported format (see target).",
    )
    matching_group.add_argument(
        "--invert_target_contrast",
        dest="invert_target_contrast",
        action="store_true",
        default=False,
        help="Invert the target's contrast and rescale linearly between zero and one. "
        "This option is intended for targets where templates to-be-matched have "
        "negative values, e.g. tomograms.",
    )
    matching_group.add_argument(
        "--angular_sampling",
        dest="angular_sampling",
        required=True,
        default=None,
        help="Angular sampling rate using optimized rotational sets."
        "A lower number yields more rotations. Values >= 180 sample only the identity.",
    )
    matching_group.add_argument(
        "-s",
        dest="score",
        type=str,
        default="FLCSphericalMask",
        choices=list(MATCHING_EXHAUSTIVE_REGISTER.keys()),
        help="Template matching scoring function.",
    )
    matching_group.add_argument(
        "-n",
        dest="cores",
        required=False,
        type=int,
        default=4,
        help="Number of cores used for template matching.",
    )
    matching_group.add_argument(
        "--use_gpu",
        dest="use_gpu",
        action="store_true",
        default=False,
        help="Whether to perform computations on the GPU.",
    )
    matching_group.add_argument(
        "--no_centering",
        dest="no_centering",
        action="store_true",
        help="Assumes the template is already centered and omits centering.",
    )
    matching_group.add_argument(
        "--no_edge_padding",
        dest="no_edge_padding",
        action="store_true",
        default=False,
        help="Whether to not pad the edges of the target. Can be set if the target"
        " has a well defined bounding box, e.g. a masked reconstruction.",
    )
    matching_group.add_argument(
        "--no_fourier_padding",
        dest="no_fourier_padding",
        action="store_true",
        default=False,
        help="Whether input arrays should not be zero-padded to full convolution shape "
        "for numerical stability. When working with very large targets, e.g. tomograms, "
        "it is safe to use this flag and benefit from the performance gain.",
    )

    peak_group = parser.add_argument_group("Peak Calling")
    peak_group.add_argument(
        "--number_of_peaks",
        type=int,
        default=100,
        required=False,
        # Fixed: the help text previously claimed "Default 1000" although
        # the actual default is 100.
        help="Upper limit of peaks to call, subject to filtering parameters. Default 100. "
        "If minimum_score is provided all peaks scoring higher will be reported.",
    )
    extraction_group = parser.add_argument_group("Extraction")
    extraction_group.add_argument(
        "--keep_out_of_box",
        action="store_true",
        required=False,
        help="Whether to keep orientations that fall outside the box. If the "
        "orientations are sensible, it is safe to pass this flag.",
    )

    optimization_group = parser.add_argument_group("Optimization")
    optimization_group.add_argument(
        "--lowpass",
        dest="lowpass",
        action="store_true",
        default=False,
        help="Optimize template matching lowpass filter cutoff.",
    )
    optimization_group.add_argument(
        "--highpass",
        dest="highpass",
        action="store_true",
        default=False,
        help="Optimize template matching highpass filter cutoff.",
    )
    optimization_group.add_argument(
        "--lowpass-range",
        dest="lowpass_range",
        type=str,
        default="0:50:5",
        # Fixed: previously a copy of the --lowpass help text.
        help="Range of lowpass cutoffs to scan, formatted as start:stop:step, "
        "or the literal 'None' to disable.",
    )
    optimization_group.add_argument(
        "--highpass-range",
        dest="highpass_range",
        type=str,
        default="0:50:5",
        # Fixed: previously a copy of the --highpass help text.
        help="Range of highpass cutoffs to scan, formatted as start:stop:step, "
        "or the literal 'None' to disable.",
    )
    optimization_group.add_argument(
        "--translation-uncertainty",
        dest="translation_uncertainty",
        type=int,
        default=None,
        # Fixed: previously a copy of the --highpass help text.
        help="Radius of an ellipsoidal mask around each candidate, restricting "
        "how far refined peaks may move from their input position.",
    )

    args = parser.parse_args()

    data_present = args.target is not None and args.template is not None
    if args.input_file is None and not data_present:
        raise ValueError(
            "Either --input_file or --target and --template need to be specified."
        )
    elif args.input_file is not None and data_present:
        # Fixed typo: "Please specific either" -> "Please specify either".
        raise ValueError(
            "Please specify either --input_file or --target and --template."
        )

    # The literal string "None" disables the corresponding scan; a single
    # None entry keeps the downstream nested loops uniform.
    if args.lowpass_range != "None":
        args.lowpass_range = parse_range(args.lowpass_range)
    else:
        args.lowpass_range = (None,)
    if args.highpass_range != "None":
        args.highpass_range = parse_range(args.highpass_range)
    else:
        args.highpass_range = (None,)
    return args
236
+
237
+
238
def argdict_to_command(input_args: Dict, executable: str) -> str:
    """Render an argument dictionary as a single shell command string.

    ``None`` values are skipped entirely, boolean values act as
    presence/absence flags, and every other entry becomes a
    ``<key> <value>`` pair. The executable is prepended.

    Note: the return annotation previously said ``List`` although the
    function has always returned the joined string.
    """
    ret = []
    for key, value in input_args.items():
        if value is None:
            continue
        elif isinstance(value, bool):
            if value:
                ret.append(key)
        else:
            ret.extend([key, value])

    ret = [str(x) for x in ret]
    ret.insert(0, executable)
    return " ".join(ret)
252
+
253
def run_command(command):
    """Execute *command* through the shell, terminating the program on failure.

    On success returns None; on a non-zero exit status the captured
    stdout/stderr are printed and the process exits with -1.
    """
    result = subprocess.run(command, capture_output=True, shell=True)
    if result.returncode == 0:
        return None

    print(f"Error when executing: {command}.")
    print(f"Stdout: {result.stdout.decode('utf-8')}")
    print(f"Stderr: {result.stderr.decode('utf-8')}")
    exit(-1)
262
+
263
def create_stacking_argdict(args) -> Dict:
    """Build the flag dictionary for the extract_candidates.py invocation."""
    return {
        "--target": args.target,
        "--template": args.template,
        "--orientations": args.orientations,
        "--output_file": args.candidate_stack_path,
        "--keep_out_of_box": args.keep_out_of_box,
    }
272
+
273
+
274
def create_matching_argdict(args) -> Dict:
    """Build the flag dictionary for the match_template.py invocation.

    Fourier and edge padding are always disabled here, since matching runs
    on pre-extracted candidate stacks with well-defined bounds.
    """
    return {
        "--target": args.target,
        "--template": args.template,
        "--template_mask": args.template_mask,
        "-o": args.match_template_path,
        "-a": args.angular_sampling,
        "-s": args.score,
        "--no_fourier_padding": True,
        "--no_edge_padding": True,
        "--no_centering": args.no_centering,
        "-n": args.cores,
        "--invert_target_contrast": args.invert_target_contrast,
        "--use_gpu": args.use_gpu,
    }
290
+
291
+
292
def create_postprocessing_argdict(args) -> Dict:
    """Build the flag dictionary for the postprocess.py invocation.

    Edge masking is only enabled when no explicit target mask is supplied;
    a user-provided mask supersedes automatic edge masking.
    """
    return {
        "--input_file": args.match_template_path,
        "--target_mask": args.target_mask,
        "--output_prefix": args.new_orientations_path,
        "--peak_caller": "PeakCallerMaximumFilter",
        "--number_of_peaks": args.number_of_peaks,
        "--output_format": "orientations",
        "--mask_edges": args.target_mask is None,
    }
305
+
306
+
307
def update_orientations(old: Orientations, new: Orientations, args, **kwargs) -> Orientations:
    """Map peaks found in the candidate stack back onto target coordinates.

    The first translation column of *new* indexes which slice of the
    candidate stack (and hence which entry of *old*) a detection belongs
    to; the remaining columns are positions inside that slice, converted
    here to offsets relative to the stack center.
    """
    stack_shape = Density.from_file(args.candidate_stack_path, use_memmap=True).shape
    center = np.add(np.divide(stack_shape, 2).astype(int), np.mod(stack_shape, 2))

    indices = new.translations[:, 0].astype(int)
    offsets = np.subtract(new.translations, center)[:, 1:]
    absolute = np.add(old.translations[indices], offsets)

    ret = old.copy()
    # Entries without a refined peak keep a score of zero.
    ret.scores[:] = 0
    ret.scores[indices] = new.scores
    ret.translations[indices] = absolute

    # NOTE(review): the effect of --align_orientations should be handled here.
    return ret
323
+
324
+
325
class DeepMatcher:
    """Evaluates template-matching filter parameters over labelled candidates.

    Each evaluation shells out to match_template_filters.py and
    postprocess.py on a pre-extracted candidate stack, then scores the
    parameter setting via ROC-AUC of candidate labels vs. peak scores.
    """

    def __init__(self, args, margin: float = 0.5):
        # Parsed CLI namespace; must already carry candidate_stack_path.
        self.args = args
        # Contrastive-loss margin — only referenced by the commented-out loss.
        self.margin = margin
        self.orientations = Orientations.from_file(args.orientations)

        # Matching runs against the extracted candidate stack, not the full target.
        match_template_args = create_matching_argdict(args)
        match_template_args["--target"] = args.candidate_stack_path
        self.match_template_args = match_template_args

        # Filter parameters subject to optimization; the stored values double
        # as initial guesses (see get_initial_values).
        self.filter_parameters = {}
        if args.lowpass:
            self.filter_parameters["--lowpass"] = 0
        if args.highpass:
            self.filter_parameters["--highpass"] = 200
        # self.filter_parameters["--whiten"] = False
        self.filter_parameters["--no_filter_target"] = False

        # Only the single best peak per stack slice is kept.
        self.postprocess_args = create_postprocessing_argdict(args)
        self.postprocess_args["--number_of_peaks"] = 1

    def get_initial_values(self) -> Tuple[float]:
        """Return the current filter parameter values as a tuple of floats."""
        ret = tuple(float(x) for x in self.filter_parameters.values())
        return ret

    def format_parameters(self, parameter_values: Tuple[float]) -> Dict:
        """Zip raw optimizer values back onto parameter names.

        Values for parameters that were registered as booleans are
        thresholded at 0.5 so continuous optimizers can toggle flags.
        """
        ret = {}
        for value, key in zip(parameter_values, self.filter_parameters.keys()):
            ret[key] = value
            if isinstance(self.filter_parameters[key], bool):
                ret[key] = value > 0.5
        return ret

    def forward(self, x: Tuple[float]):
        """Evaluate parameter tuple *x* and return its ROC-AUC as the loss."""
        # Label 1 -> True positive, label 0 -> false positive
        orientations_new = self(x)
        label, score = orientations_new.details, orientations_new.scores
        # loss = np.add(
        #     (1 - label) * np.square(score),
        #     label * np.square(np.fmax(self.margin - score, 0.0))
        # )
        # loss = loss.mean()

        # NOTE(review): roc_auc_score is a quality measure (higher is better),
        # yet main() selects the parameter set with the *smallest* value —
        # verify the intended optimization direction.
        loss = roc_auc_score(label, score)
        # print(
        #     np.mean(score[label == 1]), np.mean(score[label == 0]),
        #     *x, loss, time()
        # )

        return loss

    def __call__(self, x: Tuple[float]):
        """Run matching + postprocessing for parameters *x*.

        Executes two passes: a plain one, then a phase-scrambled background
        run intended for noise correction. Returns the refined orientations
        of the second pass.
        """
        filter_parameters = self.format_parameters(x)
        self.match_template_args.update(filter_parameters)
        match_template = argdict_to_command(
            self.match_template_args,
            executable="python3 $HOME/src/pytme/scripts/match_template_filters.py",
        )
        run_command(match_template)

        # Assume we get a new peak for each input in the same order
        postprocess = argdict_to_command(
            self.postprocess_args,
            executable="python3 $HOME/src/pytme/scripts/postprocess.py",
        )
        run_command(postprocess)

        orientations_new = Orientations.from_file(
            f"{self.postprocess_args['--output_prefix']}.tsv"
        )
        orientations_new = update_orientations(
            new=orientations_new,
            old=self.orientations,
            args=self.args
        )

        # Diagnostic line: mean TP score, mean FP score, parameters, pass id, AUC, timestamp.
        label, score = orientations_new.details, orientations_new.scores
        loss = roc_auc_score(label, score)
        print(
            np.mean(score[label == 1]), np.mean(score[label == 0]),
            *x, 0, loss, time()
        )

        # Rerun with noise correction
        temp_args = self.match_template_args.copy()
        background_file = generate_tempfile_name(".pickle")
        temp_args["--scramble_phases"] = True
        temp_args["-o"] = background_file
        match_template = argdict_to_command(
            temp_args,
            executable="python3 $HOME/src/pytme/scripts/match_template_filters.py",
        )
        run_command(match_template)
        temp_args = self.match_template_args.copy()
        # NOTE(review): --background_file is set on temp_args, but the
        # postprocess call below is built from self.postprocess_args, so the
        # scrambled background appears to never be consumed — confirm wiring.
        temp_args["--background_file"] = background_file
        postprocess = argdict_to_command(
            self.postprocess_args,
            executable="python3 $HOME/src/pytme/scripts/postprocess.py",
        )
        run_command(postprocess)

        orientations_new = Orientations.from_file(
            f"{self.postprocess_args['--output_prefix']}.tsv"
        )
        orientations_new = update_orientations(
            new=orientations_new,
            old=self.orientations,
            args=self.args
        )

        label, score = orientations_new.details, orientations_new.scores
        loss = roc_auc_score(label, score)
        print(
            np.mean(score[label == 1]), np.mean(score[label == 0]),
            *x, 1, loss, time()
        )

        return orientations_new

    # Earlier variant retained by the author: normalizes the score map by a
    # phase-scrambled run in-process instead of via --background_file.
    # def __call__(self, x: Tuple[float]):
    #     filter_parameters = self.format_parameters(x)
    #     # print(filter_parameters)
    #     self.match_template_args.update(filter_parameters)
    #     match_template = argdict_to_command(
    #         self.match_template_args,
    #         executable="python3 $HOME/src/pytme/scripts/match_template_filters.py",
    #     )
    #     run_command(match_template)

    #     data = load_pickle(self.args.match_template_path)
    #     temp_args = self.match_template_args.copy()
    #     temp_args["--scramble_phases"] = True
    #     # write_pickle(data, "/home/vmaurer/deep_matching/t.pickle")

    #     match_template = argdict_to_command(
    #         temp_args,
    #         executable="python3 $HOME/src/pytme/scripts/match_template_filters.py",
    #     )
    #     run_command(match_template)
    #     data_norm = load_pickle(self.args.match_template_path)
    #     # write_pickle(data_norm, "/home/vmaurer/deep_matching/noise.pickle")

    #     data[0] = (data[0] - data_norm[0]) / (1 - data_norm[0])
    #     data[0] = np.fmax(data[0], 0)
    #     write_pickle(data, self.args.match_template_path)

    #     # Assume we get a new peak for each input in the same order
    #     postprocess = argdict_to_command(
    #         self.postprocess_args,
    #         executable="python3 $HOME/src/pytme/scripts/postprocess.py",
    #     )
    #     run_command(postprocess)

    #     orientations_new = Orientations.from_file(
    #         f"{self.postprocess_args['--output_prefix']}.tsv"
    #     )
    #     orientations_new = update_orientations(
    #         new=orientations_new,
    #         old=self.orientations,
    #         args=self.args
    #     )

    #     return orientations_new
494
+
495
+
496
def main():
    """Entry point: extract candidates, scan filter parameters, refine orientations.

    Indentation of the loop bodies below was reconstructed from context
    (the diff source lost leading whitespace) — TODO confirm against the
    released scripts/refine_matches.py.
    """
    args = parse_args()

    if args.input_file is not None:
        # Recover target/template paths from a previous match_template run.
        data = load_pickle(args.input_file)
        target_origin, _, sampling_rate, cli_args = data[-1]
        args.target, args.template = cli_args.target, cli_args.template

    # Scratch files shared by all subprocess invocations.
    args.candidate_stack_path = generate_tempfile_name(suffix=".h5")
    args.new_orientations_path = generate_tempfile_name()
    args.match_template_path = generate_tempfile_name()

    match_deep = DeepMatcher(args)
    initial_values = match_deep.get_initial_values()

    # Do a single pass over the data
    # NOTE(review): DeepMatcher.__init__ always registers --no_filter_target,
    # so initial_values is never empty and this branch looks unreachable — verify.
    if len(initial_values) == 0:
        create_image_stack = create_stacking_argdict(args)
        create_image_stack = argdict_to_command(
            create_image_stack,
            executable="python3 $HOME/src/pytme/scripts/extract_candidates.py",
        )
        run_command(create_image_stack)

        print("Created image stack")
        if args.verbose:
            copyfile(args.candidate_stack_path, f"{args.output_prefix}_stack.h5")

        print("Starting matching")
        orientations = match_deep(x=())

        if args.verbose:
            copyfile(args.match_template_path, f"{args.output_prefix}_stack.pickle")
        print("Completed matching")
        orientations.to_file(f"{args.output_prefix}.tsv")
        exit(0)

    if args.translation_uncertainty is not None:
        args.target_mask = generate_tempfile_name(suffix=".h5")

    for current_iteration in range(args.iterations):
        create_image_stack = create_stacking_argdict(args)
        create_image_stack = argdict_to_command(
            create_image_stack,
            executable="python3 $HOME/src/pytme/scripts/extract_candidates.py",
        )
        run_command(create_image_stack)

        if args.translation_uncertainty is not None:
            # Build an ellipsoidal mask per stack slice restricting how far
            # refined peaks may wander from the candidate center.
            dens = Density.from_file(args.candidate_stack_path)
            stack_center = np.add(
                np.divide(dens.data.shape, 2).astype(int), np.mod(dens.data.shape, 2)
            ).astype(int)[1:]

            out = dens.empty
            out.data[:, ...] = create_mask(
                mask_type="ellipse",
                center=stack_center,
                radius=args.translation_uncertainty,
                shape=dens.data.shape[1:]
            )
            out.to_file(args.target_mask)

        # Perhaps we need a different optimizer here to use sensible steps for each parameter
        parameters, min_loss = (), None
        match_deep = DeepMatcher(args)
        # for lowpass in (0, 10, 20, 50):
        #     for highpass in (50, 100, 150, 200):
        #         for whiten in (False, True):
        #             loss = match_deep.forward((lowpass, highpass, whiten))
        #             # print((lowpass, highpass), loss)
        #             if min_loss is None:
        #                 min_loss = loss
        #             if loss < min_loss:
        #                 min_loss = loss
        #                 parameters = (lowpass, highpass, whiten),

        # for lowpass in (10, 50, 100, 200):
        #     for highpass in (10, 50, 100, 200):
        for lowpass in args.lowpass_range:
            for highpass in args.highpass_range:
                if lowpass is not None and highpass is not None:
                    # Skip nonsensical band definitions.
                    if lowpass >= highpass:
                        continue
                for no_filter_target in (True, False):
                    loss = match_deep.forward((lowpass, highpass, no_filter_target))
                    # NOTE(review): on the very first evaluation min_loss is set
                    # equal to loss, so `parameters` is never assigned for it —
                    # the first setting can win without being recorded. Also,
                    # minimizing an ROC-AUC selects the *worst* separation; verify.
                    if min_loss is None:
                        min_loss = loss
                    if loss < min_loss:
                        min_loss = loss
                        parameters = (lowpass, highpass, no_filter_target)

        # print("Final output", min_loss, parameters)
        # NOTE(review): this unconditional exit makes everything below, and any
        # further iterations, unreachable — presumably a debugging leftover.
        import sys
        sys.exit(0)

        # parameters = optimize.minimize(
        #     x0=match_deep.get_initial_values(),
        #     fun=match_deep.forward,
        #     method="L-BFGS-B",
        #     options={"maxiter": 100}
        # )
        parameter_dict = match_deep.format_parameters(parameters)
        print("Converged with parameters", parameters)

        # Final full-target matching pass with the selected parameters.
        match_template = create_matching_argdict(args)
        match_template.update(parameter_dict)
        match_template = argdict_to_command(
            match_template,
            executable="python3 $HOME/src/pytme/scripts/match_template_filters.py",
        )
        _ = subprocess.run(match_template, capture_output=True, shell=True)

        # Some form of labelling is necessary for these matches
        # 1. All of them are true positives
        # 2. All of them are true positives up to a certain threshold
        # 3. Kernel fitting
        # 4. Perhaps also sensible to include a certain percentage of low scores as true negatives
        postprocess = create_postprocessing_argdict(args)
        postprocess = argdict_to_command(postprocess, executable="postprocess.py")
        _ = subprocess.run(postprocess, capture_output=True, shell=True)
        # Feed this iteration's refined orientations into the next one.
        args.orientations = f"{args.new_orientations_path}.tsv"
        orientations = Orientations.from_file(args.orientations)
        orientations.to_file(f"{args.output_prefix}_{current_iteration}.tsv")
622
+
623
+
624
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()