fpfind-3.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fpfind/fpfind.py ADDED
@@ -0,0 +1,1008 @@
#!/usr/bin/env python3
"""Calculate frequency and time offsets between two parties.

The timestamps can be supplied either as 'a1' timestamp files (per
readevents), or as 'T1'/'T2' epoch files (per qcrypto).

Since the time offset will evolve with clock skew, the reference time is
defined as the common starting time of both parties, and the time offset is
reported at said reference time.

The results are written to stdout, in the order:
    df (absolute), \\t, dt (ns)
where 'df' is the compensating frequency offset and 'dt' the compensating
timing offset for the target timestamps, i.e. '(TARGET - dt) / (1 + df)'.

To scan for possible precompensations, use the '--precomp-enable' flag.
For more detailed options, use multiple help flags.

Changelog:
    2023-01-09, Justin: Refactoring from fpfind.py
    2023-01-31, Justin: Formalize interface for fpfind.py
"""

import sys
from pathlib import Path

import configargparse
import numpy as np
import numpy.typing as npt
from typing_extensions import TypeAlias

import fpfind.lib._logging as logging
from fpfind import VERSION
from fpfind.lib.constants import (
    EPOCH_LENGTH,
    MAX_FCORR,
    MAX_TIMING_RESOLUTION_NS,
    NTP_MAXDELAY_NS,
    FrequencyCompensation,
    PeakFindingFailed,
)
from fpfind.lib.parse_timestamps import read_a1, read_a1_start_end
from fpfind.lib.typing import TimestampArray
from fpfind.lib.utils import (
    ArgparseCustomFormatter,
    fold_histogram,
    get_first_overlapping_epoch,
    get_statistics,
    get_timestamp_pattern,
    get_timing_delay_fft,
    histogram_fft3,
    match_dts,
    normalize_timestamps,
    parse_docstring_description,
)

logger, log = logging.get_logger("fpfind")

# Disables resolution doubling during the initial peak search. Resolution
# doubling is a relatively cheap measure to increase the peak-finding yield
# while converging on suitable fpfind parameters, and is thus unlikely to be
# needed in production.
# For internal use only.
_DISABLE_DOUBLING = False

# Toggles interruptible FFT
ENABLE_INTERRUPT = False

# Type aliases
NDArrayFloat: TypeAlias = npt.NDArray[np.floating]
NDArrayNumber: TypeAlias = npt.NDArray[np.number]


# Main algorithm
def time_freq(
    ats: TimestampArray,
    bts: TimestampArray,
    k0: int,
    N: int,
    r0: float,
    r_target: float,
    S0: float,
    max_dt: float,
    max_df: float,
    df_target: float,
    Ts: float,
    convergence_rate: float,
    quick: bool,
    do_frequency_compensation: FrequencyCompensation = FrequencyCompensation.ENABLE,
):
    """Perform the actual frequency compensation routine.

    Timestamps must already be normalized to a starting time of 0 ns. Whether
    this starting time is also a common reference between 'ats' and 'bts' is
    up to the implementation.

    Note some differences with the original implementation: the
    'separation_duration' is left constant.

    Args:
        ats: Timestamps of reference side, in units of ns.
        bts: Timestamps of compensating side, in units of ns.
        k0: Number of cross-correlations to overlay, usually 1.
        N: Number of bins to use in the FFT.
        r0: Initial resolution of cross-correlation.
        r_target: Target resolution desired from routine.
        S0: Height of peak to discriminate as signal, in units of dev.
        max_dt: Maximum allowable timing delay, in units of ns.
        max_df: Maximum allowable frequency offset (absolute).
        df_target: Frequency offset below which compensation is disabled.
        Ts: Separation of cross-correlations, in units of ns.
        convergence_rate: Factor by which the resolution shrinks per iteration.
        quick: Whether to return after the first iteration.
        do_frequency_compensation: Frequency compensation mode.
    """
    # Optional new behaviour where the significance evaluation is deferred,
    # instead looking for coincidences in timing differences
    perform_liberal_match = S0 == 0
    r = _r0 = r0
    k = k0
    Ta = r0 * N * k0

    log(1).debug("Performing peak searching...")
    log(2).debug(
        "Parameters:",
        f"Bins: 2^{np.int32(np.log2(N)):d}",
        f"Bin width: {r0:.0f}ns",
        f"Number of wraps: {k0:d}",
        f"Target resolution: {r_target}ns",
    )

    # Refinement loop; note the resolution/duration will change during the loop
    dt = 0
    f = 1
    iter = 1
    perform_coarse_finding = True
    prev_dt1 = 0  # cached dt1 (not _dt1!)

    while True:
        # Use previously cached base resolution
        if perform_liberal_match:
            r = _r0

        # Dynamically adjust 'k' to avoid event over- and under-flow
        k_max = int(np.floor(Ta / (r * N)))
        k_act = min(k, max(k_max, 1))  # required because 'k_max' may be < 1
        Ta_act = k_act * r * N
        Ta_act = min(Ta_act, Ta)
        log(2).debug(f"Iteration {iter} (r={r:.1f}ns, k={k_act:d})")

        # Perform cross-correlation
        log(3).debug(f"Performing earlier xcorr (range: [0.00, {Ta_act * 1e-9:.2f}]s)")
        xs, ys = histogram_fft3(
            ats, bts, 0, Ta_act, N, r, Ta, interruptible=ENABLE_INTERRUPT
        )

        # Calculate timing delay
        dt1 = get_timing_delay_fft(ys, xs)[0]  # get smaller candidate
        sig = get_statistics(ys, r).significance

        # Dynamically search for a suitable resolution
        dt1s_early = []
        while perform_liberal_match or perform_coarse_finding:
            log(4).debug(
                f"Peak: S = {sig:.3f}, dt = {dt1}ns (resolution = {r:.0f}ns)",
            )

            # Deviation is zero, due to a flat cross-correlation
            # Hint: Increase number of bins
            if sig == 0:
                raise PeakFindingFailed(
                    "Bin saturation ",
                    significance=sig,
                    resolution=r,
                    dt1=dt1,
                )

            if abs(dt1) < max_dt:
                dt1s_early.append((dt1, r, sig))

            # Check if peak threshold exceeded
            if not perform_liberal_match:
                if sig >= S0:
                    log(5).debug("Accepted")
                    break
                log(5).debug("Rejected")

                if _DISABLE_DOUBLING:
                    raise PeakFindingFailed(
                        "Low significance",
                        significance=sig,
                        resolution=r,
                        dt1=dt1,
                    )

            # If the peak is rejected, merge contiguous bins to double
            # the resolution of the peak search
            r *= 2
            xs, ys = fold_histogram(xs, ys, 2)
            dt1 = get_timing_delay_fft(ys, xs)[0]
            sig = get_statistics(ys, r).significance
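
            # Illustrative sketch of the folding step (hypothetical values):
            # with r = 16 ns and bin edges xs = [0, 16, 32, ...], folding by
            # a factor of 2 merges adjacent bin counts, roughly
            # ys' = ys[0::2] + ys[1::2], leaving xs' = [0, 32, 64, ...] at
            # r = 32 ns. Fewer, wider bins trade timing precision for
            # per-bin statistics, which raises the significance of a true peak.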

            # Catch runaway resolution doubling, limited by
            # MAX_TIMING_RESOLUTION_NS
            if r > MAX_TIMING_RESOLUTION_NS:
                if perform_liberal_match:
                    if len(dt1s_early) == 0:
                        raise PeakFindingFailed(
                            "Time delay OOB ",
                            significance=sig,
                            resolution=r,
                            dt1=dt1,
                        )
                    break
                raise PeakFindingFailed(
                    "Resolution OOB ",
                    significance=sig,
                    resolution=r,
                    dt1=dt1,
                )

        # Calculate some thresholds to catch when the peak was likely not found
        buffer = 1
        threshold_dt = buffer * max(abs(prev_dt1), 1)

        # If the duration has too few coincidences, the peak may not
        # show up at all. A peak is likely to have already been found if
        # the current iteration is more than 1. Attempt to retry with a
        # larger 'num_wraps' if enabled in settings.
        if (
            not perform_liberal_match
            and not perform_coarse_finding
            and abs(dt1) > threshold_dt
        ):
            log(3).warning(
                "Interrupted due to spurious signal:",
                f"early dt     = {dt1:10.0f} ns",
                f"threshold dt = {threshold_dt:10.0f} ns",
            )
            k *= 2
            if k <= k_max:
                log(3).debug(
                    f"Reattempting with k = {k:d} due to missing peak.",
                )
                continue

            if r > r0:
                raise PeakFindingFailed(
                    "Did not converge",
                    resolution=r,
                    dt1=dt1,
                )
            break

        # Catch if the timing delay limit is exceeded
        if not perform_liberal_match and abs(dt1) > max_dt:
            raise PeakFindingFailed(
                "Time delay OOB ",
                significance=sig,
                resolution=r,
                dt1=dt1,
            )

        _dt1 = dt1
        df1 = 0  # default values
        if perform_liberal_match or (
            do_frequency_compensation is not FrequencyCompensation.DISABLE
        ):
            # Use previously cached base resolution
            if perform_liberal_match:
                r = _r0

            # Perform cross-correlation
            log(3).debug(
                f"Performing later xcorr (range: [{Ts * 1e-9:.2f}, {(Ts + Ta_act) * 1e-9:.2f}]s)",
            )
            xs, _ys = histogram_fft3(
                ats, bts, Ts, Ta_act, N, r, Ta, interruptible=ENABLE_INTERRUPT
            )

            # Calculate timing delay for the late set of timestamps
            _dt1 = get_timing_delay_fft(_ys, xs)[0]
            sig = get_statistics(_ys, r).significance

            # Attempt a similar search for the late timing difference
            dt1s_late = []
            while perform_liberal_match:
                log(4).debug(
                    f"Peak: S = {sig:.3f}, dt = {_dt1}ns (resolution = {r:.0f}ns)",
                )
                if abs(_dt1) < max_dt:
                    dt1s_late.append((_dt1, r, sig))

                r *= 2
                xs, _ys = fold_histogram(xs, _ys, 2)
                _dt1 = get_timing_delay_fft(_ys, xs)[0]
                sig = get_statistics(_ys, r).significance

                # Check intersections here
                if r > MAX_TIMING_RESOLUTION_NS:
                    if len(dt1s_late) == 0:
                        raise PeakFindingFailed(
                            "Time delay OOB ",
                            significance=sig,
                            resolution=r,
                            dt1=_dt1,
                        )

                    allowed_dt_diff = Ts * max_df * 1e9
                    dt1, _dt1, r = match_dts(dt1s_early, dt1s_late, allowed_dt_diff)
                    break

            # Evaluate measured frequency difference
            df1 = (_dt1 - dt1) / Ts

            # Some guard rails to make sure the results make sense. If
            # something went wrong with peak searching, retry with more
            # wraps, or fall back to the intermediate results, which are
            # likely near the correct values.
            threshold_df = buffer * max(abs(f - 1), 1e-9)
            if f == 1:
                threshold_df = buffer * MAX_FCORR * 1e6  # [ppm]

            if (
                not perform_liberal_match
                and not perform_coarse_finding
                and (abs(_dt1) > threshold_dt or abs(df1) > threshold_df)
            ):
                log(3).warning(
                    "Interrupted due to spurious signal:",
                    f"early dt     = {dt1:10.0f} ns",
                    f"late dt      = {_dt1:10.0f} ns",
                    f"threshold dt = {threshold_dt:10.0f} ns",
                    f"current df   = {df1 * 1e6:10.4f} ppm",
                    f"threshold df = {threshold_df * 1e6:10.4f} ppm",
                )
                k *= 2
                if k <= k_max:
                    log(3).debug(
                        f"Reattempting with k = {k:d} due to missing peak.",
                    )
                    continue

                if r > r0:
                    raise PeakFindingFailed(
                        "Did not converge",
                        resolution=r,
                        dt1=_dt1,
                    )
                break  # terminate if recovery not enabled

            # Catch if the timing delay limit is exceeded
            if not perform_liberal_match and abs(_dt1) > max_dt:
                raise PeakFindingFailed(
                    "Time delay OOB ",
                    significance=sig,
                    resolution=r,
                    dt1=_dt1,
                )

        # Apply recursive relations
        # A quick proof (note dt -> t):
        #   iter 0 -> (T - t0)/f0           = T/f0     - (t0/f0)
        #   iter 1 -> ((T - t0)/f0 - t1)/f1 = T/f0/f1  - (t0/f0/f1 + t1/f1)
        # We want:
        #   iter n -> (T - t)/f = T/f - t/f
        # Thus:
        #   f = f0 * f1 * ... * fn
        #   t = f * (t0/f0/f1/.../fn + t1/f1/.../fn + ... + tn/fn)
        #     = t0 + t1*f0 + t2*f0*f1 + ... + tn*f0*f1*...*f(n-1)
        # Recursive relation:
        #   f' = f * fn
        #   t' = f * tn + t, i.e. use old value of f
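        # Worked example with illustrative numbers: if iteration 0 finds
        # t0 = 1000 ns and f0 = 1 + 5e-6, and iteration 1 then finds
        # t1 = 3 ns and f1 = 1 + 1e-8, the accumulated values are
        #   t' = f0 * t1 + t0 = (1 + 5e-6) * 3 + 1000 ≈ 1003.000015 ns
        #   f' = f0 * f1      = (1 + 5e-6) * (1 + 1e-8)
        # matching the closed forms above.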
        dt += f * dt1
        f *= 1 + df1
        log(3).debug(
            "Calculated timing delays:",
            f"early dt       = {dt1:10.0f} ns",
            f"late dt        = {_dt1:10.0f} ns",
            f"accumulated dt = {dt:10.0f} ns",
            f"current df     = {df1 * 1e6:10.4f} ppm",
            f"accumulated df = {(f - 1) * 1e6:10.4f} ppm",
        )

        # Throw an error if the compensation does not fall within bounds
        if abs(f - 1) >= max_df:
            raise PeakFindingFailed(
                "Compensation OOB",
                significance=sig,
                resolution=r,
                dt1=dt1,
                dt2=_dt1,
                dt=dt,
                df=f - 1,
            )

        # Stop if the target resolution is met, otherwise refine the resolution
        if r <= r_target:
            break

        # Stop the liberal search once the current resolution is expected to
        # return a reasonable result
        if r == _r0:
            perform_liberal_match = False

        # Terminate immediately if 'quick' results are desired
        if quick:
            break

        # Stop attempting frequency compensation if the offset is low enough
        if abs(df1) < df_target and (
            do_frequency_compensation is FrequencyCompensation.ENABLE
        ):
            do_frequency_compensation = FrequencyCompensation.DISABLE
            log(3).debug("Disabling frequency compensation.")

        # if perform_coarse_finding:
        #     N //= int(np.round(r / r0))

        # Update for next iteration
        max_dt1 = max(abs(dt1), abs(_dt1))
        if max_dt1 != 0:
            prev_dt1 = max_dt1
        bts = (bts - dt1) / (1 + df1)
        r = max(r / convergence_rate, r_target)
        iter += 1
        perform_coarse_finding = False  # disable coarse search, i.e. iter > 1

        # Cache the resolution for the next iteration
        if perform_liberal_match:
            _r0 = r

    df = f - 1
    log(3).debug("Returning results.")
    return dt, df
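
# Minimal usage sketch for 'time_freq' (the synthetic data and parameter
# values below are illustrative; the parameters mirror the CLI defaults in
# 'main' further down):
#
#   rng = np.random.default_rng(42)
#   ats = np.sort(rng.uniform(0, 10e9, 100_000))  # 10 s of events, in ns
#   bts = (ats + 4567) * (1 + 5e-6)               # known dt and df applied
#   dt, df = time_freq(
#       ats, bts, k0=1, N=1 << 26, r0=16, r_target=1, S0=6,
#       max_dt=NTP_MAXDELAY_NS, max_df=MAX_FCORR, df_target=1e-10,
#       Ts=6 * 16 * (1 << 26), convergence_rate=np.e, quick=False,
#   )
#   # '(bts - dt) / (1 + df)' should now align with 'ats'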


def fpfind(
    alice: TimestampArray,
    bob: TimestampArray,
    num_wraps: int,
    num_bins: int,
    resolution: float,
    target_resolution: float,
    threshold: float,
    max_dt: float,
    max_df: float,
    df_target: float,
    separation_duration: float,
    convergence_rate: float,
    precompensations: list,
    precompensation_fullscan: bool = False,
    quick: bool = False,
    do_frequency_compensation: FrequencyCompensation = FrequencyCompensation.ENABLE,
):
    """Performs the fpfind procedure.

    This is effectively a wrapper to 'time_freq', which performs the actual
    frequency compensation routine, but it additionally includes a frequency
    precompensation step, as well as potentially other further refinements
    (currently unimplemented).

    Timestamps must already be normalized to a starting time of 0 ns. Whether
    this starting time is also a common reference between 'alice' and 'bob'
    is up to the implementation.

    Args:
        alice: Reference timestamps, in 'a1' format.
        bob: Target timestamps, in 'a1' format.
        num_wraps: Number of cross-correlations to overlay, usually 1.
        num_bins: Number of bins to use in the FFT.
        resolution: Initial resolution of cross-correlation.
        target_resolution: Target resolution desired from routine.
        threshold: Height of peak to discriminate as signal, in units of dev.
        separation_duration: Separation of cross-correlations, in units of ns.
        precompensations: List of precompensations to apply.
        precompensation_fullscan: Specify whether to continue even after a peak is found.
    """
    df0s = precompensations

    # Go through all precompensations
    for df0 in df0s:
        log(1).debug(
            f"Applied initial {df0 * 1e6:.4f} ppm precompensation.",
        )

        # Apply frequency precompensation df0
        dt = 0
        f = 1 + df0
        try:
            dt1, df1 = time_freq(
                alice,
                (bob - dt) / f,
                num_wraps,
                num_bins,
                resolution,
                target_resolution,
                threshold,
                max_dt,
                max_df,
                df_target,
                separation_duration,
                quick=quick,
                convergence_rate=convergence_rate,
                do_frequency_compensation=do_frequency_compensation,
            )
        except ValueError as e:
            log(0).info(f"Peak finding failed, {df0 * 1e6:7.3f} ppm: {str(e)}")
            continue

        # Refine estimates, using the same recursive relations
        dt += f * dt1
        f *= 1 + df1
        log(1).debug(
            f"Applied another {df1 * 1e6:.4f} ppm compensation.",
        )
        e = PeakFindingFailed(
            "",
            resolution=target_resolution,
            dt=dt,
            df=f - 1,
        )
        log(0).info(f"Peak found, precomp: {df0 * 1e6:7.3f} ppm: {str(e)}")
        # TODO: Justify the good enough frequency value
        # TODO(2024-01-31): Add looping code to customize refinement steps.
        if precompensation_fullscan:
            continue
        break

    # No appropriate frequency compensation found
    else:
        raise ValueError("No peak found!")  # TODO

    df = f - 1
    return dt, df
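
# Sketch of programmatic use, combining 'fpfind' with a precompensation scan
# (illustrative values mirroring the CLI defaults; 'alice'/'bob' are
# normalized timestamp arrays as described in the docstring above):
#
#   precompensations = generate_precompensations(0, 20e-6, 0.1e-6)
#   dt, df = fpfind(
#       alice, bob, num_wraps=1, num_bins=1 << 26, resolution=16,
#       target_resolution=1, threshold=6, max_dt=NTP_MAXDELAY_NS,
#       max_df=MAX_FCORR, df_target=1e-10,
#       separation_duration=6 * 16 * (1 << 26), convergence_rate=np.e,
#       precompensations=precompensations,
#   )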


def generate_precompensations(start, stop, step, ordered=False) -> list:
    """Returns a list of precompensations to apply before fpfind.

    The precompensations alternate between positive and negative offsets
    to allow scanning outward from the start value, e.g. for a 10 ppm step,
    the sequence of values is:
        0ppm, 10ppm, -10ppm, 20ppm, -20ppm, ...

    Examples:
        >>> generate_precompensations(0, 0, 0)
        [0]
        >>> generate_precompensations(1, 10, 5)
        [1.0, 6.0, -4.0, 11.0, -9.0]
        >>> generate_precompensations(1, 1, 5)
        [1.0, 6.0, -4.0]
        >>> generate_precompensations(1, 10, 5, ordered=True)
        [1.0, 6.0, 11.0]
    """
    # Zero precompensation if an invalid step is supplied
    if step == 0:
        return [0]

    # Prepare precompensations; note the use of 'np.ceil' (rather than floor
    # division) so that the scan always reaches the requested stop value
    if ordered:
        df0s = np.arange(0, int(np.ceil((stop - start) / step)) + 1)  # conventional
    else:
        df0s = np.arange(1, (int(np.ceil(stop / step)) + 1) * 2) // 2  # flip-flop
        df0s[::2] *= -1

    df0s = df0s.astype(np.float64) * step
    df0s = df0s + start
    return df0s.tolist()


# fmt: on
def main():
    global ENABLE_INTERRUPT, _DISABLE_DOUBLING
    script_name = Path(sys.argv[0]).name

    # Disable Black formatting
    # fmt: off

    def make_parser(help_verbosity=1):
        # Adapted from <https://discuss.python.org/t/advanced-help-for-argparse/20319/2>
        def adv(description):
            return description if help_verbosity >= 2 else configargparse.SUPPRESS
        def advv(description):
            return description if help_verbosity >= 3 else configargparse.SUPPRESS
        def advvv(description):
            return description if help_verbosity >= 4 else configargparse.SUPPRESS

        parser = configargparse.ArgumentParser(
            add_config_file_help=help_verbosity >= 2,
            default_config_files=[f"{script_name}.default.conf"],
            description=parse_docstring_description(__doc__),
            formatter_class=ArgparseCustomFormatter,
            add_help=False,
        )

        # Display arguments (group with defaults)
        pgroup = parser.add_argument_group("display/configuration")
        pgroup.add_argument(
            "-h", "--help", action="count", default=0,
            help="Show this help message, with incremental verbosity, e.g. up to -hhh")
        pgroup.add_argument(
            "-v", "--verbosity", action="count", default=0,  # retained for backward compatibility
            help=configargparse.SUPPRESS)  # black hole for deprecated option
        pgroup.add_argument(
            "-p", "--quiet", action="count", default=0,
            help="Reduce log verbosity, e.g. -pp for less verbosity")
        pgroup.add_argument(
            "-L", "--logging", metavar="",
            help=adv("Log to file, if specified. Log level follows verbosity"))
        pgroup.add_argument(
            "--config", metavar="", is_config_file_arg=True,
            help=adv("Path to configuration file"))
        pgroup.add_argument(
            "--save", metavar="", is_write_out_config_file_arg=True,
            help=adv("Path to configuration file for saving, then immediately exit"))
        pgroup.add_argument(
            "-I", "--interruptible", action="store_true",
            help=advv("Allow fpfind routine to be interrupted via SIGINT"))
        pgroup.add_argument(
            "-V", "--output", metavar="", type=int, default=0, choices=range(1<<5),
            help=adv(f"{ArgparseCustomFormatter.RAW_INDICATOR}"
                "Specify output verbosity. Results are tab-delimited (default: %(default)d)\n"
                "- Setting bit 0 inverts the freq and time compensations\n"
                "- Setting bit 1 changes freq units, from abs to 2^-34\n"
                "- Setting bit 2 removes time compensation\n"
                "- Setting bit 3 changes time units, from 1ns to 1/8ns\n"
                "- Setting bit 4 adds first epoch used"
            )
        )
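
        # For instance (illustrative): '-V 3' sets bits 0 and 1, so the
        # printed df/dt are inverted (i.e. values to *apply* to the target)
        # and df is expressed in units of 2^-34 as expected by 'freqcd';
        # '-V 5' (bits 0 and 2) prints only the inverted df.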

        # Timestamp importing arguments
        pgroup = parser.add_argument_group("importing timestamps")
        pgroup.add_argument(
            "-t", "--reference", metavar="",
            help="Timestamp file in 'a1' format, from low-count side (reference)")
        pgroup.add_argument(
            "-T", "--target", metavar="",
            help="Timestamp file in 'a1' format, from high-count side")
        pgroup.add_argument(
            "-X", "--legacy", action="store_true",
            help="Parse raw timestamps in legacy mode (default: %(default)s)")
        pgroup.add_argument(
            "-Z", "--skip-duration", metavar="", type=float, default=0,
            help=adv("Specify initial duration to skip, in seconds (default: %(default)s)"))

        # Epoch importing arguments
        pgroup = parser.add_argument_group("importing epochs")
        pgroup.add_argument(
            "-d", "--sendfiles", metavar="",
            help="SENDFILES, from low-count side (reference)")
        pgroup.add_argument(
            "-D", "--t1files", metavar="",
            help="T1FILES, from high-count side")
        pgroup.add_argument(
            "-e", "--first-epoch", metavar="",
            help=adv("Specify filename of first overlapping epoch, optional"))
        pgroup.add_argument(
            "-z", "--skip-epochs", metavar="", type=int, default=0,
            help=adv("Specify number of initial epochs to skip (default: %(default)d)"))

        # Channel selection
        pgroup = parser.add_argument_group("channel selection")
        pgroup.add_argument(
            "-m", "--reference-pattern", metavar="", type=int,
            help=adv("Pattern mask for selecting detector events from low-count side"))
        pgroup.add_argument(
            "-M", "--target-pattern", metavar="", type=int,
            help=adv("Pattern mask for selecting detector events from high-count side"))

        # Timing compensation (pfind) parameters
        pgroup = parser.add_argument_group("timing compensation")
        pgroup.add_argument(
            "-k", "--num-wraps", metavar="", type=int, default=1,
            help=adv("Specify number of arrays to wrap (default: %(default)d)"))
        pgroup.add_argument(
            "-q", "--buffer-order", metavar="", type=int, default=26,
            help="Specify FFT buffer order, N = 2**q (default: %(default)d)")
        pgroup.add_argument(
            "-R", "--initial-res", metavar="", type=int, default=16,
            help="Specify initial coarse timing resolution, in units of ns (default: %(default)dns)")
        pgroup.add_argument(
            "-r", "--final-res", metavar="", type=int, default=1,
            help=adv("Specify desired fine timing resolution, in units of ns (default: %(default)dns)"))
        pgroup.add_argument(
            "--max-dt", metavar="", type=float, default=NTP_MAXDELAY_NS,
            help=advv("Expected maximum timing difference, in units of ns (default: 200ms)"))
        pgroup.add_argument(
            "-S", "--peak-threshold", metavar="", type=float, default=6,
            help=adv("Specify the statistical significance threshold (default: %(default).1f)"))
        pgroup.add_argument(
            "--disable-doubling", action="store_true",
            help=advv("Disable automatic resolution doubling during the initial peak search"))
        pgroup.add_argument(
            "--convergence-rate", metavar="", type=float,
            help=configargparse.SUPPRESS)  # black hole for deprecated option
        pgroup.add_argument(
            "-Q", "--convergence-order", metavar="", type=float, default=np.e,
            help=adv("Specify the reduction factor in timing uncertainty between iterations, larger = faster (default: %(default).4f)"))
        pgroup.add_argument(
            "-f", "--quick", action="store_true",
            help=advv("Return the first iteration results immediately"))

        # Frequency compensation parameters
        pgroup = parser.add_argument_group("frequency compensation")
        pgroup.add_argument(
            "-s", "--separation", metavar="", type=float, default=6,
            help=adv("Specify width of separation, in units of epochs (default: %(default).1f)"))
        pgroup.add_argument(
            "--force-comp", action="store_true",
            help=advv("Force frequency compensation even when no drift detected"))
        pgroup.add_argument(
            "--disable-comp", action="store_true",
            help=advv("Disable frequency compensation entirely"))
        pgroup.add_argument(
            "--max-df", metavar="", type=float, default=MAX_FCORR,
            help=advv("Expected maximum frequency difference (default: 122ppm)"))
        pgroup.add_argument(
            "--freq-threshold", metavar="", type=float, default=0.1,
            help=advv("Threshold for frequency calculation, in units of ppb (default: %(default).1f)"))

        # Timing precompensation parameters
        #
        # This is used for further fine-tuning of inter-bin timing values, or
        # for an overall shift to perform a rough timing correction. Since this
        # is a rarely used option, and to avoid impacting the runtime of the
        # original algorithm, this timing precompensation is performed *before*
        # the shifts in frequency.
        #
        # To align with the compensation terminology elucidated in the output
        # inversion explanation, the timing precompensation is applied to the
        # reference, i.e. alice.
        pgroup = parser.add_argument_group("timing precompensation")
        pgroup.add_argument(
            "--dt", metavar="", type=float, default=0,
            help=advv("Initial timing shift, in units of ns (default: %(default)dns)"))
        pgroup.add_argument(
            "--dt-use-bins", action="store_true",
            help=advv("Change dt units, from ns to timing resolution (i.e. -R)"))

        # Frequency precompensation parameters
        pgroup = parser.add_argument_group("frequency precompensation")
        pgroup.add_argument(
            "-P", "--precomp-enable", action="store_true",
            help="Enable precompensation scanning")
        pgroup.add_argument(
            "--df", "--precomp-start", metavar="", type=float, default=0.0,
            help=adv("Specify the precompensation value (default: 0ppm)"))
        pgroup.add_argument(
            "--precomp-step", metavar="", type=float, default=0.1e-6,
            help=adv("Specify the step value (default: 0.1ppm)"))
        pgroup.add_argument(
            "--precomp-stop", metavar="", type=float, default=20e-6,
            help=adv("Specify the max scan range, one-sided (default: 20ppm)"))
        pgroup.add_argument(
            "--precomp-ordered", action="store_true",
            help=advv("Test precompensations in increasing order (default: %(default)s)"))
        pgroup.add_argument(
            "--precomp-fullscan", action="store_true",
            help=advv("Force all precompensations to be tested (default: %(default)s)"))

        return parser

    # fmt: on
    # Parse arguments
    parser = make_parser()
    args = parser.parse_args()

    # Print advanced help if specified
    if args.help > 0:
        parser = make_parser(help_verbosity=args.help)
        parser.print_help(sys.stderr)
        sys.exit(1)

    # Check whether options have been supplied, and print help otherwise
    args_sources = parser.get_source_to_settings_dict().keys()
    config_supplied = any(map(lambda x: x.startswith("config_file"), args_sources))
    if len(sys.argv) == 1 and not config_supplied:
        parser.print_help(sys.stderr)
        sys.exit(1)

    # Set log arguments
    if args.logging is not None:
        logging.set_logfile(logger, args.logging)

    # Set logging level
    # The default level should be DEBUG (instead of WARNING) for better
    # accessibility of the tool. '--verbosity' is kept for legacy reasons.
    if args.quiet > 0:
        verbosity = max(2 - args.quiet, 0)
    elif args.verbosity == 0:
        verbosity = 2
    else:
        verbosity = args.verbosity
    logging.set_verbosity(logger, verbosity)
    log(0).info(f"fpfind {VERSION}")
    log(0).info(f"{args}")
    if args.quiet == 0 and args.verbosity > 0:
        log(0).warning(
            "'-v'/'--verbosity' has been deprecated, use "
            "'-p'/'--quiet' instead with inverted behaviour."
        )

    # Allow interrupting the fpfind routine
    if args.interruptible:
        ENABLE_INTERRUPT = True

    # Set the lower threshold on frequency compensation before disabling it
    df_target = 1e-10
    if args.freq_threshold > 0:
        df_target = args.freq_threshold * 1e-9

    # Set convergence rate
    if args.convergence_rate is not None:
        log(0).error(
            "'--convergence-rate' has been removed, use "
            "'-Q'/'--convergence-order' instead with new behaviour."
        )
        sys.exit(1)

    # Check the convergence order is valid and not a power of two
    if args.convergence_order <= 1:
        log(0).error("'-Q'/'--convergence-order' must be larger than 1.")
        sys.exit(1)
    exponent = np.log2(args.convergence_order)
    if args.peak_threshold == 0 and np.isclose(exponent, np.round(exponent)):
        log(0).warning(
            "'-Q'/'--convergence-order' should not be a power of two to avoid "
            "infinite loops during liberal peak search."
        )

    # Enforce only one frequency compensation setting
    if args.disable_comp and args.force_comp:
        log(0).error("Only one of '--disable-comp' and '--force-comp' allowed.")
        sys.exit(1)
    do_frequency_compensation = FrequencyCompensation.ENABLE
    if args.disable_comp:
        do_frequency_compensation = FrequencyCompensation.DISABLE
    if args.force_comp:
        do_frequency_compensation = FrequencyCompensation.FORCE

    # Disable resolution doubling, if option specified
    if args.disable_doubling:
        _DISABLE_DOUBLING = True

    # Verify minimum duration has been imported
    num_bins = 1 << args.buffer_order
    Ta = args.initial_res * num_bins * args.num_wraps
    Ts = args.separation * Ta
    minimum_duration = (args.separation + 2) * Ta  # TODO: Check if this should be 1
    log(0).info(
        "Reading timestamps...",
        f"Required duration: {minimum_duration * 1e-9:.1f}s "
        f"(cross-corr {Ta * 1e-9:.1f}s)",
    )
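
    # With the defaults (illustrative arithmetic): q = 26 and R = 16 ns give
    # Ta = 16 ns * 2^26 ≈ 1.07 s per cross-correlation, Ts = 6 * Ta ≈ 6.4 s,
    # and a minimum duration of (6 + 2) * Ta ≈ 8.6 s of overlapping events.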

    # fmt: off

    # Obtain timestamps needed for fpfind
    #   alice: low count side - chopper - HeadT2 - sendfiles (reference)
    #   bob: high count side - chopper2 - HeadT1 - t1files
    if args.sendfiles is not None and args.t1files is not None:
        log(1).info("Reading from epoch directories...")
        _is_reading_ts = False

        # +1 epoch specified for use as buffer for frequency compensation
        required_epochs = np.ceil(minimum_duration/EPOCH_LENGTH).astype(np.int32) + args.skip_epochs + 1

        # Automatically choose the first overlapping epoch if not supplied manually
        first_epoch, available_epochs = get_first_overlapping_epoch(
            args.sendfiles, args.t1files,
            first_epoch=args.first_epoch, return_length=True,
        )  # type: ignore (return_length returns two items)
        log(1).debug(
            "",
            f"Available: {available_epochs:d} epochs "
            f"(need {required_epochs:d})",
            f"First epoch: {first_epoch}",
        )
        if available_epochs < required_epochs:
            log(1).warning("Insufficient epochs")
        if first_epoch is None:
            log(1).error("No valid epochs available")
            sys.exit(1)

        # Read epochs
        alice, aps = get_timestamp_pattern(
            args.sendfiles, "T2",
            first_epoch, args.skip_epochs, required_epochs - args.skip_epochs)
        bob, bps = get_timestamp_pattern(
            args.t1files, "T1",
            first_epoch, args.skip_epochs, required_epochs - args.skip_epochs)

    elif args.target is not None and args.reference is not None:
        log(1).info("Reading from timestamp files...")
        _is_reading_ts = True
        first_epoch = None

        # Get only the required events
        tas, tae = read_a1_start_end(args.reference, legacy=args.legacy)
        tbs, tbe = read_a1_start_end(args.target, legacy=args.legacy)
        start = max(tas, tbs)
        end = min(tae, tbe)
        required_duration = (minimum_duration + Ta) * 1e-9 + args.skip_duration  # seconds
        if end - start < required_duration:
            log(1).warning("Insufficient timestamp events")

        alice, aps = read_a1(args.reference, legacy=args.legacy, end=start+required_duration)
        bob, bps = read_a1(args.target, legacy=args.legacy, end=start+required_duration)

    else:
        log(0).error("Timestamp files/epochs must be supplied with -tT/-dD")
        sys.exit(1)

    # Select events only from the specified channels
    if args.reference_pattern is not None:
        alice = alice[(aps & args.reference_pattern).astype(bool)]
    if args.target_pattern is not None:
        bob = bob[(bps & args.target_pattern).astype(bool)]

    # Normalize timestamps to a common time reference near the start, so that
    # frequency compensation will not shift the timing difference too far
    skip = args.skip_duration if _is_reading_ts else 0
    alice, bob = normalize_timestamps(alice, bob, skip=skip)
    log(1).debug(
        f"Read {len(alice):d} and {len(bob):d} events from reference and compensating side.",
        "Reference timing range: "
        f"[{alice[0]*1e-9:.2f}, {alice[-1]*1e-9:.2f}]s",
        "Compensating timing range: "
        f"[{bob[0]*1e-9:.2f}, {bob[-1]*1e-9:.2f}]s",
        f"(skipped {skip:.2f}s)",
    )

    # Prepare frequency precompensations
    precompensations = [args.df]
    if args.precomp_enable:
        log(0).info("Generating frequency precompensations...")
        precompensations = generate_precompensations(
            args.df,
            args.precomp_stop,
            args.precomp_step,
            ordered=args.precomp_ordered,
        )
        text = ",".join(map(lambda p: f"{p*1e6:g}", precompensations[:3]))
        log(1).debug(
            f"Prepared {len(precompensations):d} precompensation(s): {text}... ppm",
        )

    # Perform timing precompensation (experimental command)
    # See notes in the CLI definition.
    if args.dt != 0:
        alice = alice + args.dt * (args.initial_res if args.dt_use_bins else 1)

    # Start fpfind
    log(0).info("Running fpfind...")
    dt, df = fpfind(
        alice, bob,
        num_wraps=args.num_wraps,
        num_bins=num_bins,
        resolution=args.initial_res,
        target_resolution=args.final_res,
        threshold=args.peak_threshold,
        max_dt=args.max_dt,
        max_df=args.max_df,
        df_target=df_target,
        separation_duration=Ts,
        convergence_rate=args.convergence_order,
        precompensations=precompensations,
        precompensation_fullscan=args.precomp_fullscan,
        quick=args.quick,
        do_frequency_compensation=do_frequency_compensation,
    )

    # To understand the options below, we first clarify some definitions:
    # - To apply a frequency difference is to apply an additional clock
    #   skew to the target timestamp events, i.e. ts * (1 + df).
    #     - In the context of a timing difference: ts + dt
    # - To compensate for a frequency difference is to undo the clock
    #   skew present in the target events, i.e. ts / (1 + df)
    #     - In the context of a timing difference: ts - dt
    # - 'fpfind' calculates the *compensation* frequency for the target,
    #   and the *compensation* timing prior to frequency compensation,
    #   i.e. (ts - dt) / (1 + df)
    #     - This is equivalent to applying the frequency difference to
    #       the reference, *then* applying the timing difference,
    #       i.e. Ts * (1 + df) + dt
    # - To invert this relation is to have the results be *applied* to
    #   the target (vis-a-vis *compensated* for on the reference), i.e.
    #       ts * (1 + dF) + dT, or equivalently (Ts - dT) / (1 + dF)
    #     - This convention is followed by the qcrypto stack, where
    #       'freqcd' first applies the freq diff to the target, before
    #       the downstream 'costream' corrects and tracks the timing diff.
    #     - This implies the inversion is represented by:
    #           dT = -dt / (1 + df)
    #           dF = 1 / (1 + df) - 1
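    #
    # A quick check of the inversion: starting from ts' = (ts - dt) / (1 + df)
    # and demanding ts' = ts * (1 + dF) + dT, matching the coefficient of ts
    # gives 1 + dF = 1 / (1 + df), hence dF = 1 / (1 + df) - 1, and matching
    # the constant term gives dT = -dt / (1 + df).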

    # Modify output based on flags
    flag = args.output
    if flag & 0b0001:
        dt = -dt / (1 + df)
        df = 1 / (1 + df) - 1

    # Convert into special bases
    if flag & 0b0010:
        df = f"{round(df * (1 << 34)):d}"  # units of 2**-34 as per freqcd
    if flag & 0b1000:
        dt *= 8  # units of 1/8ns as per qcrypto

    # Generate output, and add the timing value if requested
    output = f"{df}\t"
    if not (flag & 0b0100):
        output += f"{round(dt):d}\t"
    if (flag & 0b10000) and not _is_reading_ts:
        output += f"{first_epoch}\t"
    output = output.rstrip()
    print(output, file=sys.stdout)  # newline auto-added
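
    # Illustrative outputs (hypothetical values, df = 5 ppm, dt = 4567 ns):
    #   -V 0:  "5e-06\t4567"   (absolute df, dt in ns)
    #   -V 2:  "85899\t4567"   (df in units of 2^-34, i.e. round(5e-6 * 2^34))
    #   -V 4:  "5e-06"         (timing compensation omitted)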


if __name__ == "__main__":
    main()