pymagnetos 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. pymagnetos/__init__.py +15 -0
  2. pymagnetos/cli.py +40 -0
  3. pymagnetos/core/__init__.py +19 -0
  4. pymagnetos/core/_config.py +340 -0
  5. pymagnetos/core/_data.py +132 -0
  6. pymagnetos/core/_processor.py +905 -0
  7. pymagnetos/core/config_models.py +57 -0
  8. pymagnetos/core/gui/__init__.py +6 -0
  9. pymagnetos/core/gui/_base_mainwindow.py +819 -0
  10. pymagnetos/core/gui/widgets/__init__.py +19 -0
  11. pymagnetos/core/gui/widgets/_batch_processing.py +319 -0
  12. pymagnetos/core/gui/widgets/_configuration.py +167 -0
  13. pymagnetos/core/gui/widgets/_files.py +129 -0
  14. pymagnetos/core/gui/widgets/_graphs.py +93 -0
  15. pymagnetos/core/gui/widgets/_param_content.py +20 -0
  16. pymagnetos/core/gui/widgets/_popup_progressbar.py +29 -0
  17. pymagnetos/core/gui/widgets/_text_logger.py +32 -0
  18. pymagnetos/core/signal_processing.py +1004 -0
  19. pymagnetos/core/utils.py +85 -0
  20. pymagnetos/log.py +126 -0
  21. pymagnetos/py.typed +0 -0
  22. pymagnetos/pytdo/__init__.py +6 -0
  23. pymagnetos/pytdo/_config.py +24 -0
  24. pymagnetos/pytdo/_config_models.py +59 -0
  25. pymagnetos/pytdo/_tdoprocessor.py +1052 -0
  26. pymagnetos/pytdo/assets/config_default.toml +84 -0
  27. pymagnetos/pytdo/gui/__init__.py +26 -0
  28. pymagnetos/pytdo/gui/_worker.py +106 -0
  29. pymagnetos/pytdo/gui/main.py +617 -0
  30. pymagnetos/pytdo/gui/widgets/__init__.py +8 -0
  31. pymagnetos/pytdo/gui/widgets/_buttons.py +66 -0
  32. pymagnetos/pytdo/gui/widgets/_configuration.py +78 -0
  33. pymagnetos/pytdo/gui/widgets/_graphs.py +280 -0
  34. pymagnetos/pytdo/gui/widgets/_param_content.py +137 -0
  35. pymagnetos/pyuson/__init__.py +7 -0
  36. pymagnetos/pyuson/_config.py +26 -0
  37. pymagnetos/pyuson/_config_models.py +71 -0
  38. pymagnetos/pyuson/_echoprocessor.py +1901 -0
  39. pymagnetos/pyuson/assets/config_default.toml +92 -0
  40. pymagnetos/pyuson/gui/__init__.py +26 -0
  41. pymagnetos/pyuson/gui/_worker.py +135 -0
  42. pymagnetos/pyuson/gui/main.py +767 -0
  43. pymagnetos/pyuson/gui/widgets/__init__.py +7 -0
  44. pymagnetos/pyuson/gui/widgets/_buttons.py +95 -0
  45. pymagnetos/pyuson/gui/widgets/_configuration.py +85 -0
  46. pymagnetos/pyuson/gui/widgets/_graphs.py +248 -0
  47. pymagnetos/pyuson/gui/widgets/_param_content.py +193 -0
  48. pymagnetos-0.1.0.dist-info/METADATA +23 -0
  49. pymagnetos-0.1.0.dist-info/RECORD +51 -0
  50. pymagnetos-0.1.0.dist-info/WHEEL +4 -0
  51. pymagnetos-0.1.0.dist-info/entry_points.txt +7 -0
pymagnetos/core/signal_processing.py
@@ -0,0 +1,1004 @@
1
+ """pyqtSignal processing module."""
2
+
3
+ import warnings
4
+ from collections.abc import Callable, Iterable
5
+ from typing import Any
6
+
7
+ import numpy as np
8
+ from scipy import integrate, ndimage, optimize, signal
9
+
10
+
11
+ def _sanitize_dims_for_demodulation(
12
+ reftime: np.ndarray, refsig: np.ndarray, sigtime: np.ndarray, sig: np.ndarray
13
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
14
+ # Reference time vector should be 1D
15
+ if reftime.ndim != 1:
16
+ raise ValueError(
17
+ "Reference time vector has too much dimensions, it should be 1D."
18
+ )
19
+ # Others should be 2D, even with only one series, for vectorization
20
+ if refsig.ndim == 1:
21
+ refsig_na = refsig[..., np.newaxis]
22
+ elif refsig.ndim == 2:
23
+ refsig_na = refsig
24
+ else:
25
+ raise ValueError(
26
+ "Reference signal has too much dimensions, it should be 1 or 2D."
27
+ )
28
+ if sigtime.ndim == 1:
29
+ sigtime_na = sigtime[..., np.newaxis]
30
+ elif (sigtime.ndim == 2) & (sigtime.shape[1] == 1):
31
+ sigtime_na = sigtime
32
+ else:
33
+ raise ValueError(
34
+ "pyqtSignal time vector has too much dimensions, it should be 1D "
35
+ "or have its second dimension shape equal to 1."
36
+ )
37
+ if sig.ndim == 1:
38
+ sig_na = sig[..., np.newaxis]
39
+ elif sig.ndim == 2:
40
+ sig_na = sig
41
+ else:
42
+ raise ValueError("pyqtSignal has too much dimensions, it should be 1 or 2D.")
43
+
44
+ return reftime, refsig_na, sigtime_na, sig_na
45
+
46
+
47
+ def integrate_pickup(
48
+ pickup_time: np.ndarray,
49
+ pickup_signal: np.ndarray,
50
+ pickup_surface: float,
51
+ method: str = "trapz",
52
+ ) -> np.ndarray:
53
+ """
54
+ Integrate pickup coil voltage to get the magnetic field.
55
+
56
+ Uses the Faraday-Lenz law: e = -S dB/dt. Several integration methods could be
57
+ implemented, but only the cumulative trapezoid is available. The field is forced to
58
+ be positive and starts at 0 T.
59
+
60
+ Parameters
61
+ ----------
62
+ pickup_time : np.ndarray
63
+ 1D vector with time points.
64
+ pickup_signal : np.ndarray
65
+ 1D vector with pickup coil signal.
66
+ pickup_surface : float
67
+ Pickup coil surface in m².
68
+ method : str, optional
69
+ Method for the integration, by default "trapz".
70
+
71
+ Returns
72
+ -------
73
+ magfield : np.ndarray
74
+ Magnetic field in T.
75
+
76
+ Raises
77
+ ------
78
+ NotImplementedError
79
+ Only 'trapz' is supported as integration `method`.
80
+
81
+ """
82
+ # Remove overall mean (offset) so that B is 0 when there's no magnetic field
83
+ pu_signal = pickup_signal - np.mean(pickup_signal)
84
+
85
+ # Integrate to get the field
86
+ match method:
87
+ case "trapz":
88
+ magfield = (
89
+ integrate.cumulative_trapezoid(pu_signal, pickup_time, initial=0)
90
+ / pickup_surface
91
+ )
92
+ case _:
93
+ err_msg = (
94
+ "Only cumulative trapezoid integration method ('trapz') is implemented."
95
+ )
96
+ raise NotImplementedError(err_msg)
97
+
98
+ # Fix inverted field
99
+ if np.mean(magfield) < 0:
100
+ magfield = -magfield
101
+
102
+ return magfield
103
+
104
+
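As an illustration of the function above, here is a minimal sketch of integrating a synthetic pickup-coil voltage; the import path follows the wheel layout listed above and every numerical value is a placeholder.

import numpy as np
from pymagnetos.core.signal_processing import integrate_pickup

t = np.linspace(0.0, 0.1, 10_000)            # 100 ms pulse (s)
dBdt = 500.0 * np.sin(2 * np.pi * 10 * t)    # hypothetical dB/dt profile (T/s)
surface = 1.0e-4                             # 1 cm^2 pickup coil (m^2)
voltage = -surface * dBdt                    # Faraday-Lenz: e = -S dB/dt

magfield = integrate_pickup(t, voltage, surface)  # field in T, starts at 0, forced positive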
105
+ def rolling_average(
106
+ a: np.ndarray, wlen: int, subsample: bool = False, axis=0
107
+ ) -> np.ndarray:
108
+ """
109
+ Apply a rolling window average.
110
+
111
+ The moving window is applied on columns (with `axis=0`) or rows (with `axis=1`)
112
+ of the input 2D array `a`. Optionally subsample the output.
113
+
114
+ Parameters
115
+ ----------
116
+ a : np.ndarray
117
+ 2D array with series to smooth.
118
+ wlen : int
119
+ Time window size (expressed as indices).
120
+ subsample : bool, optional
121
+ If True, the series are subsampled by a factor of `wlen - 1`.
122
+ axis : int, optional
123
+ Direction of the time series, e.g. 0 if the time series to smooth are on the
124
+ columns, 1 if they are on the rows. Default is 0.
125
+
126
+ Returns
127
+ -------
128
+ smoothed_a : np.ndarray
129
+ Array with smoothed time series.
130
+ """
131
+ # rolling average filter
132
+ b = ndimage.uniform_filter1d(a, size=wlen, axis=axis, mode="reflect", origin=0)
133
+
134
+ # subsampling, handling axes
135
+ if subsample:
136
+ b = subsample_array(b, wlen - 1, axis=axis)
137
+
138
+ return b
139
+
140
+
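A short usage sketch (assumed import path) showing the smoothing and the optional subsampling:

import numpy as np
from pymagnetos.core.signal_processing import rolling_average

rng = np.random.default_rng(0)
a = rng.normal(size=(5_000, 3))                        # 3 noisy series on columns
smoothed = rolling_average(a, wlen=50)                 # same shape, moving-average filtered
reduced = rolling_average(a, wlen=50, subsample=True)  # also keeps only every 49th row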
141
+ def subsample_array(a: np.ndarray, step: int, axis: int = 0) -> np.ndarray:
142
+ """
143
+ Subsample an array in the given `axis` every `step` point.
144
+
145
+ Parameters
146
+ ----------
147
+ a : np.ndarray
148
+ Input array.
149
+ step : int
150
+ Only points `a[step::step]` are kept.
151
+ axis : int < 2, optional
152
+ Axis along which to operate, only 0 and 1 are supported. Default is 0 (along
153
+ columns).
154
+
155
+ Returns
156
+ -------
157
+ np.ndarray
158
+ Subsampled array.
159
+
160
+ Raises
161
+ ------
162
+ NotImplementedError
163
+ If axis > 1.
164
+ """
165
+ if axis == 0:
166
+ if a.ndim < 2:
167
+ a = a[step::step].copy()
168
+ else:
169
+ a = a[step::step, :].copy()
170
+ elif axis == 1:
171
+ a = a[:, step::step].copy()
172
+ else:
173
+ raise NotImplementedError("Subsampling is not supported for axis > 1.")
174
+
175
+ return a
176
+
177
+
178
+ def demodulate_chunks(
179
+ reftime: np.ndarray,
180
+ refsig: np.ndarray,
181
+ sigtime: np.ndarray,
182
+ sig: np.ndarray,
183
+ f0: float,
184
+ filter_order: int,
185
+ filter_fc: float,
186
+ decimate_factor: int = 0,
187
+ chunksize: int = 0,
188
+ bar: Callable[[Iterable], Iterable] | None = None,
189
+ progress_emitter: Any = None,
190
+ ) -> tuple[np.ndarray, np.ndarray]:
191
+ """
192
+ Demodulation with IQ real signals.
193
+
194
+ Reference signal is fitted to get its amplitude and phase over time. Those are used
195
+ to generate a continuous reference signal, used to demodulate `sig`. The I and Q
196
+ components are returned.
197
+
198
+ The signal is frequency shifted with the continuous reference and a low-pass filter
199
+ is applied.
200
+
201
+ The process is done in chunks to avoid overloading memory, calling the `demodulate()`
202
+ function on each chunk.
203
+
204
+ Optionally, decimation can be used to reduce the data before filtering.
205
+
206
+ Parameters
207
+ ----------
208
+ reftime : np.ndarray
209
+ Time vector of the reference signal.
210
+ refsig : np.ndarray
211
+ 2D array of the reference signal, cropped around where the fit will be done,
212
+ time series should be on columns.
213
+ sigtime : np.ndarray
214
+ Time vector of the whole signal.
215
+ sig : np.ndarray
216
+ 2D array representing the signal. Time series should be on columns.
217
+ f0 : float
218
+ Center frequency (experimental RF frequency).
219
+ filter_order : int
220
+ Order of the low-pass filter.
221
+ filter_fc : float
222
+ Cut-off frequency of the low-pass filter.
223
+ decimate_factor : int, optional
224
+ Downsampling factor, set to 0 or 1 to disable (default). An anti-aliasing filter
225
+ is applied before.
226
+ chunksize : int, optional
227
+ Size of chunks, set to -1 to disable or 0 to adapt chunk size so that there are
228
+ 100 chunks (default).
229
+ bar : Callable or None, optional
230
+ Decorator for the main loop. Should be a callable that takes an iterable as
231
+ argument to display a progress bar. Tested for `rich.progress.track()` and
232
+ `tqdm.tqdm()`. Default is None (no progress bar).
233
+ progress_emitter : Any or None, optional
234
+ An object with an `emit()` method, such as a pyqtSignal. The loop index (e.g. the
235
+ frame index) is emitted at each iteration of the main loop. Default is None.
236
+
237
+ Returns
238
+ -------
239
+ I, Q : np.ndarray
240
+ I-Q-Demodulated signal.
241
+ """
242
+ # Dimensions checks
243
+ reftime, refsig, sigtime, sig = _sanitize_dims_for_demodulation(
244
+ reftime, refsig, sigtime, sig
245
+ )
246
+
247
+ npoints, nframes = sig.shape
248
+ if decimate_factor > 1:
249
+ new_npoints = int(npoints / decimate_factor)
250
+ new_shape = (new_npoints, nframes)
251
+ else:
252
+ new_shape = (npoints, nframes)
253
+
254
+ # Find reference phase
255
+ def cos_func(
256
+ x: np.ndarray, A: float | np.ndarray, phi: float | np.ndarray
257
+ ) -> np.ndarray:
258
+ return A * np.cos(2 * np.pi * f0 * x + phi)
259
+
260
+ # Initialize output arrays
261
+ X = np.empty(new_shape, float)
262
+ Y = np.empty(new_shape, float)
263
+
264
+ # Design low-pass filter
265
+ fs = 1 / np.mean(np.diff(sigtime, axis=0))
266
+ if decimate_factor > 1:
267
+ fs = fs / decimate_factor
268
+ sos = signal.butter(filter_order, filter_fc, btype="lowpass", fs=fs, output="sos")
269
+
270
+ # Determine chunk size
271
+ chunks, cs = get_chunks(nframes, chunksize)
272
+
273
+ if bar is not None:
274
+ chunks = bar(chunks)
275
+
276
+ # Processing in chunks
277
+ for idx in chunks:
278
+ # Select data
279
+ s = slice(idx, idx + cs + 1)
280
+ refsig_chunk = refsig[:, s]
281
+ sig_chunk = sig[:, s]
282
+
283
+ X[:, s], Y[:, s] = demodulate(
284
+ reftime,
285
+ refsig_chunk,
286
+ sigtime,
287
+ sig_chunk,
288
+ cos_func,
289
+ sos,
290
+ decimate_factor=decimate_factor,
291
+ )
292
+
293
+ if progress_emitter is not None:
294
+ progress_emitter.emit(idx)
295
+
296
+ return X, Y
297
+
298
+
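A minimal end-to-end sketch of the chunked IQ demodulation. The carrier frequency, sampling rate, filter settings and frame count below are placeholders, not values taken from the package.

import numpy as np
from pymagnetos.core.signal_processing import demodulate_chunks, compute_amp_iq

fs, f0 = 500e6, 45e6                        # assumed sampling rate and carrier (Hz)
t = np.arange(0, 20e-6, 1 / fs)             # one 20 us record
nframes = 200
drift = np.linspace(0, np.pi / 4, nframes)  # slow phase drift across frames
sig = np.cos(2 * np.pi * f0 * t[:, None] + drift[None, :])        # frames on columns
ref = np.repeat(np.cos(2 * np.pi * f0 * t)[:, None], nframes, axis=1)

I, Q = demodulate_chunks(
    reftime=t[:200],          # reference cropped to the fit window
    refsig=ref[:200, :],
    sigtime=t,
    sig=sig,
    f0=f0,
    filter_order=4,
    filter_fc=1e6,
    decimate_factor=10,       # anti-aliased downsampling before filtering
    chunksize=0,              # 0 -> about 100 chunks
)
amplitude = compute_amp_iq(I, Q)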
299
+ def demodulate(
300
+ reftime: np.ndarray,
301
+ refsig: np.ndarray,
302
+ sigtime: np.ndarray,
303
+ sig: np.ndarray,
304
+ func: Callable[[np.ndarray, float | np.ndarray, float | np.ndarray], np.ndarray],
305
+ sos: np.ndarray,
306
+ decimate_factor: int = 0,
307
+ ) -> tuple[np.ndarray, np.ndarray]:
308
+ """
309
+ Classical demodulation with IQ real signals.
310
+
311
+ Reference signal is fitted to get amplitude and phase. Those are used to generate
312
+ a continuous reference signal, used to demodulate `sig`. The I and Q components
313
+ are returned.
314
+
315
+ The signal is frequency-shifted with the continuous reference and a low-pass filter
316
+ is applied. Chunking over frames is handled by `demodulate_chunks()`.
317
+
318
+ Optionally, decimation can be used to reduce the data before filtering. In that
319
+ case, the filter second-order sections `sos` must take into account the reduced
320
+ sampling frequency.
321
+
322
+ Parameters
323
+ ----------
324
+ reftime : np.ndarray
325
+ Time vector of the reference signal.
326
+ refsig : np.ndarray
327
+ 2D array of the reference signal, cropped around where the fit will be done,
328
+ time series should be on columns.
329
+ sigtime : np.ndarray
330
+ Time vector of the whole signal.
331
+ sig : np.ndarray
332
+ 2D array representing the signal. Time series should be on columns.
333
+ func : Callable
334
+ Function taking a vector and 2 constants as arguments and returns a vector the
335
+ same size as input. It is used to fit the reference so it should be a cosine
336
+ function where the amplitude and the phase are fitted -- center frequency f0
337
+ should be within the function.
338
+ sos : np.ndarray
339
+ Second-order sections representation of the IIR filter, as returned by
340
+ `scipy.signal` filters. It must take into account the decimation.
341
+ decimate_factor : int, optional
342
+ Downsampling factor, set to 0 to disable (default). An anti-aliasing filter is
343
+ applied before.
344
+
345
+ Returns
346
+ -------
347
+ I, Q : np.ndarray
348
+ I-Q-Demodulated signal.
349
+ """
350
+ # Dimensions checks
351
+ reftime, refsig, sigtime, sig = _sanitize_dims_for_demodulation(
352
+ reftime, refsig, sigtime, sig
353
+ )
354
+
355
+ # Fitting
356
+ refas = np.empty((refsig.shape[1],), dtype=float)
357
+ refphis = np.empty((refsig.shape[1],), dtype=float)
358
+ for frame in range(refsig.shape[1]):
359
+ popt, _ = optimize.curve_fit(func, reftime, refsig[:, frame])
360
+ refas[frame] = popt[0] # reference amplitude
361
+ refphis[frame] = popt[1] # reference phase
362
+
363
+ # Multiply signal by reference (frequency-shifting)
364
+ x = func(sigtime, refas, refphis) * sig
365
+ y = func(sigtime, refas, refphis + np.pi / 2) * sig
366
+
367
+ # Decimation (subsampling with anti-aliasing)
368
+ if decimate_factor > 1:
369
+ x = signal.decimate(x, decimate_factor, axis=0)
370
+ y = signal.decimate(y, decimate_factor, axis=0)
374
+
375
+ # Low-pass filter
376
+ x = signal.sosfiltfilt(sos, x, axis=0)
377
+ y = signal.sosfiltfilt(sos, y, axis=0)
378
+
379
+ return x, y
380
+
381
+
382
+ def find_signal(
383
+ sig: np.ndarray, std_factor: float, before: float = 0, after: float = 0
384
+ ) -> tuple[int, int]:
385
+ """
386
+ Detect onset and offset of signal.
387
+
388
+ Signal is detected where the absolute value of the trace exceeds the absolute mean
389
+ plus `std_factor` times the standard deviation of the whole trace. The onset (offset)
390
+ is the first (last, resp.) index where this condition is met.
391
+ `sig` can be 2D, in which case time series should be on columns. The earliest onset
392
+ and the latest offset across columns are returned.
393
+
394
+ Parameters
395
+ ----------
396
+ sig : np.ndarray
397
+ Array with time series on columns.
398
+ std_factor : float
399
+ Multipler of signal standard deviation.
400
+ before : float, optional
401
+ Fraction to take before the actual detected onset, by default 0.
402
+ after : float, optional
403
+ Fraction to take after the actual detected offset, by default 0.
404
+
405
+ Returns
406
+ -------
407
+ start, stop : int
408
+ Indices of onset and offset.
409
+ """
410
+ npoints = sig.shape[0]
411
+
412
+ # Convert to float instead of doing it twice (mean and std)
413
+ findsig = sig.astype(float)
414
+
415
+ # Get threshold
416
+ thresh = np.abs(findsig.mean(axis=0)) + std_factor * findsig.std(axis=0)
417
+
418
+ # Binary mask
419
+ mask = np.abs(findsig) > thresh
420
+
421
+ # Find first non-zero value
422
+ first = mask.argmax(axis=0).min()
423
+ last = npoints - np.flip(mask, axis=0).argmax(axis=0).min()
424
+
425
+ # Take a bit more or less
426
+ delta = last - first
427
+ first -= before * delta # x% before onset
428
+ last += after * delta # x% after offset
429
+ # Check boundaries
430
+ first = max(0, int(first))
431
+ last = min(int(last), npoints)
432
+ # Check they didn't reverse, otherwise swap the two
433
+ if last < first:
434
+ first, last = last, first
437
+
438
+ return first, last
439
+
440
+
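For example, locating a burst buried in noise (synthetic data, assumed import path):

import numpy as np
from pymagnetos.core.signal_processing import find_signal

rng = np.random.default_rng(1)
trace = rng.normal(scale=0.01, size=20_000)
trace[8_000:12_000] += np.sin(2 * np.pi * np.arange(4_000) / 40)  # injected burst

start, stop = find_signal(trace[:, None], std_factor=2, before=0.1, after=0.1)
burst = trace[start:stop]                                         # widened by 10 % on each side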
441
+ def find_f0(sig: np.ndarray, fs: float) -> np.ndarray:
442
+ """
443
+ Find center frequency in signal `sig` sampled at `fs`.
444
+
445
+ `sig` can be 2D, in which case the time series should be on columns, and the center
446
+ frequency found in each time series is returned (one value per column).
447
+
448
+ It uses the gaussian spectrum interpolation method described in Gasior & Gonzalez
449
+ (2004) (1).
450
+
451
+ (1) M. Gasior and J.L. Gonzalez, Improving FFT Frequency Measurement Resolution by
452
+ Parabolic and Gaussian Spectrum Interpolation, CERN, 2004.
453
+
454
+ Parameters
455
+ ----------
456
+ sig : np.ndarray
457
+ Signal, 1D or 2D with time series on columns.
458
+ fs : float
459
+ Sampling rate.
460
+
461
+ Returns
462
+ -------
463
+ f0 : np.ndarray
464
+ Center frequency in same units as `fs`, one value per series (columns of `sig`).
465
+ """
466
+ serie = sig.copy()
467
+ npoints = serie.shape[0]
468
+
469
+ # Check dimensions for compatibility with multiple time series
470
+ if serie.ndim == 1:
471
+ serie = serie[..., np.newaxis]
472
+
473
+ # Indices vector
474
+ time_ind = np.arange(npoints)[..., np.newaxis]
475
+
476
+ # Gaussian coef.
477
+ t0 = (time_ind[0, :] + time_ind[-1, :]) * 0.5
478
+ c = 0.5 * (8 / (npoints - 1)) ** 2 # gaussian interp. with r=8
479
+ gaussian = np.exp(-((time_ind - t0) ** 2) * c)
480
+
481
+ # RFFT
482
+ Sn = np.abs(np.fft.rfft(serie * gaussian, axis=0))
483
+ f = np.fft.rfftfreq(npoints, d=1 / fs)
484
+
485
+ # Frequency at FFT peak
486
+ amax = Sn.argmax(axis=0)
487
+ if np.all(amax > 0) and np.all(amax < f.shape[0] - 1):
488
+ indexer = np.arange(len(amax))
489
+ dm = (
490
+ fs
491
+ / npoints
492
+ * np.log(Sn[amax + 1, indexer] / Sn[amax - 1, indexer])
493
+ / (
494
+ 2
495
+ * np.log(
496
+ Sn[amax, indexer] ** 2
497
+ / (Sn[amax + 1, indexer] * Sn[amax - 1, indexer])
498
+ )
499
+ )
500
+ )
501
+ f0 = f[amax] + dm
502
+ else:
503
+ # Too close to the boundaries of the FFT window
504
+ warnings.warn(
505
+ "Center frequency is at the boundary of the window, "
506
+ "can't interpolate properly."
507
+ )
508
+ f0 = f[amax]
509
+
510
+ return f0
511
+
512
+
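A quick sketch of the sub-bin frequency estimate on a synthetic carrier (all values are arbitrary):

import numpy as np
from pymagnetos.core.signal_processing import find_f0

fs = 100e6                                   # assumed sampling rate (Hz)
t = np.arange(4_096) / fs
record = np.cos(2 * np.pi * 17.3e6 * t)      # carrier not aligned with an FFT bin
f0 = find_f0(record, fs)                     # array with one value, close to 17.3e6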
513
+ def compute_amp_iq(in_phase: np.ndarray, out_phase: np.ndarray) -> np.ndarray:
514
+ """
515
+ Compute amplitude from I and Q.
516
+
517
+ Parameters
518
+ ----------
519
+ in_phase, out_phase : np.ndarray
520
+ Same sized arrays corresponding to I and Q respectively.
521
+
522
+ Returns
523
+ -------
524
+ amplitude : np.ndarray
525
+ Signal amplitude.
526
+ """
527
+ return np.sqrt(in_phase**2 + out_phase**2)
528
+
529
+
530
+ def compute_phase_iq(
531
+ in_phase: np.ndarray,
532
+ out_phase: np.ndarray,
533
+ unwrap: bool = False,
534
+ period: float = np.pi,
535
+ axis: int = 1,
536
+ ) -> np.ndarray:
537
+ """
538
+ Compute phase from I and Q.
539
+
540
+ The result can be unwrapped, in which case an axis must be specified.
541
+
542
+ Parameters
543
+ ----------
544
+ in_phase, out_phase : np.ndarray
545
+ Same sized arrays corresponding to I and Q respectively.
546
+ unwrap : bool, optional
547
+ Unwrap resulting phase. Default is False.
548
+ period : float, optional
549
+ Period considered for unwrapping, used only if `unwrap` is True. Default is pi.
550
+ axis : int, optional
551
+ If `unwrap` is True, the axis along which the phase is unwrapped. Default
552
+ is 1.
553
+
554
+ Returns
555
+ -------
556
+ phase : np.ndarray
557
+ Signal phase.
558
+ """
559
+ res = np.arctan2(out_phase, in_phase)
560
+
561
+ if unwrap:
562
+ res = np.unwrap(res, period=period, axis=axis)
563
+
564
+ return res
565
+
566
+
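The two helpers above are typically applied to the I/Q pair returned by `demodulate_chunks()`; a self-contained sketch with a toy I/Q pair:

import numpy as np
from pymagnetos.core.signal_processing import compute_amp_iq, compute_phase_iq

theta = np.linspace(0, 6 * np.pi, 1_000)
I = 2.0 * np.cos(theta)      # toy in-phase component
Q = 2.0 * np.sin(theta)      # toy quadrature component

amp = compute_amp_iq(I, Q)                                            # constant 2.0
phi = compute_phase_iq(I, Q, unwrap=True, period=2 * np.pi, axis=0)   # recovers theta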
567
+ def rescale_a2b(
568
+ a: np.ndarray, b: np.ndarray, allow_offset: bool = False, sub_mean: bool = False
569
+ ) -> np.ndarray:
570
+ """
571
+ Rescale `a` so that it is in the same range as `b`.
572
+
573
+ If `allow_offset` is False (default), the minimum of the result is set to 0 instead of the minimum of `b`. If
574
+ `sub_mean` is True, the mean is subtracted from the final array before returning.
575
+
576
+ Parameters
577
+ ----------
578
+ a : np.ndarray
579
+ Array to rescale.
580
+ b : np.ndarray
581
+ Array to get range from.
582
+ allow_offset : bool, optional
583
+ If False (default), the minimum value of the returned array is 0, otherwise, it
584
+ is the minimum of `b`.
585
+ sub_mean : bool, optional
586
+ If True, the mean is subtracted from the rescaled array. Default is False.
587
+
588
+ Returns
589
+ -------
590
+ rescaled_a : np.ndarray
591
+ `a`, rescaled.
592
+
593
+ """
594
+ acopy = a.copy().astype(float)
595
+ amin, amax = acopy.min(), acopy.max()
596
+ bmin, bmax = b.min().astype(float), b.max().astype(float)
597
+ rescaled_a = ((acopy - amin) / (amax - amin)) * (bmax - bmin)
598
+ if allow_offset:
599
+ rescaled_a += bmin
600
+ if sub_mean:
601
+ rescaled_a -= rescaled_a.mean()
602
+
603
+ return rescaled_a
604
+
605
+
606
+ def get_chunks(nitems: int, chunk_size: int) -> tuple[range, int]:
607
+ """
608
+ Get a range generator.
609
+
610
+ The generator goes from 0 to `nitems` with steps of `chunk_size`.
611
+ If `chunk_size` is -1, it goes from 0 to `nitems` in one step (i.e. no chunking).
612
+ If `chunk_size` is 0, it adjusts the step size to get 100 chunks.
613
+
614
+ Parameters
615
+ ----------
616
+ nitems : int
617
+ Total number of items.
618
+ chunk_size : int
619
+ Step size. -1 and 0 are treated as special values (see description above).
620
+
621
+ Returns
622
+ -------
623
+ range : range
624
+ Range generator.
625
+ step_size : int
626
+ The step size (chunk size) that was determined.
627
+ """
628
+ # Determine chunk size
629
+ if chunk_size == -1:
630
+ # no chunking
631
+ cs = nitems
632
+ elif chunk_size == 0:
633
+ # get 100 chunks
634
+ cs = max(1, nitems // 100)  # keep a non-zero step when nitems < 100
635
+ else:
636
+ cs = chunk_size
637
+
638
+ return range(0, nitems, cs), cs
639
+
640
+
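The two special values are easiest to see on a small example:

from pymagnetos.core.signal_processing import get_chunks

chunks, cs = get_chunks(1_000, 0)    # range(0, 1000, 10), cs == 10 (about 100 chunks)
whole, cs = get_chunks(1_000, -1)    # range(0, 1000, 1000), cs == 1000 (single chunk)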
641
+ def compute_attenuation(
642
+ amp: np.ndarray,
643
+ amp0: float,
644
+ echo_idx: int,
645
+ length: float,
646
+ mode: str = "reflection",
647
+ corr: float = 1,
648
+ ) -> np.ndarray:
649
+ """
650
+ Compute the normalized amplitude: attenuation in dB/m.
651
+
652
+ Parameters
653
+ ----------
654
+ amp : np.ndarray
655
+ Amplitude time series.
656
+ amp0 : float
657
+ Amplitude baseline.
658
+ echo_idx : int
659
+ Index of the analyzed echo (1-based).
660
+ length : float
661
+ Sample length in m.
662
+ mode : {"reflection" , "transmission"}, optional
663
+ Detection mode, by default "reflection".
664
+ corr : float, optional
665
+ Denominator correction factor (logarithmic amplifier slope). Default is 1
666
+ (no correction).
667
+
668
+ Returns
669
+ -------
670
+ attenuation : np.ndarray
671
+ Normalized amplitude in dB/m.
672
+
673
+ """
674
+ match mode:
675
+ case "reflection":
676
+ # reflection : 2nL
677
+ constant = 2 * echo_idx
678
+
679
+ case "transmission":
680
+ # transmission : (2n - 1)L
681
+ constant = 2 * echo_idx - 1
682
+
683
+ case _:
684
+ raise ValueError(
685
+ "Expected 'reflection' or 'transmission' for detection mode. "
686
+ f"Got {mode}."
687
+ )
688
+
689
+ return (amp0 - amp) / (corr * constant * length)
690
+
691
+
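A worked example with placeholder numbers, for the second echo in reflection on a 3 mm sample:

import numpy as np
from pymagnetos.core.signal_processing import compute_attenuation

amp = np.array([10.0, 9.5, 9.0])     # echo amplitude trace (dB)
alpha = compute_attenuation(amp, amp0=10.0, echo_idx=2, length=3e-3, mode="reflection")
# (10 - amp) / (2 * 2 * 0.003)  ->  [0.0, ~41.7, ~83.3] dB/m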
692
+ def compute_phase_shift(
693
+ phi: np.ndarray,
694
+ phi0: float,
695
+ echo_idx: int,
696
+ speed: float,
697
+ rf_freq: float,
698
+ length: float,
699
+ mode: str = "reflection",
700
+ ) -> np.ndarray:
701
+ """
702
+ Compute relative phase-shift.
703
+
704
+ Parameters
705
+ ----------
706
+ phi : np.ndarray
707
+ Pi-jump-corrected phase time series.
708
+ phi0 : float
709
+ Phase baseline.
710
+ echo_idx : int
711
+ Index of analyzed echo (1-based).
712
+ speed : float
713
+ (Estimated) speed of sound in the sample.
714
+ rf_freq : float
715
+ Radio frequency used in the experiment, in Hz.
716
+ length : float
717
+ Sample length in m.
718
+ mode : {"reflection", "transmission"}, optional
719
+ Detection mode, by default "reflection".
720
+
721
+ Returns
722
+ -------
723
+ deltaphi : np.ndarray
724
+ Relative phase-shift.
725
+
726
+ """
727
+ match mode:
728
+ case "reflection":
729
+ # reflection : 2nL
730
+ constant = 2 * echo_idx
731
+ case "transmission":
732
+ # transmission (2n - 1)L
733
+ constant = 2 * echo_idx - 1
734
+ case _:
735
+ raise ValueError(
736
+ "Expected 'reflection' or 'transmission' for detection mode. "
737
+ f"Got {mode}."
738
+ )
739
+
740
+ return (phi - phi0) * speed / (2 * np.pi * rf_freq * constant * length)
741
+
742
+
743
+ def _find_barycenters_fast(
744
+ fxx: np.ndarray, sxx: np.ndarray, freq_window: float
745
+ ) -> np.ndarray:
746
+ max_f_idx = np.argmax(sxx, axis=0)
747
+ bary_win_size = int(freq_window / np.mean(np.diff(fxx)))
748
+ # sxx is nfreqs x ntimes
749
+ # Create indices for all freq-windows at each time, so dimensions are
750
+ # winsize x ntimes
751
+ # Indices are -bary_win_size:+bary_win_size centered on each max freq
752
+ indices = (
753
+ np.arange(-bary_win_size, bary_win_size + 1)[..., np.newaxis]
754
+ + max_f_idx[np.newaxis, ...]
755
+ )
756
+ # Make sure indices are in range
757
+ indices = np.clip(indices, 0, sxx.shape[0] - 1)
758
+ # Extract windows for all columns. We need cols_indexer to select all columns of
759
+ # the spectrogram and pair them with the frequency indices
760
+ cols_indexer = np.arange(sxx.shape[1])
761
+ sxx_windows = sxx[indices, cols_indexer]
762
+ fxx_windows = fxx[indices]
763
+ # Compute barycenters
764
+ sum_weight_f = np.sum(fxx_windows * sxx_windows, axis=0)
765
+ sum_weight = np.sum(sxx_windows, axis=0)
766
+
767
+ return sum_weight_f / sum_weight
768
+
769
+
770
+ def _find_barycenters_slow(
771
+ fxx: np.ndarray, sxx: np.ndarray, freq_window: float
772
+ ) -> np.ndarray:
773
+ max_f_idx = np.argmax(sxx, axis=0)
774
+ bary_win_size = int(freq_window / np.mean(np.diff(fxx)))
775
+ barycenters = np.zeros(max_f_idx.size)
776
+ for idx in range(max_f_idx.size):
777
+ indexer = slice(max_f_idx[idx] - bary_win_size, max_f_idx[idx] + bary_win_size)
778
+ sum_weight_f = np.sum(fxx[indexer] * sxx[indexer, idx])
779
+ sum_weight = np.sum(sxx[indexer, idx])
780
+ barycenters[idx] = sum_weight_f / sum_weight
781
+
782
+ return barycenters
783
+
784
+
785
+ def find_barycenters(
786
+ fxx: np.ndarray, sxx: np.ndarray, freq_window: float, fast: bool = True
787
+ ) -> np.ndarray:
788
+ """
789
+ Compute the barycenters of the maximum frequencies over time in a spectrogram.
790
+
791
+ It uses the spectrogram from `scipy.signal.spectrogram()`.
792
+
793
+ Parameters
794
+ ----------
795
+ fxx : np.ndarray
796
+ Array with the frequencies of the spectrogram, as returned by
797
+ `scipy.signal.spectrogram()`.
798
+ sxx : np.ndarray
799
+ Spectrogram as returned by `scipy.signal.spectrogram()`.
800
+ freq_window : float
801
+ Frequency-window size around the frequency maxima, in the same units as `fxx`.
802
+ fast : bool, optional
803
+ Whether to use the vectorized version of the algorithm, default is True.
804
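+
+ Returns
+ -------
+ barycenters : np.ndarray
+ Barycenter frequency for each time bin (one value per column of `sxx`).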
+ """
805
+ if fast:
806
+ barycenters = _find_barycenters_fast(fxx, sxx, freq_window)
807
+ else:
808
+ barycenters = _find_barycenters_slow(fxx, sxx, freq_window)
809
+
810
+ return barycenters
811
+
812
+
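For instance, tracking the dominant frequency of a chirp through its spectrogram (all parameters are illustrative):

import numpy as np
from scipy import signal
from pymagnetos.core.signal_processing import find_barycenters

fs = 1e6
t = np.arange(0, 0.1, 1 / fs)
sweep = signal.chirp(t, f0=50e3, t1=0.1, f1=80e3)
fxx, txx, sxx = signal.spectrogram(sweep, fs=fs, nperseg=4_096)
freq_track = find_barycenters(fxx, sxx, freq_window=2e3)   # one barycenter per time bin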
813
+ def find_nearest_index(array: np.ndarray, value: float) -> int:
814
+ """
815
+ Find the index of the nearest value in `array` to the input `value`.
816
+
817
+ Parameters
818
+ ----------
819
+ array : np.ndarray
820
+ Array from which the index is extracted.
821
+ value : float
822
+ Value to find in `array`.
823
+ """
824
+ return (np.abs(array - value)).argmin()
825
+
826
+
827
+ def get_up_down_indices(x: np.ndarray) -> tuple[slice, slice]:
828
+ """
829
+ Get indices in `y` for increasing and decreasing `x`.
830
+
831
+ Returns 2 slices, the first indexes `y` in increasing `x`, the second in decreasing
832
+ `x`. `x` should be a vector that increases, reach a maximum, then decreases.
833
+
834
+ Parameters
835
+ ----------
836
+ x : np.ndarray
837
+ Should be 1D.
838
+
839
+ Returns
840
+ -------
841
+ slice_inc, slice_dec : slice
842
+ """
843
+ idx_at_xmax = np.argmax(x)
844
+ return (slice(0, idx_at_xmax), slice(-1, idx_at_xmax - 1, -1))
845
+
846
+
847
+ def split_up_down(
848
+ x: np.ndarray, y: np.ndarray
849
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
850
+ """
851
+ Split `y` in two arrays based on if `x` is increasing or decreasing.
852
+
853
+ `x` should be a vector that increases, reaches a maximum and then decreases (e.g. a
854
+ magnetic field).
855
+ One array corresponds to increasing `x`, the other corresponds to decreasing `x`. The
856
+ arrays are sorted and the `x` arrays are also returned.
857
+
858
+ Parameters
859
+ ----------
860
+ x, y : np.ndarray
861
+ 1D vectors.
862
+
863
+ Returns
864
+ -------
865
+ x_inc, y_inc : np.ndarray
866
+ `x` and `y` when `x` increases.
867
+ x_dec, y_dec : np.ndarray
868
+ `x` and `y` when `x` decreases.
869
+ """
870
+ s_inc, s_dec = get_up_down_indices(x)
871
+ return (x[s_inc], y[s_inc], x[s_dec], y[s_dec])
872
+
873
+
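For example, separating the up and down sweeps of a simulated field pulse:

import numpy as np
from pymagnetos.core.signal_processing import split_up_down

field = np.concatenate([np.linspace(0, 60, 500), np.linspace(60, 0, 1_500)])  # toy pulse (T)
measure = np.sin(field / 3) + 0.01 * np.arange(field.size)                    # toy signal

b_up, y_up, b_down, y_down = split_up_down(field, measure)  # both branches with increasing field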
874
+ def fit_poly(
875
+ field: np.ndarray,
876
+ sig: np.ndarray,
877
+ boundary1: float,
878
+ boundary2: float,
879
+ poly_deg: int,
880
+ ) -> tuple[np.ndarray, np.ndarray]:
881
+ """
882
+ Polynomial fit of `sig(field)`, between boundaries.
883
+
884
+ Returns the resulting polynomial evaluated at `field` along with the detrended signal.
885
+
886
+ Parameters
887
+ ----------
888
+ field : np.ndarray
889
+ Magnetic field vector (x).
890
+ sig : np.ndarray
891
+ Signal vector (y).
892
+ boundary1, boundary2 : float
893
+ Do the fit only where `field` lies within `[boundary1, boundary2]`.
894
+ poly_deg : int
895
+ Degree of the polynomial.
896
+
897
+ Returns
898
+ -------
899
+ poly : np.ndarray
900
+ Polynomial evaluated at `field`.
901
+ sig_detrend : np.ndarray
902
+ `sig` with the fitted polynomial subtracted.
903
+ """
904
+ idx_bmin = find_nearest_index(field, boundary1)
905
+ idx_bmax = find_nearest_index(field, boundary2)
906
+
907
+ fit = np.polynomial.Polynomial.fit(
908
+ field[idx_bmin:idx_bmax],
909
+ sig[idx_bmin:idx_bmax],
910
+ poly_deg,
911
+ )
912
+ res = fit(field)
913
+ return res, sig - res
914
+
915
+
916
+ def interpolate_inverse(
917
+ x: np.ndarray, y: np.ndarray, boundary1: float, boundary2: float, npoints: int
918
+ ) -> tuple[np.ndarray, np.ndarray]:
919
+ """
920
+ Oversample `y` in `1/x` in the given range.
921
+
922
+ Parameters
923
+ ----------
924
+ x : np.ndarray
925
+ Points at which `y` is evaluated.
926
+ y : np.ndarray
927
+ Signal to oversample.
928
+ boundary1, boundary2 : float
929
+ Define the range to select and oversample, before inversion.
930
+ npoints : int
931
+ Number of points of the resulting vectors.
932
+
933
+ Returns
934
+ -------
935
+ x_inverse_oversample : np.ndarray
936
+ `1/x` oversampled in `[1/boundary2, 1/boundary1]`.
937
+ y_oversample : np.ndarray
938
+ `y` oversampled in `[1/boundary2, 1/boundary1]`.
939
+ """
940
+ # Create the oversampled time vector
941
+ x_inverse_oversample = np.linspace(
942
+ 1 / boundary2, 1.0 / boundary1, npoints, endpoint=False
943
+ )
944
+ # Interpolate
945
+ y_oversample = np.interp(1 / x_inverse_oversample, x, y)
946
+
947
+ return x_inverse_oversample, y_oversample
948
+
949
+
950
+ def fourier_transform(
951
+ a: np.ndarray, d: float, pad_mult: int = 1
952
+ ) -> tuple[np.ndarray, np.ndarray]:
953
+ """
954
+ Fourier transform for 1D real signal, with extra padding.
955
+
956
+ Parameters
957
+ ----------
958
+ a : np.ndarray
959
+ Input array.
960
+ d : float
961
+ Sample spacing.
962
+ pad_mult : int, optional
963
+ Multiplier of signal size used for padding. Default is 1.
964
+
965
+ Returns
966
+ -------
967
+ freq : np.ndarray
968
+ Frequencies.
969
+ X : np.ndarray
970
+ Magnitude of the Fourier transform.
971
+ """
972
+ n = a.size * pad_mult
973
+ X = np.fft.rfft(np.hamming(a.size) * a, n=n)[:-1]
974
+ freq = np.fft.rfftfreq(n, d=d)[:-1]
975
+ X = np.abs(X) * 2 / n
976
+ return freq, X
977
+
978
+
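`interpolate_inverse()` and `fourier_transform()` together suggest the usual quantum-oscillation workflow: resample the signal on a regular 1/B grid, then take its Fourier transform. A sketch with made-up numbers:

import numpy as np
from pymagnetos.core.signal_processing import interpolate_inverse, fourier_transform

field = np.linspace(5, 60, 20_000)                # field values (T)
osc = np.cos(2 * np.pi * 800 / field)             # oscillation with frequency F = 800 T

inv_b, y = interpolate_inverse(field, osc, boundary1=20, boundary2=55, npoints=4_096)
freq, spectrum = fourier_transform(y, d=np.mean(np.diff(inv_b)), pad_mult=4)
# `spectrum` peaks near freq = 800 (in tesla)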
979
+ def collate_arrays(to_save: list[np.ndarray]) -> np.ndarray:
980
+ """
981
+ Store 1D vectors as columns in a 2D array.
982
+
983
+ Dimension mismatches are filled with trailing NaNs.
984
+
985
+ Parameters
986
+ ----------
987
+ to_save : list[np.ndarray]
988
+ List of arrays to save in the same file.
989
+
990
+ Returns
991
+ -------
992
+ a : np.ndarray
993
+ The array with all the vectors, with NaNs to fill shorter vectors.
994
+ """
995
+ # Build the array
996
+ ncolumns = len(to_save)
997
+ nrows = max([a.size for a in to_save])
998
+ a_out = np.full((nrows, ncolumns), np.nan)
999
+
1000
+ # Fill the array
1001
+ for idx_col, a in enumerate(to_save):
1002
+ a_out[: a.size, idx_col] = a
1003
+
1004
+ return a_out
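Finally, a small sketch of packing unequal-length results before saving (the output filename is hypothetical):

import numpy as np
from pymagnetos.core.signal_processing import collate_arrays

stacked = collate_arrays([np.arange(5), np.arange(3), np.arange(7)])
# shape (7, 3); shorter columns end with trailing NaN
np.savetxt("collated_results.csv", stacked, delimiter=",")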