pymagnetos 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. pymagnetos/__init__.py +15 -0
  2. pymagnetos/cli.py +40 -0
  3. pymagnetos/core/__init__.py +19 -0
  4. pymagnetos/core/_config.py +340 -0
  5. pymagnetos/core/_data.py +132 -0
  6. pymagnetos/core/_processor.py +905 -0
  7. pymagnetos/core/config_models.py +57 -0
  8. pymagnetos/core/gui/__init__.py +6 -0
  9. pymagnetos/core/gui/_base_mainwindow.py +819 -0
  10. pymagnetos/core/gui/widgets/__init__.py +19 -0
  11. pymagnetos/core/gui/widgets/_batch_processing.py +319 -0
  12. pymagnetos/core/gui/widgets/_configuration.py +167 -0
  13. pymagnetos/core/gui/widgets/_files.py +129 -0
  14. pymagnetos/core/gui/widgets/_graphs.py +93 -0
  15. pymagnetos/core/gui/widgets/_param_content.py +20 -0
  16. pymagnetos/core/gui/widgets/_popup_progressbar.py +29 -0
  17. pymagnetos/core/gui/widgets/_text_logger.py +32 -0
  18. pymagnetos/core/signal_processing.py +1004 -0
  19. pymagnetos/core/utils.py +85 -0
  20. pymagnetos/log.py +126 -0
  21. pymagnetos/py.typed +0 -0
  22. pymagnetos/pytdo/__init__.py +6 -0
  23. pymagnetos/pytdo/_config.py +24 -0
  24. pymagnetos/pytdo/_config_models.py +59 -0
  25. pymagnetos/pytdo/_tdoprocessor.py +1052 -0
  26. pymagnetos/pytdo/assets/config_default.toml +84 -0
  27. pymagnetos/pytdo/gui/__init__.py +26 -0
  28. pymagnetos/pytdo/gui/_worker.py +106 -0
  29. pymagnetos/pytdo/gui/main.py +617 -0
  30. pymagnetos/pytdo/gui/widgets/__init__.py +8 -0
  31. pymagnetos/pytdo/gui/widgets/_buttons.py +66 -0
  32. pymagnetos/pytdo/gui/widgets/_configuration.py +78 -0
  33. pymagnetos/pytdo/gui/widgets/_graphs.py +280 -0
  34. pymagnetos/pytdo/gui/widgets/_param_content.py +137 -0
  35. pymagnetos/pyuson/__init__.py +7 -0
  36. pymagnetos/pyuson/_config.py +26 -0
  37. pymagnetos/pyuson/_config_models.py +71 -0
  38. pymagnetos/pyuson/_echoprocessor.py +1901 -0
  39. pymagnetos/pyuson/assets/config_default.toml +92 -0
  40. pymagnetos/pyuson/gui/__init__.py +26 -0
  41. pymagnetos/pyuson/gui/_worker.py +135 -0
  42. pymagnetos/pyuson/gui/main.py +767 -0
  43. pymagnetos/pyuson/gui/widgets/__init__.py +7 -0
  44. pymagnetos/pyuson/gui/widgets/_buttons.py +95 -0
  45. pymagnetos/pyuson/gui/widgets/_configuration.py +85 -0
  46. pymagnetos/pyuson/gui/widgets/_graphs.py +248 -0
  47. pymagnetos/pyuson/gui/widgets/_param_content.py +193 -0
  48. pymagnetos-0.1.0.dist-info/METADATA +23 -0
  49. pymagnetos-0.1.0.dist-info/RECORD +51 -0
  50. pymagnetos-0.1.0.dist-info/WHEEL +4 -0
  51. pymagnetos-0.1.0.dist-info/entry_points.txt +7 -0
@@ -0,0 +1,1901 @@
1
+ """The EchoProcessor class for ultra-sound echoes experiments."""
2
+
3
+ import logging
4
+ import os
5
+ import time
6
+ from collections.abc import Iterable, Sequence
7
+ from pathlib import Path
8
+ from typing import Any, Literal, Self
9
+
10
+ import nexusformat.nexus as nx
11
+ import numpy as np
12
+ from fftekwfm import TekWFM
13
+ from scipy import signal
14
+
15
+ from ..core import BaseProcessor, sp
16
+ from ..log import configure_logger
17
+ from ._config import EchoConfig
18
+
19
+ # Name of the analysis serie, here it correspond to an echo index
20
+ SERIE_NAME = "echo"
21
+ # Measurements names to use
22
+ REFNAME = "reference"
23
+ I_NAME_ANALOG = "in_phase"
24
+ Q_NAME_ANALOG = "out_phase"
25
+ I_NAME_DIGITAL = "in_phase_demod"
26
+ Q_NAME_DIGITAL = "out_phase_demod"
27
+ # To guess if time is in microseconds, set approx. duration of a frame in seconds
28
+ # If the max is found 1000x above, it is guessed that the vector is in microseconds
29
+ FRAME_TIMESCALE = 10e-6
30
+
31
+ logger = logging.getLogger(__name__)
32
+
33
+
34
+ class EchoProcessor(BaseProcessor):
35
+ """Processor class for ultra-sound echoes experiments."""
36
+
37
    def __init__(self, *args, digital: bool | None = None, **kwargs) -> None:
        """
        Processor class for ultra-sound echoes experiments.

        Provide methods for :
        - Loading data and metadata,
        - Reshape and format data,
        - Preprocessing : demodulation, decimation and/or smoothing (with rolling window
          average)
        - Averaging in a time-window,
        - Derive ultra-sound waves attenuation and phase-shift in the sample during a
          magnetic field shot.

        The data can be loaded from both binary files from the LabVIEW program or
        Tektronix WFM files. Digital demodulation is possible when necessary.

        In any case, the raw data is assumed to be in the form of several frames paving
        the experiment duration. Each frame should contain the same number of samples.
        This is all read from the inputs files :

        With the original LabVIEW program :
        - sample.bin : raw binary file from the LabVIEW program with the measured
          channels,
        - sample.txt : frames onsets, with metadata in the header. The header is
          specified in the configuration file,
        - sample-pickup.bin : raw binary file from the LabVIEW program with pickup coil
          voltage.

        With the Tektronix oscilloscope :
        - sample_ch1.wfm, sample_ch2.wfm, ... : Tektronix WFM files with the measured
          channels,
        - sample.txt : metadata only, the frames onsets are read from the WFM file,
        - sample-pickup.bin : see above.

        Parameters
        ----------
        *args : passed to `BaseProcessor()`
        digital : bool or None, optional
            Force digital mode by setting this parameter to True or to analog mode with
            False. None will attempt to determine this automatically from the
            configuration file, this is the default behavior.
        **kwargs : passed to `BaseProcessor()`
        """
        configure_logger(logger, "pyuson.log")

        # Config class used by the base class to build the Config object
        self._config_cls = EchoConfig

        # Prepare internal names (an analysis "serie" is an echo index here)
        self._serie_name = SERIE_NAME

        # Use digital mode or not, or automatically detect it from configuration
        # (detection happens later, in _init_flags() / _guess_flags())
        self.is_digital = digital

        # Convenience attributes (might be overridden upon loading)
        self.nframes = 0
        self.npoints = 0

        # NOTE: the base class reads _config_cls and may set is_nexus_file,
        # is_config_file and self.measurements — keep this call after the
        # assignments above.
        super().__init__(*args, **kwargs)

        # Initialize flags
        if not self.is_nexus_file:
            self._init_flags()

        # Additional initializations
        if self.is_digital:
            self._init_digital()
        else:
            # Analog mode: in-phase/quadrature channels come straight from the files
            self._i_name = I_NAME_ANALOG
            self._q_name = Q_NAME_ANALOG

        # Initialize metadata dict for WFM file (will be filled when loading data)
        self.metadata["vscale"] = dict()
        self.metadata["voffset"] = dict()
111
+
112
    @property
    def expid(self) -> str:
        """Experiment ID, linked to the Config object (`cfg.expid`)."""
        return self.cfg.expid
116
+
117
    @expid.setter
    def expid(self, value: str):
        """
        Setter for `expid`.

        The `expid` attribute of the configuration is updated. The filenames are
        rebuilt and the data objects are re-initialized (previously loaded data is
        discarded by `_reinit()`).
        """
        self.cfg.expid = value
        self.cfg.build_filenames()
        self._reinit()
128
+
129
    @property
    def data_directory(self) -> Path:
        """Data directory, linked to the Config object (`cfg.data_directory`)."""
        return self.cfg.data_directory
133
+
134
    @data_directory.setter
    def data_directory(self, value: str | Path):
        """
        Setter for `data_directory`.

        The `data_directory` attribute of the configuration is updated. The filenames
        are rebuilt and the data objects are re-initialized (previously loaded data is
        discarded by `_reinit()`).
        """
        self.cfg.data_directory = Path(value)
        self.cfg.build_filenames()
        self._reinit()
145
+
146
    @property
    def idx_serie(self) -> int:
        """
        Track the current analyzed echo.

        The returned value is read from the "echo_index" parameter in the "settings"
        section of the configuration.
        """
        return self.cfg.settings.echo_index
155
+
156
    @idx_serie.setter
    def idx_serie(self, value: int):
        """
        Setter for `idx_serie`.

        The "echo_index" parameter in the "settings" section of the configuration is
        updated.
        """
        self.cfg.settings.echo_index = value
165
+
166
    @property
    def analysis_window(self) -> Sequence[float]:
        """
        Time-range in which averaging is performed.

        The returned value is read from the "analysis_window" parameter in the
        "settings" section of the configuration.
        """
        return self.cfg.settings.analysis_window
175
+
176
    @analysis_window.setter
    def analysis_window(self, value: Sequence[float]):
        """
        Setter for `analysis_window`.

        The "analysis_window" parameter in the "settings" section of the configuration
        is updated.
        """
        self.cfg.settings.analysis_window = value
185
+
186
    def _init_flags(self):
        """
        Set up flags to their defaults for a fresh (non-NeXus) session.

        Called from `__init__()` when the processor is not instantiated from a
        NeXus file; `_guess_flags()` is the counterpart for NeXus files.
        """
        # Tektronix WFM files or LabVIEW binary file
        self.is_wfm = False
        # Measurement time vector is in microseconds
        self.is_us = False
        # Averaged values were computed or updated
        self.is_averaged = False
        # Where the measurements are stored ('data_raw' or 'data_processed')
        self.is_meas_processed = False
        # Where the corresponding time vector is stored ('data_processed' if decimation
        # is used)
        self.is_decimated = False

        # Determine analog or digital mode, if needed (only possible when a
        # configuration file is available)
        if (self.is_digital is None) and self.is_config_file:
            self.is_digital = self._check_digital()

        # Moving mean was performed, if it was, use the smoothed data in data_processed
        self.is_rollmean = {m: False for m in self.measurements}
206
+
207
    def _guess_flags(self):
        """
        Attempt to guess flags from data after loading a NeXus file.

        Unlike `_init_flags()`, the flags are inferred from the stored data via
        the `_check_*` helpers rather than set to defaults.
        """
        self.is_wfm = None  # no files so irrelevant
        self.is_us = self._check_measurement_time_us()
        self.is_averaged = self._check_averaged()

        if self.is_digital is None:
            # It was not set by the user when initializing the Processor object
            self.is_digital = self._check_digital()

        if self.is_digital:
            self._init_digital()  # fills is_meas_processed
            # _init_digital() may have left a placeholder signal name: resolve it
            self._sig_name = self._get_signal_name()
        else:
            self.is_meas_processed = False

        self.is_decimated = self._check_time_subsampled()
        self.is_rollmean = self._check_rollmean()
225
+
226
    def _init_digital(self):
        """
        Additional initializations for digital (software-demodulation) mode.

        Sets the internal measurement names to the demodulated I/Q channels,
        forces the `is_digital` flag and seeds the RNG used for random frame
        selection in `_find_signal_in_ref()`.
        """
        # Prepare internal names
        self._ref_name = REFNAME
        self._i_name = I_NAME_DIGITAL
        self._q_name = Q_NAME_DIGITAL
        # Get signal name
        if self.is_config_file:
            # If instantiated with configuration file
            self._sig_name = self._get_signal_name()
        else:
            self._sig_name = "SIG_NAME_NOT_SET"  # might be set later

        # Update flags
        self.is_digital = True

        # Add a seed based on unix time for frame selection
        self._seed = int(time.time())

        # Update measurements names, e.g. the actual measurements used in computation
        # and not the original "signal" and "reference"
        self.measurements = [self._i_name, self._q_name]
        # Update flags
        self.is_rollmean = {m: False for m in self.measurements}  # the keys changed
        self.is_meas_processed = True
251
+
252
    def _reinit(self):
        """
        Re-initialize data objects.

        Called after `expid` or `data_directory` changes: delegates the data
        reset to the base class, re-applies digital-mode names if needed and
        clears the per-channel WFM voltage scaling metadata.
        """
        super()._reinit()
        if self.is_digital:
            self._init_digital()

        # Voltage scaling/offset are per-file metadata: reset them
        self.metadata["vscale"] = dict()
        self.metadata["voffset"] = dict()

        logger.info(f"Experiment ID set to '{self.expid}'.")
262
+
263
+ def _get_signal_name(self) -> str:
264
+ """Determine signal name from configuration file measurements."""
265
+ signames = [*{*self.cfg.measurements.keys()} - {self._ref_name}]
266
+ if len(signames) != 1:
267
+ raise ValueError(
268
+ "Config file : Measurements should have 2 and only 2 entries."
269
+ )
270
+ return signames[0]
271
+
272
+ @staticmethod
273
+ def _load_metadata(
274
+ filename: str | Path,
275
+ header_map: dict[str, int],
276
+ conversion_map: dict[str, bool],
277
+ ) -> dict[str, float | str]:
278
+ """
279
+ Read metadata from the header of the text file with the frames onsets.
280
+
281
+ `header_map` is a dict mapping a metadata to a line number in the file
282
+ (0-based).
283
+
284
+ `conversion_map` is a dict specifying if those metadata should be
285
+ converted to numerical values.
286
+
287
+ Parameters
288
+ ----------
289
+ filename : str | Path
290
+ Full path to the text file with metadata.
291
+ header_map : dict
292
+ Maps a metadata name to a line number in the file (0-based).
293
+ conversion_map : dict
294
+ Maps a metadata name to a booelan, requiring if the metadata should be
295
+ converted to a numerical value or kept as-is.
296
+
297
+ Returns
298
+ -------
299
+ metadata : dict
300
+ {metadata name : value}
301
+ """
302
+ # Determine how many lines-long the header is
303
+ nlines_header = max(header_map.values()) + 1
304
+
305
+ # Check file exists
306
+ if not os.path.isfile(filename):
307
+ logger.warning(
308
+ f"{os.path.basename(filename)} : metadata file does not exist."
309
+ )
310
+ return {}
311
+
312
+ # Read first lines
313
+ with open(filename) as fid:
314
+ header = [next(fid) for _ in range(nlines_header)]
315
+
316
+ # Collect
317
+ metadata = {key: header[idx] for key, idx in header_map.items()}
318
+
319
+ # Convert numerical values
320
+ metadata = {
321
+ key: float(value) if conversion_map[key] else value.strip("\n")
322
+ for key, value in metadata.items()
323
+ }
324
+
325
+ return metadata
326
+
327
+ @staticmethod
328
+ def _load_frame_onsets(
329
+ filename: str | Path, nlines_header: int, delimiter: str
330
+ ) -> np.ndarray:
331
+ """
332
+ Read frame onsets in the reference time text file.
333
+
334
+ Parameters
335
+ ----------
336
+ filename : str | Path
337
+ Full path to the reference time text file.
338
+ nlines_header : int
339
+ Number of lines to ignore to skip the header.
340
+ delimiter : str
341
+ Delimiter in the text file.
342
+
343
+ Returns
344
+ -------
345
+ frames_onsets : np.ndarray
346
+ Vector with frames onsets.
347
+ """
348
+ frame_onsets = np.loadtxt(
349
+ filename,
350
+ skiprows=nlines_header,
351
+ delimiter=delimiter,
352
+ usecols=0,
353
+ dtype=float,
354
+ )
355
+ return frame_onsets
356
+
357
+ def _load_oscillo_bin(
358
+ self,
359
+ filename: str | Path,
360
+ nchannels: int,
361
+ precision: int,
362
+ endian: Literal["<", ">"] = "<",
363
+ order: Literal["F", "C"] = "F",
364
+ ) -> np.ndarray:
365
+ """
366
+ Load oscilloscope binary file.
367
+
368
+ Loaded data is reshaped and transposed so that time series corresponding to one
369
+ channel correspond to one row (so that it can be easily unpacked). Note that the
370
+ time serie is 1D : all frames are stored one after the other and need to be
371
+ reshaped.
372
+
373
+ Parameters
374
+ ----------
375
+ filename : str | Path
376
+ Full path to binary file.
377
+ nchannels : int
378
+ Number of channels.
379
+ precision : int
380
+ Byte precision.
381
+ endian : {"<", ">"}, optional
382
+ "<" for little endian, ">" for big endian. Default is "<".
383
+ order : {"F", "C"}, optional
384
+ Array order, "F" for Fortran, "C" for C. Default is "F".
385
+
386
+ Returns
387
+ -------
388
+ oscillo_data np.ndarray
389
+ Shape nchannels * nsamples.
390
+
391
+ Raises
392
+ ------
393
+ ValueError
394
+ If it can't be reshaped to the expected shape, raises an error.
395
+ """
396
+ if not isinstance(filename, Path):
397
+ filename = Path(filename)
398
+
399
+ # Load data
400
+ data = self.load_bin(filename, precision, endian)
401
+
402
+ # Reshape data as an array with a column per channel
403
+ filesize = filename.stat().st_size # get total size in bytes
404
+
405
+ # Normally there are filesize bytes in total in the file, precision bytes per
406
+ # float, so 12 bytes per sample (4 bytes * 3 channels), hence
407
+ # filesize / (nchannels * precision) columns and nchannels lines
408
+ # If the data is correct, it should be possible to reshape like this, as all
409
+ # channels have the same number of time points.
410
+ ncolumns = int(filesize / (nchannels * precision))
411
+ try:
412
+ data = data.reshape((ncolumns, nchannels), order=order).astype(float)
413
+ except ValueError:
414
+ raise ValueError("Missing samples in the oscilloscope binary file.")
415
+
416
+ return data.T
417
+
418
+ @staticmethod
419
+ def _reshape_frames(
420
+ oscillo_data: np.ndarray,
421
+ nframes: int,
422
+ order: Literal["F", "C"] = "F",
423
+ ) -> np.ndarray:
424
+ """
425
+ Reshape a 1D array into a 2D array.
426
+
427
+ Time series will end up on columns.
428
+
429
+ Parameters
430
+ ----------
431
+ oscillo_data : np.ndarray
432
+ 1D array with time gaps.
433
+ nframes : int
434
+ Number of frames, the resulting array will have `nframes` columns.
435
+ order : {"F", "C"}, optional
436
+ Order, "F" for Fortran, "C" for C. Default is "F".
437
+
438
+ Returns
439
+ -------
440
+ oscillo_data : np.ndarray
441
+ Reshaped array.
442
+ """
443
+ nsamples_per_seq = int(oscillo_data.shape[0] // nframes)
444
+
445
+ return oscillo_data.reshape((nsamples_per_seq, nframes), order=order)
446
+
447
+ @staticmethod
448
+ def _average_frame_range(
449
+ frame: np.ndarray,
450
+ tstart: float,
451
+ tstop: float,
452
+ dt: float,
453
+ toffset: float = 0,
454
+ tunit: str = "us",
455
+ ) -> np.ndarray:
456
+ """
457
+ Average the signal in the `frame` 2D array in the given time window.
458
+
459
+ The time window is defined by `tstart` and `tstop` expressed in the frame time
460
+ base (`dt`).
461
+
462
+ Parameters
463
+ ----------
464
+ frame : np.ndarray
465
+ Array with time series on columns.
466
+ tstart, tstop : float
467
+ Defines the time window in which the average is computed.
468
+ dt : float
469
+ Time interval between two samples, in seconds.
470
+ toffset : float, optional
471
+ Offset in time, if the measurement time vector does not start at 0, default
472
+ is 0.
473
+ tunit : {"us", "ms", "s"} str, optional
474
+ Units in which tstart and tstop are given, default is "us" (microseconds).
475
+
476
+ Returns
477
+ -------
478
+ meas_avg : np.ndarray
479
+ Measurement averaged in-frame, so that the resulting 1D vector is aligned on
480
+ the `time_exp` vector.
481
+ """
482
+ match tunit:
483
+ case "us":
484
+ mult = 1e-6
485
+ case "ms":
486
+ mult = 1e-3
487
+ case "s":
488
+ mult = 1
489
+ case _:
490
+ logger.warning(
491
+ f"{tunit} is not recognized as a valid unit, assuming it is 'us'."
492
+ )
493
+ mult = 1e-6
494
+
495
+ idx_start = int(np.ceil((tstart - toffset) * mult / dt))
496
+ idx_stop = int(np.ceil((tstop - toffset) * mult / dt))
497
+
498
+ return frame[idx_start:idx_stop, :].mean(axis=0)
499
+
500
+ def _find_signal_in_ref(self) -> Self:
501
+ """
502
+ Find signal in reference time series.
503
+
504
+ Wrap the `sp.find_signal()` function, fetching the parameters from the
505
+ configuration.
506
+ """
507
+ if not self.is_digital:
508
+ logger.warning("Can't find reference signal in analog mode.")
509
+ return self
510
+ else:
511
+ # for type checking
512
+ assert self.cfg.demodulation is not None
513
+
514
+ # Get parameters
515
+ nframes = self.cfg.demodulation.findsig_nframes
516
+ if nframes > 0:
517
+ rng = np.random.default_rng(self._seed)
518
+ framesid = rng.integers(0, self.nframes, nframes)
519
+ sig = self.get_data_raw(self._ref_name)[:, framesid]
520
+ else:
521
+ sig = self.get_data_raw(self._ref_name)
522
+ std_factor = self.cfg.demodulation.findsig_nstd
523
+ before = self.cfg.demodulation.findsig_extend
524
+
525
+ # Detect signal
526
+ logger.info("Detecting signal in reference trace...")
527
+ start, stop = sp.find_signal(sig, std_factor, before=before, after=before)
528
+
529
+ # Store
530
+ self.metadata["ref_on"] = start
531
+ self.metadata["ref_off"] = stop
532
+
533
+ logger.info(f"Found indices {start, stop}.")
534
+
535
+ return self
536
+
537
+ def _update_time_subsampled(self, new_npoints: int):
538
+ """
539
+ Update time vector if subsampling was used.
540
+
541
+ If decimation was used during demodulation, the corresponding time vector needs
542
+ to be updated and placed in 'data_processed'.
543
+ """
544
+ self.set_data_processed(
545
+ "time_meas",
546
+ np.linspace(
547
+ self.get_data_raw("time_meas")[0],
548
+ self.get_data_raw("time_meas")[-1],
549
+ new_npoints,
550
+ ),
551
+ )
552
+
553
+ def load_metadata(self) -> Self:
554
+ """
555
+ Read metadata from the header of the text file with the frames onsets.
556
+
557
+ Wrap the `_load_metadata()` method, fetching the parameters from the
558
+ configuration.
559
+ """
560
+ # Get parameters
561
+ filename = self.cfg.filenames["reference_time"]
562
+ header_map = self.cfg.metadata.index_map
563
+ conversion_map = self.cfg.metadata.conversion_map
564
+
565
+ # Read metadata
566
+ logger.info(f"Reading metadata from {os.path.basename(filename)}...")
567
+ metadata = self._load_metadata(filename, header_map, conversion_map)
568
+
569
+ # Remove unused metadata for WFM files
570
+ if self.is_wfm:
571
+ metadata.pop("dt_acq", None) # will be read from the WFM file directly
572
+
573
+ # Store (add freshly loaded metadata)
574
+ self.metadata = self.metadata | metadata
575
+
576
+ if self.is_digital:
577
+ if "rf_frequency" in self.metadata:
578
+ # Frequency used for demoduation will be read from configuration instead
579
+ self.metadata["rf_frequency"] = 0
580
+ else:
581
+ logger.info(
582
+ f"RF: {self.metadata['rf_frequency'] / 1e6:3.3f} MHz read from file."
583
+ )
584
+
585
+ # Verbose
586
+ logger.info("Done.")
587
+
588
+ return self
589
+
590
    def load_frame_onsets(self) -> Self:
        """
        Read frames onsets in the reference time text file.

        For LabVIEW binary files only. For WFM files, frame onsets are read from the
        file itself (see `load_oscillo_wfm()`).

        Wrap the `_load_frame_onsets()` method, fetching the parameters from the
        configuration. Also updates `self.nframes`.
        """
        if self.is_wfm:
            # This will be read from the WFM file header.
            return self

        # Get parameters
        filename = self.cfg.filenames["reference_time"]
        nlines_header = self.cfg.files["reference_time"].header
        delimiter = self.cfg.files["reference_time"].delimiter

        # Read and store frames onsets
        if not Path(filename).is_file():
            logger.error(
                f"{Path(filename).name} does not exist, check your configuration file."
            )
            return self

        logger.info(f"Reading frame onsets from {os.path.basename(filename)}...")
        self.set_data_raw(
            "frame_onsets", self._load_frame_onsets(filename, nlines_header, delimiter)
        )
        # One onset per frame
        self.nframes = self.data_raw["frame_onsets"].shape[0]

        # Verbose
        logger.info("Done.")

        return self
626
+
627
+ def load_oscillo(self, **kwargs) -> Self:
628
+ """
629
+ Load oscilloscope data.
630
+
631
+ Arguments are passed to the loader method, either `load_oscillo_wfm()` for WFM
632
+ files, or `load_oscillo_bin()` for LabVIEW binary files.
633
+
634
+ The frames are reshaped if needed and the time vector for both the measurement
635
+ and the experiment are built and stored in `data_raw[results]`.
636
+
637
+ Parameters
638
+ ----------
639
+ **kwargs : passed to `load_oscillo_wfm()`.
640
+ """
641
+ filename = Path(self.cfg.filenames["oscillo"])
642
+
643
+ # Attempt to load metadata first
644
+ try:
645
+ self.load_metadata()
646
+ except Exception:
647
+ logger.warning("Metadata could not be loaded.")
648
+ pass
649
+
650
+ if filename.suffix.endswith("wfm"):
651
+ self.is_wfm = True
652
+ self.load_oscillo_wfm(**kwargs)
653
+ self.metadata["toffset_meas"] = self.get_data_raw("time_meas")[0]
654
+ elif filename.suffix.endswith("bin"):
655
+ self.is_wfm = False
656
+ self.load_oscillo_bin()
657
+ self.reshape_frames()
658
+ else:
659
+ logger.warning(
660
+ f"{os.path.basename(filename)} : extension not recognized, "
661
+ "assuming LabVIEW binary file."
662
+ )
663
+ self.is_wfm = False
664
+ self.load_oscillo_bin()
665
+
666
+ # Load experiment time vector
667
+ self.get_time_exp()
668
+
669
+ return self
670
+
671
    def load_oscillo_bin(self) -> Self:
        """
        Load oscilloscope binary data from LabVIEW binary files.

        Wrap `_load_oscillo_bin()`, fetching the parameters from the
        configuration; each configured measurement is stored in `data_raw`
        under its name, with unit voltage scaling metadata for compatibility
        with the WFM loader.
        """
        # Get parameters
        filename = Path(self.cfg.filenames["oscillo"])
        nchannels = len(self.cfg.measurements)
        precision = self.cfg.files["oscillo"].precision
        endian = self.cfg.files["oscillo"].endian
        order = self.cfg.files["oscillo"].order

        # Check file exists
        if not filename.is_file():
            logger.error(
                f"{filename.name} does not exist, check your configuration file."
            )
            return self

        # Read data
        logger.info(f"Loading oscilloscope data from {filename.name}...")
        res = self._load_oscillo_bin(filename, nchannels, precision, endian, order)

        # Store
        for meas_name, meas_index in self.cfg.measurements.items():
            # One row of `res` per channel (see _load_oscillo_bin)
            self.set_data_raw(meas_name, res[meas_index])
            # Add rolling average flag
            self.is_rollmean[meas_name] = False
            # Add dummy voltage scaling for compatibility with WFM files
            self.metadata["vscale"][meas_name] = 1
            self.metadata["voffset"][meas_name] = 0

        # Verbose
        logger.info(f"{', '.join(x for x in self.cfg.measurements.keys())} loaded.")

        return self
704
+
705
    def load_oscillo_wfm(self, scale: bool = False, microseconds: bool = True) -> Self:
        """
        Load oscilloscope data from Tektronix WFM files.

        One WFM file per configured measurement is read (the `_!CHANNELID`
        placeholder in the configured filename is replaced by `_ch<index>`).
        Frame onsets, time base and per-channel voltage scaling metadata are
        read from the WFM files themselves.

        Parameters
        ----------
        scale : bool, optional
            Whether to rescale loaded data to real units (e.g. volts) or keep it in
            int16. Default is False.
        microseconds : bool, optional
            Whether to convert the measurement time vector `time_meas`, to microseconds.
            Default is True.
        """
        # Get parameters
        filename_base = self.cfg.filenames["oscillo"]

        for meas_name, meas_index in self.cfg.measurements.items():
            # Build the per-channel filename from the template
            filename = Path(
                str(filename_base).replace("_!CHANNELID", f"_ch{meas_index}")
            )

            if not filename.is_file():
                # try uppercase extension
                filename = filename.with_suffix(".WFM")
                if not filename.is_file():
                    logger.error(
                        f"{filename.name} doesn't exist, check your configuration file."
                    )
                    return self

            logger.info(
                f"Loading oscilloscope data from {os.path.basename(filename)}..."
            )
            # Load data
            tek = TekWFM(filename).load_frames().get_time_frame()

            # Store data and metadata
            if scale:
                self.set_data_raw(meas_name, tek.scale_data(tek.frames))
            else:
                self.set_data_raw(meas_name, tek.frames)

            # Keep scaling so int16 data can be converted to volts later
            self.metadata["vscale"][meas_name] = tek.vscale
            self.metadata["voffset"][meas_name] = tek.voffset

            logger.info(f"{meas_name} loaded.")

        # Store (meta)data — taken from the last-read channel; all channels are
        # assumed to share the same time base
        self.set_data_raw("time_meas", tek.time_frame)
        if microseconds:
            self.is_us = True
            self.data_raw["time_meas"] *= 1e6
        else:
            self.is_us = False
        self.set_data_raw("frame_onsets", tek.frame_onsets)
        self.metadata["dt_acq"] = tek.tscale
        self.metadata["frame_duration"] = tek.npoints * tek.tscale
        self.nframes = tek.nframes
        self.npoints = tek.npoints

        return self
766
+
767
+ def load_pickup(self) -> Self:
768
+ """
769
+ Load pickup coil binary data and create the corresponding time vector.
770
+
771
+ Wrap the `_load_pickup()` method, fetching the parameters from the
772
+ configuration.
773
+ """
774
+ # Get parameters
775
+ filename = Path(self.cfg.filenames["pickup"])
776
+ precision = self.cfg.files["pickup"].precision
777
+ endian = self.cfg.files["pickup"].endian
778
+ order = self.cfg.files["pickup"].order
779
+ npickups = self.cfg.parameters.pickup_number
780
+ pickup_index = self.cfg.parameters.pickup_index
781
+ samplerate = self.cfg.parameters.pickup_samplerate
782
+
783
+ # Read and store pickup coil voltage
784
+ logger.info(f"Loading pickup data from from {filename.name}...")
785
+ if Path(filename).is_file():
786
+ self.set_data_raw(
787
+ "pickup",
788
+ self._load_pickup(
789
+ filename,
790
+ precision,
791
+ endian,
792
+ order=order,
793
+ nseries=npickups,
794
+ index=pickup_index,
795
+ ),
796
+ )
797
+ # Create and store corresponding time vector
798
+ nsamples = self.data_raw["pickup"].shape[0]
799
+ self.set_data_raw(
800
+ "pickup_time", np.linspace(0, (nsamples - 1) / samplerate, nsamples)
801
+ )
802
+
803
+ # Verbose
804
+ logger.info("Done.")
805
+ else:
806
+ logger.info(f"{filename.name} not found, skipping.")
807
+ # Create empty data that will be created when we'll know how long is the
808
+ # experiment
809
+ self.set_data_raw("pickup", np.array([]))
810
+ self.set_data_raw("pickup_time", np.array([]))
811
+
812
+ return self
813
+
814
+ def compute_field(self, method: str = "trapz") -> Self:
815
+ """
816
+ Compute magnetic field from pickup coil data.
817
+
818
+ Wraps the `sp.integrate_pickup()` function.
819
+
820
+ Parameters
821
+ ----------
822
+ method : str, optional
823
+ Integration method. Default is "trapz" (which is the only one supported).
824
+
825
+ """
826
+ # Checks
827
+ if "pickup" not in self.data_raw:
828
+ self.load_pickup()
829
+
830
+ # Get parameters
831
+ surface = self.cfg.parameters.pickup_surface
832
+
833
+ # Integrate and store
834
+ return self._compute_field(surface=surface, method=method)
835
+
836
+ def reshape_frames(
837
+ self, microseconds: bool = True, meas_names: None | str | Iterable[str] = None
838
+ ) -> Self:
839
+ """
840
+ Reshape oscillo channels array to get a column per frame.
841
+
842
+ Each measurement yields a 2D array with time series on columns, thus an array
843
+ with sahep (npoints, nframes).
844
+
845
+ Also build the corresponding measurement time vector.
846
+
847
+ Parameters
848
+ ----------
849
+ microseconds : bool, optional
850
+ Convert the time vector from seconds to microseconds. Default is True.
851
+ meas_names : None or Sequence, optional
852
+ If None (default), uses all available measurement names.
853
+
854
+ """
855
+ # Checks
856
+ if self.is_wfm:
857
+ return self
858
+
859
+ if "frame_onsets" not in self.data_raw:
860
+ self.load_frame_onsets()
861
+
862
+ # Get list of arrays to reshape
863
+ if not meas_names:
864
+ measurements = self.measurements
865
+ elif isinstance(meas_names, str):
866
+ measurements = [meas_names]
867
+ else:
868
+ measurements = meas_names
869
+
870
+ # Reshape frames in-place
871
+ reshaped = False
872
+ for measurement in measurements:
873
+ logger.info(f"Reshaping {measurement}...\t")
874
+ # check if it was loaded
875
+ if measurement not in self.data_raw:
876
+ logger.warning(f"{measurement} was not loaded, skipping.")
877
+ continue
878
+
879
+ if self._check_frames_2d(measurement, reshape=False):
880
+ # check if it makes sense to reshape the data
881
+ logger.warning(
882
+ f"{measurement} is not 1D and has probably been already reshaped."
883
+ )
884
+ reshaped = True
885
+ else:
886
+ # Reshape
887
+ self.set_data_raw(
888
+ measurement,
889
+ self._reshape_frames(
890
+ self.get_data_raw(measurement),
891
+ self.nframes, # number of frames
892
+ order=self.cfg.files["oscillo"].order,
893
+ ),
894
+ )
895
+ newshape = self.data_raw[measurement].shape
896
+ logger.info(f"Reshaped to {newshape}.")
897
+ reshaped = True
898
+
899
+ if reshaped:
900
+ if "dt_acq" not in self.metadata:
901
+ if not self.is_digital:
902
+ self.load_metadata()
903
+ else:
904
+ logger.error(
905
+ "Oscilloscope data was not loaded, could not get acquisition "
906
+ "sampling rate. Load the data then re-run reshape_frames()."
907
+ )
908
+ # Number of time points in a frame
909
+ nsamples_per_seq = self.data_raw[measurement].shape[0]
910
+ # Duration of a frame
911
+ frame_duration = nsamples_per_seq * self.metadata["dt_acq"]
912
+ self.metadata["frame_duration"] = frame_duration # store it as metadata
913
+ # Build time vector
914
+ measurement_time = np.linspace(
915
+ 0, frame_duration - self.metadata["dt_acq"], nsamples_per_seq
916
+ )
917
+ if microseconds:
918
+ # convert to microseconds
919
+ measurement_time *= 1e6
920
+ self.is_us = True
921
+ else:
922
+ self.is_us = False
923
+ # Store
924
+ self.set_data_raw("time_meas", measurement_time)
925
+ self.metadata["toffset_meas"] = self.get_data_raw("time_meas")[0]
926
+
927
+ return self
928
+
929
def get_time_exp(self) -> Self:
    """
    Build the experiment time vector from the frame onsets.

    Each frame is represented by a single time point placed at the middle of
    the frame duration. The resulting (nframes,) vector is stored in
    `data_processed[results]` under "time_exp", and the mean time step is
    recorded in `metadata["dt_exp"]`.
    """
    logger.info("Building the experiment time vector...")

    # The frame duration is required; reshape_frames() computes it as a
    # side effect when the oscilloscope data is available
    if "frame_duration" not in self.metadata:
        self.reshape_frames()
        # reshape_frames() is a no-op without oscilloscope data, so re-check
        if "frame_duration" not in self.metadata:
            logger.error(
                "Oscilloscope data not loaded, can't determine frame duration."
            )
            return self

    onsets = self.get_data_raw("frame_onsets")
    duration = self.metadata["frame_duration"]

    # One time point per frame, centered on the frame's time range
    self.set_data_processed("time_exp", onsets + duration / 2)

    # Average sampling interval of the experiment time base
    self.metadata["dt_exp"] = np.mean(np.diff(self.get_data_processed("time_exp")))

    logger.info("Done.")

    return self
964
+
965
def align_field(self) -> Self:
    """
    Interpolate the magnetic field onto the experiment time base.

    The pickup-coil is sampled much faster than the frames. To plot the
    resulting metrics against the magnetic field instead of time, the field is
    linearly interpolated onto the experiment time vector.

    Note that the stored magnetic field serie (and its time vector) are
    overwritten with the aligned versions.
    """
    # The experiment time vector is required; try to build it if missing
    if not self.get_data_processed("time_exp", checkonly=True):
        self.get_time_exp()
        # it may still be missing (e.g. data not loaded), so re-check
        if not self.get_data_processed("time_exp", checkonly=True):
            logger.error("Time vector could not be built, exiting.")
            return self

    # Recompute the field from scratch: it may have been downsampled by a
    # rolling average and we must not interpolate from an upscaled version.
    # Tracking rolling-average switches would avoid this recomputation, but
    # it is cheap anyway.
    self.compute_field()

    exptime = self.get_data_processed("time_exp")
    magtime = self.get_data_processed("magfield_time")
    magfield = self.get_data_processed("magfield")

    logger.info("Aligning magnetic field on experiment time base...")
    aligned = np.interp(exptime, magtime, magfield, left=0, right=0)
    self.set_data_processed("magfield", aligned)
    # the field now lives on the experiment time base: overwrite its time
    # vector accordingly
    self.set_data_processed("magfield_time", self.get_data_processed("time_exp"))

    logger.info("Done.")

    return self
1007
+
1008
def average_frame_range(
    self, tstart: None | float = None, tstop: None | float = None, **kwargs
) -> Self:
    """
    Average signal in the given frame time range.

    `tstart` and `tstop` are expressed in unit of time (s). If they are not
    provided (None), they are read from the Config settings
    ('analysis_window').

    If the averaging is successful :
    1. The corresponding serie NXgroup is created as
       `data_processed[results_{serie_name}{serie_idx}]`,
    2. The analysis window is set as an attribute,
    3. The mean signal amplitude and phase are derived from the I and Q
       components and stored in this serie group.

    Parameters
    ----------
    tstart, tstop : None or float, optional
        Analysis window bounds. If None (default), read from the
        configuration's 'analysis_window'; when that is not set either, an
        impossible range (inf, -inf) is used so the range check below fails
        loudly.
    **kwargs
        Passed through to the `_average_frame_range()` helper.
    """
    # Reset averaging success flag
    self.is_averaged = False
    # Get parameters
    measurements = self.measurements  # list of arrays to average
    # NOTE: use `is None` (not truthiness) so an explicit 0.0 bound is honored
    if tstart is None:
        if len(self.analysis_window) == 2:
            tstart = self.analysis_window[0]
        else:
            tstart = np.inf
    if tstop is None:
        if len(self.analysis_window) == 2:
            tstop = self.analysis_window[1]
        else:
            tstop = -np.inf
    if not self.is_us:
        # analysis_window should be in microseconds
        toffset = self.metadata["toffset_meas"] * 1e6
    else:
        toffset = self.metadata["toffset_meas"]

    # Check the range
    if tstop < tstart:
        logger.error(f"Invalid time range : {[tstart, tstop]}")
        return self

    # Prepare storage
    self.create_data_serie()

    # Perform average
    for measurement in measurements:
        # Check it was loaded / computed
        if self.is_meas_processed and not self.get_data_processed(
            measurement, checkonly=True
        ):
            logger.warning(f"{measurement} was not computed, skipping...")
            continue
        elif not self.is_meas_processed and (measurement not in self.data_raw):
            logger.warning(f"{measurement} was not loaded, skipping...")
            continue

        logger.info(f"Averaging {measurement}")
        # Determine what data to use
        if self.is_rollmean[measurement]:
            # Moving mean filter was applied: use the smoothed trace
            meas_name = f"s{measurement}"
            # Check it actually exists
            if not self.get_data_processed(meas_name, checkonly=True):
                logger.warning(
                    f"{meas_name} not found in data_processed, skipping..."
                )
                continue
            logger.info("Using smoothed trace...")
            data = self.get_data_processed(meas_name)

        elif self.is_meas_processed:
            # Use 'raw' data found in 'data_processed' (e.g. demodulated)
            logger.info("Using demodulated trace...")
            meas_name = measurement

            # Check if it is 2D (frames axis present)
            if not self._check_frames_2d(
                meas_name, where="processed", reshape=False
            ):
                logger.error(f"{meas_name} is still 1D, skipping...")
                continue

            data = self.get_data_processed(meas_name)

        else:
            # Use raw raw data
            logger.info("using raw trace...")
            meas_name = measurement

            # Check if it is 2D, attempting a reshape if not
            if not self._check_frames_2d(meas_name, reshape=True):
                logger.error(f"{meas_name} is still 1D, skipping...")
                continue

            data = self.get_data_raw(meas_name)

        # Determine time interval
        if self.is_decimated:
            # Decimation was applied: derive dt from the decimated vector
            dt = np.mean(np.diff(self.get_data_processed("time_meas")))
            if self.is_us:
                # Convert to seconds
                dt *= 1e-6
        else:
            # Original time interval
            dt = self.metadata["dt_acq"]
        # Average & store (keep the original measurement name)
        self.set_data_serie(
            measurement + "_avg",
            self._average_frame_range(
                data, tstart, tstop, dt, toffset=toffset, **kwargs
            ),
        )
        self.is_averaged = True  # averaging worked
        logger.info("Done.")

    # Compute phase from I and Q
    if self.is_averaged:
        # If averaging worked, we can compute amplitude and phase on the average
        logger.info("Computing amplitude...")
        self.compute_amplitude_avg()
        logger.info("Done.")
        logger.info("Computing phase...")
        self.compute_phase_avg()
        logger.info("Done.")

    # Store analysis window
    self.metadata["last_analysis_window"] = [tstart, tstop]
    self.set_attr_serie("analysis_window", [tstart, tstop])
    self.set_attr_serie("analysis_window_unit", "µs")

    # Get time vector in case it was not computed before
    if not self.get_data_processed("time_exp", checkonly=True):
        self.get_time_exp()
    # Get magnetic field
    if not self.get_data_processed("magfield", checkonly=True):
        self.compute_field().align_field()
    # Prepare placeholders that will be replaced by actual NXlink when the
    # object has a NXroot entry
    phtime = f"!link to:processed/analysis/{self._results_name}/time_exp"
    phmagfield = f"!link to:processed/analysis/{self._results_name}/magfield"
    # Store the link in the 'serie' group
    self.set_data_serie("time_exp", phtime)
    self.set_data_serie("magfield", phmagfield)

    return self
1156
+
1157
+ def average_frame(self, **kwargs) -> Self:
1158
+ """Alias for the `average_frame_range()` method."""
1159
+ return self.average_frame_range(**kwargs)
1160
+
1161
def _compute_amplitude_avg(self):
    """
    Compute amplitude from averaged I and Q.

    They are read from `data_processed[results][results_{serie_name}{idx_name}]`,
    and the result is stored as "amplitude_avg". Sets `is_averaged` to reflect
    whether the computation succeeded.
    """
    # Get names of the averaged components
    iname = self._i_name + "_avg"
    qname = self._q_name + "_avg"

    # Get data — both components must exist
    if not self.get_data_serie(iname, checkonly=True) or not self.get_data_serie(
        qname, checkonly=True
    ):
        # fixed message: closing quote after {qname} was missing
        logger.warning(
            f"'{iname}' or '{qname}' not found, was the signal demodulated before "
            "averaging ?"
        )
        self.is_averaged = False
        return

    i = self.get_data_serie(iname)
    q = self.get_data_serie(qname)

    # Compute averaged amplitude from the I/Q components
    self.set_data_serie("amplitude_avg", sp.compute_amp_iq(i, q))

    # Mark for success
    self.is_averaged = True
1191
+
1192
def compute_amplitude(self) -> np.ndarray:
    """
    Return signal amplitude from raw or demodulated data (not averaged).

    In analog mode the amplitude is read directly from 'data_raw'; in digital
    mode it is derived from the demodulated I and Q components.
    The returned array has shape (nframes, npoints).
    """
    if not self.is_digital:
        # analog mode: amplitude is an oscilloscope channel
        return self.get_data_raw("amplitude")

    # digital mode: derive amplitude from the demodulated components
    return sp.compute_amp_iq(
        self.get_data_processed(self._i_name),
        self.get_data_processed(self._q_name),
    )
1209
+
1210
def compute_phase(self) -> np.ndarray:
    """
    Return signal phase from raw or demodulated data (not averaged).

    Phase is the arctan of the I and Q components, corrected for pi-jumps.
    The returned array has shape (nframes, npoints).
    """
    # I/Q live in 'data_processed' in digital mode, in 'data_raw' otherwise
    getter = self.get_data_processed if self.is_digital else self.get_data_raw
    return sp.compute_phase_iq(getter(self._i_name), getter(self._q_name))
1228
+
1229
def compute_amplitude_avg(self) -> None:
    """
    Compute amplitude of the averaged time series.

    In analog mode, amplitude is directly an oscilloscope channel so only the
    success flag is set; in digital mode it must be derived from the averaged
    I and Q (delegated to `_compute_amplitude_avg()`).
    """
    if not self.is_digital:
        # nothing to derive in analog mode
        self.is_averaged = True
        return
    self._compute_amplitude_avg()
1241
+
1242
def compute_phase_avg(self):
    """Compute the averaged phase from averaged I and Q, correcting pi jumps."""
    # Averaged I and Q components
    i_avg = self.get_data_serie(self._i_name + "_avg")
    q_avg = self.get_data_serie(self._q_name + "_avg")

    # arctan(Q/I), unwrapped with the configured maximum phase jump
    phase = sp.compute_phase_iq(
        i_avg,
        q_avg,
        unwrap=True,
        period=self.cfg.settings.max_phase_jump * 2 * np.pi,
        axis=0,
    )

    # Store in the current serie group
    self.set_data_serie("phase_avg", phase)

    # Mark for success
    self.is_averaged = True
1266
+
1267
def compute_attenuation(self) -> Self:
    """
    Compute attenuation from amplitude average for the current echo.

    The baseline amplitude is the mean over the configured 'range_baseline'
    time range. The attenuation is computed with `sp.compute_attenuation()`
    from the averaged amplitude and stored in the current serie group.
    """
    # Make sure the experiment time step is known
    if "dt_exp" not in self.metadata:
        self.get_time_exp()
    if not self.is_averaged:
        logger.warning("Averaging amplitude or phase (or both) failed.")
        return self

    amp_avg = self.get_data_serie("amplitude_avg")

    # Baseline: mean amplitude over the configured time range, converted to
    # sample indices with the experiment time step
    wbline = self.cfg.settings.range_baseline
    dt = self.metadata["dt_exp"]
    idx_start = int(np.ceil(wbline[0] / dt))
    idx_stop = int(np.ceil(wbline[1] / dt))
    amp0 = amp_avg[idx_start:idx_stop].mean()

    logger.info(f"Computing attenuation for echo index {self.idx_serie}...")
    attenuation = sp.compute_attenuation(
        amp_avg,
        amp0,
        self.idx_serie,
        self.cfg.parameters.sample_length,
        mode=self.cfg.parameters.detection_mode,
        corr=self.cfg.parameters.logamp_slope,
    )
    self.set_data_serie("attenuation", attenuation)
    logger.info("Done.")

    return self
1307
+
1308
def compute_phase_shift(self) -> Self:
    """
    Compute relative phase shift given the phase average for the current echo.

    It is computed with the `sp.compute_phase_shift()` function from the
    averaged phase and stored in the current serie group under "phaseshift".

    Returns
    -------
    self : Self
        The processor itself, for method chaining.
    """
    # Get parameters: make sure the experiment time step is available
    if "dt_exp" not in self.metadata:
        self.get_time_exp()
    if not self.is_averaged:
        # nothing to do if averaging failed upstream
        logger.warning("Averaging amplitude or phase (or both) failed.")
        return self

    # Get data
    phi_avg = self.get_data_serie("phase_avg")

    # Get baseline phase: mean over the configured baseline range, converted
    # to sample indices with the experiment time step
    wbline = self.cfg.settings.range_baseline
    dt = self.metadata["dt_exp"]
    idx_start = int(np.ceil(wbline[0] / dt))
    idx_stop = int(np.ceil(wbline[1] / dt))
    phi0 = phi_avg[idx_start:idx_stop].mean()

    # Compute and store in the current serie group
    logger.info(f"Computing phase shift for echo index {self.idx_serie}...")
    self.set_data_serie(
        "phaseshift",
        sp.compute_phase_shift(
            phi_avg,
            phi0,
            self.idx_serie,
            self.cfg.parameters.sample_speed,
            self.metadata["rf_frequency"],
            self.cfg.parameters.sample_length,
            mode=self.cfg.parameters.detection_mode,
        ),
    )
    logger.info("Done.")

    return self
1349
+
1350
def rolling_average(self) -> Self:
    """
    Perform a rolling average on raw data measurements.

    Store the smoothed arrays in `data_processed[results]` with a leading "s".

    The data can optionally be sub-sampled, given the parameters defined in the
    settings section of the configuration file; in that case the corresponding
    time vector is subsampled as well and stored in `data_processed[results]`.

    Uses the `sp.rolling_average()` function.
    """
    # Collect parameters from the configuration
    wlen = self.cfg.settings.rolling_mean_wlen
    subsample = self.cfg.settings.rolling_mean_subsample

    # Check if there is something to do
    if wlen < 2:
        # wlen = 1 or 0 means no rolling average
        logger.warning(f"Time window set to {wlen}, no rolling average.")

        # Re-generate time vector in case subsampling was applied before
        self.get_time_exp()
        self.align_field()

        # Set flags: no measurement is smoothed anymore
        self.is_rollmean.update({m: False for m in self.measurements})
        return self

    for measurement in self.measurements:
        # Get data from the right location (processed vs raw)
        if self.is_meas_processed:
            if not self.get_data_processed(measurement, checkonly=True):
                logger.warning(f"{measurement} was not computed, skipping.")
                continue

            data = self.get_data_processed(measurement)
            where = "processed"
            reshape = False
        else:
            if measurement not in self.data_raw:
                logger.warning(f"{measurement} was not loaded, skipping.")
                continue
            data = self.get_data_raw(measurement)
            where = "raw"
            # NOTE(review): reshape is disabled here even for raw data, so a
            # 1D raw measurement will hit the error branch below — confirm
            # this is intended rather than reshape=True
            reshape = False

        # Check data is 2D (frames axis present)
        if self._check_frames_2d(measurement, where=where, reshape=reshape):
            # Yup, apply moving mean along the frames axis
            logger.info(f"Applying rolling average to {measurement}...")
            self.set_data_processed(
                f"s{measurement}",
                sp.rolling_average(data, wlen, subsample=subsample, axis=1),
            )
            self.is_rollmean[measurement] = True
            logger.info("Done.")

            # Re-generate time vector in case of subsampling
            self.get_time_exp()

            # If subsampling is on, we need to update the experiment time vector
            if subsample:
                logger.info(
                    "Subsampling is on, updating time and magnetic field..."
                )
                self.set_data_processed(
                    "time_exp",
                    sp.subsample_array(
                        self.get_data_processed("time_exp"), wlen - 1
                    ),
                )

                # Re-align magnetic field on the subsampled time base
                self.align_field()

        else:
            # Nope and reshape did not work somehow
            logger.error(
                f"{measurement} is not 2D and couldn't be reshaped, exiting..."
            )
            continue

    return self
1434
+
1435
def find_f0(self) -> Self:
    """
    Find center frequency in reference signal.

    The resulting frequency will be used to build the continuous reference wave
    used for demodulating the signal. It is stored in
    `metadata["rf_frequency"]`.

    If it is not set in the "demodulation" section of the configuration (set to
    0), it will be detected automatically using the `sp.find_f0()` function
    that relies on RFFT.

    Only the part of the reference signal where there is an actual signal (as
    found by `_find_signal_in_ref()`) will be used to find f0. Optionally,
    detrending can be applied for better results of the FFT. This is controlled
    with the "detrend" parameter in the configuration.
    """
    # Check there is something to do: f0 only makes sense in digital mode
    if not self.is_digital:
        logger.warning("Can't find center frequency in analog mode.")
        return self
    else:
        # for type checking: digital mode implies a demodulation section
        assert self.cfg.demodulation is not None

    if self.cfg.demodulation.f0 > 0:
        # f0 explicitly set in the configuration: use it as-is
        self.metadata["rf_frequency"] = self.cfg.demodulation.f0
        logger.info(
            "Read f0 set in configuration : "
            f"{self.metadata['rf_frequency'] / 1e6:3.3f}MHz."
        )
        return self

    # Get parameters
    if self._ref_name not in self.data_raw:
        logger.error("Reference signal was not loaded.")
        return self

    if "ref_on" not in self.metadata:
        # find reference signal onset and offset
        self._find_signal_in_ref()

    # Restrict to the portion of the frames where the reference is active
    tstart = self.metadata["ref_on"]
    tstop = self.metadata["ref_off"]
    nframes = self.cfg.demodulation.fft_nframes
    if nframes > 0:
        # get a reproducible random subset of frames (seeded RNG)
        rng = np.random.default_rng(self._seed)
        framesid = rng.integers(0, self.nframes, nframes)
        sig = self.get_data_raw(self._ref_name)[tstart:tstop, framesid]
    else:
        # take all frames
        sig = self.get_data_raw(self._ref_name)[tstart:tstop, :]

    # Optional detrending for a cleaner FFT peak
    if self.cfg.demodulation.detrend:
        logger.info("Detrending signal...")
        sig = signal.detrend(sig, axis=0)

    logger.info("Finding center frequency in reference signal...")
    # per-frame f0 estimates averaged into a single frequency
    self.metadata["rf_frequency"] = sp.find_f0(
        sig, 1 / self.metadata["dt_acq"]
    ).mean()

    logger.info(f"Found {self.metadata['rf_frequency'] / 1e6:3.3f}MHz.")

    return self
1501
+
1502
def demodulate(self, **kwargs) -> Self:
    """
    Digital demodulation : frequency-shifting followed by a low-pass filter.

    A reference signal and a measurement signal are required, as well as a
    center frequency (f0, see the `find_f0()` method).

    The reference signal is fitted to extract the phase (when there is signal,
    see the `_find_signal_in_ref()` method) and build a continuous,
    mono-frequency signal. The measurement signal is frequency-shifted
    (multiplied by cos(phi_ref) and sin(phi_ref)), then a low-pass filter is
    applied. Optionally, the frequency-shifted signal can be decimated before
    applying the filter. The filter properties are set in the configuration.

    The demodulation is done in chunks to not saturate memory (chunks size is
    set in the configuration).

    The I and Q components are stored in the `data_processed[results]`. If
    decimation is used, the corresponding time vector is updated accordingly
    and stored there as well.

    Uses the `sp.demodulate_chunks()` function.

    Parameters
    ----------
    **kwargs
        Forwarded to `sp.demodulate_chunks()`.
    """
    if not self.is_digital:
        logger.warning("Can't perform demodulation in analog mode.")
        return self
    else:
        # for type checking: digital mode implies a demodulation section
        assert self.cfg.demodulation is not None

    # Check data is loaded and prerequisites are available
    if "ref_on" not in self.metadata:
        self._find_signal_in_ref()
    if ("rf_frequency" not in self.metadata) or self.metadata["rf_frequency"] == 0:
        self.find_f0()
    if "time_meas" not in self.data_raw:
        logger.error("Data was not loaded, exiting.")
        return self
    else:
        if self.is_us:
            tmeas = self.get_data_raw("time_meas") * 1e-6  # convert back to seconds
        else:
            tmeas = self.get_data_raw("time_meas")

    # Get parameters for reference fitting range (indices where the reference
    # signal is active)
    istart = self.metadata["ref_on"]
    istop = self.metadata["ref_off"]

    logger.info("Demodulation...")
    in_phase, out_phase = sp.demodulate_chunks(
        tmeas[istart:istop],
        self.get_data_raw(self._ref_name)[istart:istop, :],
        tmeas,
        self.get_data_raw(self._sig_name),
        f0=self.metadata["rf_frequency"],
        filter_order=self.cfg.demodulation.filter_order,
        filter_fc=self.cfg.demodulation.filter_fc,
        decimate_factor=self.cfg.demodulation.decimate_factor,
        chunksize=self.cfg.demodulation.chunksize,
        **kwargs,
    )
    logger.info("Done.")

    # Store the demodulated components
    self.set_data_processed(self._i_name, in_phase)  # I
    self.set_data_processed(self._q_name, out_phase)  # Q

    # In case of decimation, the time vector needs to be updated
    if self.cfg.demodulation.decimate_factor > 1:
        # Get new number of points and create new time vector
        self._update_time_subsampled(in_phase.shape[0])
        # Update flag
        self.is_decimated = True
    else:
        # Remove decimated time vector left over from a previous run
        self.remove_data_processed("time_meas")
        self.is_decimated = False

    # Remove rolling-averaged data (now stale) and update flags
    self.is_rollmean.update({m: False for m in self.measurements})
    for meas in self.measurements:
        self.remove_data_processed(f"s{meas}")
    # Update time vector that was maybe subsampled because of rolling average
    self.get_time_exp()
    self.align_field()

    return self
1589
+
1590
def get_csv_filename(self) -> str:
    """
    Build the output CSV file name for the current echo.

    The file lives in the data directory set in the configuration; its name
    embeds the experiment ID, the last analysis time window and the echo
    index.
    """
    base = os.path.join(self.cfg.data_directory, self.cfg.expid + "-results")
    window = self.metadata["last_analysis_window"]
    return f"{base}_{window[0]:.3f}-{window[1]:.3f}_echo{self.idx_serie}.csv"
1602
+
1603
def to_csv(
    self, fname: None | str | Path = None, sep: str = "\t", to_cm: bool = False
):
    r"""
    Export attenuation and phase shift data as a CSV file.

    Column names are written on the first line. The current echo index is used
    to select which data to save.

    Parameters
    ----------
    fname : str, Path or None, optional
        Full path to the output file. If None (default), the name is generated
        automatically with `get_csv_filename()`.
    sep : str, optional
        Separator in the file, usually "," (csv) or "\t" (tsv, default).
    to_cm : bool, optional
        Convert attenuation from dB/m to dB/cm. Default is False.
    """
    # Create output file name when not provided
    if not fname:
        fname = self.get_csv_filename()

    logger.info(f"Saving at {fname}...")

    # Collect data to be saved, promoting each serie to a column vector
    attenuation = self.get_data_serie("attenuation")[..., np.newaxis]
    columns = (
        self.get_data_processed("time_exp")[..., np.newaxis],
        self.get_data_processed("magfield")[..., np.newaxis],
        self.get_data_serie("phase_avg")[..., np.newaxis],
        self.get_data_serie("amplitude_avg")[..., np.newaxis],
        attenuation / 100 if to_cm else attenuation,  # dB/m -> dB/cm
        self.get_data_serie("phaseshift")[..., np.newaxis],
    )
    data = np.concatenate(columns, axis=1)

    # Configure output file header
    header = sep.join(
        ("time", "field", "phase", "amplitude", "attenuation", "dphase")
    )

    # Save CSV file
    np.savetxt(fname, data, delimiter=sep, header=header, comments="")

    logger.info("Done.")
1650
+
1651
+ def _check_frames_2d(
1652
+ self, meas_name: str, where: str = "raw", reshape: bool = True
1653
+ ) -> bool:
1654
+ """
1655
+ Check if the array corresponding to `meas_name` is 1 or 2D.
1656
+
1657
+ The data is looked up in 'data_raw' if `where` is "raw", and in 'data_processed'
1658
+ if `where` is "processed".
1659
+ If the former and `reshape` is True, call the `reshape_frame()` method on that
1660
+ measurement. This option is valid only for `where` = "raw".
1661
+
1662
+ Parameters
1663
+ ----------
1664
+ meas_name : str
1665
+ Name of some measurement in `data_raw`.
1666
+ where : {"raw", "processed"}, optional
1667
+ Whether to look up the data in 'data_raw' (default) or in 'data_processed'.
1668
+ reshape : bool, optional
1669
+ Whether to attempt to reshape frames through the `reshape_frame()` method.
1670
+ Default is True.
1671
+
1672
+ Returns
1673
+ -------
1674
+ res : bool
1675
+ Returns true if the array is 2D, or if `reshape_frame()` was called.
1676
+ """
1677
+ match where:
1678
+ case "raw":
1679
+ data = self.get_data_raw(meas_name)
1680
+ case "processed":
1681
+ data = self.get_data_processed(meas_name)
1682
+ reshape = False # disable reshape that works only on raw data
1683
+ case _:
1684
+ logger.warning(
1685
+ f"{where} is not recognized as a valid data location, assuming it "
1686
+ "is 'raw'."
1687
+ )
1688
+ where = "raw"
1689
+ data = self.get_data_raw(meas_name)
1690
+ if data.ndim < 2:
1691
+ # Not 2D
1692
+ if reshape:
1693
+ # Reshape
1694
+ self.reshape_frames(meas_names=[meas_name])
1695
+
1696
+ # Check if it worked
1697
+ data = self.get_data_raw(meas_name)
1698
+ if data.ndim < 2:
1699
+ # Still not 2D
1700
+ return False
1701
+ else:
1702
+ # Yup
1703
+ return True
1704
+ else:
1705
+ # Don't try to reshape, just return Nope
1706
+ return False
1707
+ else:
1708
+ # Yup
1709
+ return True
1710
+
1711
def _check_measurement_time_us(self) -> bool:
    """
    Check if the measurement time vector is in microseconds.

    Read the `time_meas` dataset attribute "units"; if it does not exist, look
    at the order of magnitude of the values in the vector and compare to the
    `FRAME_TIMESCALE` module-level constant: if the maximum is more than 1000
    times larger, it is assumed the vector is in microseconds.

    If the measurement time vector is not found, issue a warning and return
    True, as microseconds is the default.
    """
    if "time_meas" in self.data_raw:
        if "units" in self.data_raw["time_meas"].attrs:
            # Explicit units attribute takes precedence
            if self.data_raw["time_meas"].attrs["units"] in ("us", "µs"):
                return True
            else:
                return False
        else:
            # No units attribute: guess from the scale of the values
            if self.data_raw["time_meas"].max() > 1e3 * FRAME_TIMESCALE:
                return True
    # vector missing, or units undeterminable: default to microseconds
    logger.warning("Could not determine 'time_meas' units, guessing µs (default).")
    return True
1736
+
1737
def _check_averaged(self) -> bool:
    """
    Check if data was averaged.

    This is done by checking if any dataset name inside an NXdata group of
    `data_processed` ends with "_avg", as written by the
    `average_frame_range()` method.
    """
    suffix = "_avg"
    return any(
        key.endswith(suffix)
        for group in self.data_processed.values()
        if isinstance(group, nx.NXdata)
        for key in group
    )
1752
+
1753
+ def _check_digital(self) -> bool:
1754
+ """
1755
+ Check if the input should be treated as analog or digital.
1756
+
1757
+ This is done by checking if the configuration has a "demodulation" section. Note
1758
+ that even if this section is empty, True will be returned.
1759
+ """
1760
+ # Check if the Config object has a non-None 'demodulation' attribute
1761
+ res = getattr(self.cfg, "demodulation", None)
1762
+ if res is None:
1763
+ return False
1764
+ else:
1765
+ return True
1766
+
1767
+ def _check_rollmean(self) -> dict[str, bool]:
1768
+ """
1769
+ Check if rolling average was applied for each measurement.
1770
+
1771
+ This is done by checking if measurements datasets are found in
1772
+ `data_processed[results]` with a name with a leading "s", as written by the
1773
+ `rolling_average()` method.
1774
+ """
1775
+ is_rollmean = dict()
1776
+ for meas in self.measurements:
1777
+ if self.get_data_processed(f"s{meas}", checkonly=True):
1778
+ is_rollmean[meas] = True
1779
+ else:
1780
+ is_rollmean[meas] = False
1781
+ return is_rollmean
1782
+
1783
+ def _check_time_subsampled(self):
1784
+ """
1785
+ Check if measurement time vector was subsampled.
1786
+
1787
+ Happens when decimation is used during demodulation. This is done by checking
1788
+ if there is a "time_meas" dataset in `data_processed[results]` as this indicates
1789
+ some processing was applied to this vector.
1790
+ """
1791
+ return self.get_data_processed("time_meas", checkonly=True)
1792
+
1793
def load(self, filename: str | Path | None) -> Self:
    """
    Load a previously created NeXus file.

    Builds on the `BaseProcessor.load()` machinery, with additional steps:
    guess the internal processing flags and restore convenience attributes.
    The returned processor can be used to resume analysis where it was left
    off.

    Parameters
    ----------
    filename : str, Path or None
        Full path to the file to load.
    """
    super().load(filename)

    # Measurement names come from the configuration
    self.measurements = list(self.cfg.measurements.keys())

    # Infer the processing state (averaged, rolling mean, decimated, ...)
    self._guess_flags()

    # Restore convenience attributes
    if not self.is_digital:
        self._i_name = I_NAME_ANALOG
        self._q_name = Q_NAME_ANALOG

    if "frame_onsets" not in self.data_raw:
        logger.warning("Could not read number of frames, features will be limited.")
        print("You can set it manually with 'd.nframes = 16000'")
    else:
        self.nframes = self.data_raw["frame_onsets"].shape[0]

    if "time_meas" not in self.data_raw:
        logger.warning(
            "Could not read number of measurement time points, features will be"
            "limited."
        )
        print("You can set it manually with 'd.npoints = 5000'")
    else:
        self.npoints = self.data_raw["time_meas"].shape[0]

    return self
1833
+
1834
+ def batch_process(
1835
+ self,
1836
+ expids: Sequence,
1837
+ rolling_average: bool = False,
1838
+ save_csv: bool = False,
1839
+ save_csv_kwargs: dict = {},
1840
+ find_f0: bool = False,
1841
+ batch_progress_emitter: Any = None,
1842
+ demodulation_progress_emitter: Any = None,
1843
+ ) -> Self:
1844
+ """
1845
+ Batch-process a list of experiment IDs, keeping current parameters.
1846
+
1847
+ Only supports datasets present in the current `data_directory`. Optionally,
1848
+ export the results as a CSV file for each dataset.
1849
+
1850
+ Parameters
1851
+ ----------
1852
+ expids : Sequence
1853
+ List of experiment ID to process.
1854
+ rolling_average : bool, optional
1855
+ Whether to apply rolling average in the process. Default is False.
1856
+ save_csv : bool, optional
1857
+ Whether to export results as a CSV file. Default is False.
1858
+ save_csv_kwargs : dict, optional
1859
+ Used only when `save_csv` is True. Specify arguments for the `to_csv()`
1860
+ method. Default is an empty dict (default arguments will be used).
1861
+ find_f0 : bool, optional
1862
+ Force finding f0 in digital mode, whatever the value of `f0` in the
1863
+ demodulation section of the current configuration. Default is False.
1864
+ batch_progress_emitter : Any, optional
1865
+ An object with an `emit()` method, such as a pyqtpyqtSignal. The loop index is
1866
+ emitted at each iteration of the main loop. Default is None.
1867
+ demodulation_progress_emitter : Any, optional
1868
+ An object with an `emit()` method, such as a pyqtpyqtSignal. The loop index is
1869
+ emitted at each iteration of the demodulation loop. Default is None.
1870
+ """
1871
+ for idx, expid in enumerate(expids):
1872
+ # Set the experiment ID (will reinitialize data but not configuration)
1873
+ self.expid = expid
1874
+
1875
+ # Load data
1876
+ self.load_oscillo(scale=True)
1877
+ # Load and align magnetic field
1878
+ self.align_field()
1879
+
1880
+ # Demodulate
1881
+ if self.is_digital:
1882
+ if find_f0 and self.cfg.demodulation is not None:
1883
+ self.cfg.demodulation.f0 = 0
1884
+ self.demodulate(progress_emitter=demodulation_progress_emitter)
1885
+
1886
+ # Rolling average
1887
+ if rolling_average:
1888
+ self.rolling_average()
1889
+
1890
+ # Average frames and compute
1891
+ self.average_frame_range().compute_attenuation().compute_phase_shift()
1892
+
1893
+ # Export as CSV
1894
+ if save_csv:
1895
+ self.to_csv(**save_csv_kwargs)
1896
+
1897
+ # Emit progress
1898
+ if batch_progress_emitter is not None:
1899
+ batch_progress_emitter.emit(idx)
1900
+
1901
+ return self