ibl-neuropixel 1.8.1.tar.gz → 1.9.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. {ibl_neuropixel-1.8.1/src/ibl_neuropixel.egg-info → ibl_neuropixel-1.9.0}/PKG-INFO +2 -20
  2. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/README.md +1 -19
  3. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/setup.py +1 -1
  4. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0/src/ibl_neuropixel.egg-info}/PKG-INFO +2 -20
  5. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/SOURCES.txt +3 -1
  6. ibl_neuropixel-1.9.0/src/ibldsp/plots.py +135 -0
  7. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/utils.py +125 -14
  8. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/voltage.py +152 -10
  9. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/waveform_extraction.py +1 -0
  10. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/spikeglx.py +7 -3
  11. ibl_neuropixel-1.9.0/src/tests/unit/test_plots.py +30 -0
  12. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/unit/test_spikeglx.py +2 -0
  13. ibl_neuropixel-1.8.1/src/tests/unit/test_ibldsp.py → ibl_neuropixel-1.9.0/src/tests/unit/test_utils.py +16 -98
  14. ibl_neuropixel-1.9.0/src/tests/unit/test_voltage.py +142 -0
  15. ibl_neuropixel-1.8.1/src/ibldsp/plots.py +0 -58
  16. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/LICENSE +0 -0
  17. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/MANIFEST.in +0 -0
  18. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/setup.cfg +0 -0
  19. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/dependency_links.txt +0 -0
  20. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/requires.txt +0 -0
  21. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/top_level.txt +0 -0
  22. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/__init__.py +0 -0
  23. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/cadzow.py +0 -0
  24. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/cuda_tools.py +0 -0
  25. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/destripe_gpu.py +0 -0
  26. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/filter_gpu.py +0 -0
  27. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/fourier.py +0 -0
  28. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/icsd.py +0 -0
  29. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/raw_metrics.py +0 -0
  30. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/smooth.py +0 -0
  31. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/spiketrains.py +0 -0
  32. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/ibldsp/waveforms.py +0 -0
  33. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/neuropixel.py +0 -0
  34. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/neurowaveforms/__init__.py +0 -0
  35. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/neurowaveforms/model.py +0 -0
  36. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/NP1_meta/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  37. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/NP21_meta/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  38. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/NP24_meta/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  39. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/_spikeglx_ephysData_g0_t0.imec0.ap.ch +0 -0
  40. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  41. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_376_channels.ap.meta +0 -0
  42. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_g0_t0.imec.ap.meta +0 -0
  43. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_g0_t0.imec.lf.meta +0 -0
  44. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_g0_t0.imec.wiring.json +0 -0
  45. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_short_g0_t0.imec.ap.meta +0 -0
  46. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B2_exported.imec0.ap.meta +0 -0
  47. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_catgt.ap.meta +0 -0
  48. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.imec1.ap.meta +0 -0
  49. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.imec1.lf.meta +0 -0
  50. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.nidq.meta +0 -0
  51. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.nidq.wiring.json +0 -0
  52. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_version202304.ap.meta +0 -0
  53. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.1_g0_t0.imec.ap.meta +0 -0
  54. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.1_prototype.ap.meta +0 -0
  55. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_1shank_g0_t0.imec.ap.meta +0 -0
  56. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_4shanks_appVersion20230905.ap.meta +0 -0
  57. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_4shanks_g0_t0.imec.ap.meta +0 -0
  58. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_4shanks_while_acquiring_incomplete.ap.meta +0 -0
  59. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNPultra_g0_t0.imec0.ap.meta +0 -0
  60. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_arr_in.npy +0 -0
  61. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_arr_peak.npy +0 -0
  62. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_df.csv +0 -0
  63. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_df_wavinfo.csv +0 -0
  64. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/integration/__init__.py +0 -0
  65. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/integration/csd_experiments.py +0 -0
  66. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/integration/test_destripe.py +0 -0
  67. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/unit/__init__.py +0 -0
  68. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/unit/test_ephys_np2.py +0 -0
  69. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/unit/test_neuropixel.py +0 -0
  70. {ibl_neuropixel-1.8.1 → ibl_neuropixel-1.9.0}/src/tests/unit/test_waveforms.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ibl-neuropixel
3
- Version: 1.8.1
3
+ Version: 1.9.0
4
4
  Summary: Collection of tools for Neuropixel 1.0 and 2.0 probes data
5
5
  Home-page: https://github.com/int-brain-lab/ibl-neuropixel
6
6
  Author: The International Brain Laboratory
@@ -71,22 +71,4 @@ The following describes the methods implemented in this repository.
71
71
  https://doi.org/10.6084/m9.figshare.19705522
72
72
 
73
73
  ## Contribution
74
- Contribution checklist:
75
- - run tests
76
- - ruff format
77
- - PR to main
78
-
79
-
80
- Pypi Release checklist:
81
- - Edit the version number in `setup.py`
82
- - add release notes in `release_notes.md`
83
-
84
-
85
- ```shell
86
- ruff format
87
- tag=X.Y.Z
88
- git tag -a $tag
89
- git push origin $tag
90
- ```
91
-
92
- Create new release with tag X.Y.Z (will automatically publish to PyPI)
74
+ Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to contribute to this project.
@@ -39,22 +39,4 @@ The following describes the methods implemented in this repository.
39
39
  https://doi.org/10.6084/m9.figshare.19705522
40
40
 
41
41
  ## Contribution
42
- Contribution checklist:
43
- - run tests
44
- - ruff format
45
- - PR to main
46
-
47
-
48
- Pypi Release checklist:
49
- - Edit the version number in `setup.py`
50
- - add release notes in `release_notes.md`
51
-
52
-
53
- ```shell
54
- ruff format
55
- tag=X.Y.Z
56
- git tag -a $tag
57
- git push origin $tag
58
- ```
59
-
60
- Create new release with tag X.Y.Z (will automatically publish to PyPI)
42
+ Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to contribute to this project.
@@ -8,7 +8,7 @@ with open("requirements.txt") as f:
8
8
 
9
9
  setuptools.setup(
10
10
  name="ibl-neuropixel",
11
- version="1.8.1",
11
+ version="1.9.0",
12
12
  author="The International Brain Laboratory",
13
13
  description="Collection of tools for Neuropixel 1.0 and 2.0 probes data",
14
14
  long_description=long_description,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ibl-neuropixel
3
- Version: 1.8.1
3
+ Version: 1.9.0
4
4
  Summary: Collection of tools for Neuropixel 1.0 and 2.0 probes data
5
5
  Home-page: https://github.com/int-brain-lab/ibl-neuropixel
6
6
  Author: The International Brain Laboratory
@@ -71,22 +71,4 @@ The following describes the methods implemented in this repository.
71
71
  https://doi.org/10.6084/m9.figshare.19705522
72
72
 
73
73
  ## Contribution
74
- Contribution checklist:
75
- - run tests
76
- - ruff format
77
- - PR to main
78
-
79
-
80
- Pypi Release checklist:
81
- - Edit the version number in `setup.py`
82
- - add release notes in `release_notes.md`
83
-
84
-
85
- ```shell
86
- ruff format
87
- tag=X.Y.Z
88
- git tag -a $tag
89
- git push origin $tag
90
- ```
91
-
92
- Create new release with tag X.Y.Z (will automatically publish to PyPI)
74
+ Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to contribute to this project.
@@ -59,7 +59,9 @@ src/tests/integration/csd_experiments.py
59
59
  src/tests/integration/test_destripe.py
60
60
  src/tests/unit/__init__.py
61
61
  src/tests/unit/test_ephys_np2.py
62
- src/tests/unit/test_ibldsp.py
63
62
  src/tests/unit/test_neuropixel.py
63
+ src/tests/unit/test_plots.py
64
64
  src/tests/unit/test_spikeglx.py
65
+ src/tests/unit/test_utils.py
66
+ src/tests/unit/test_voltage.py
65
67
  src/tests/unit/test_waveforms.py
@@ -0,0 +1,135 @@
1
+ import numpy as np
2
+ import matplotlib.pyplot as plt
3
+
4
+ AP_RANGE_UV = 75
5
+ LF_RANGE_UV = 250
6
+
7
+
8
+ def show_channels_labels(
9
+ raw,
10
+ fs,
11
+ channel_labels,
12
+ xfeats,
13
+ similarity_threshold=(-0.5, 1),
14
+ psd_hf_threshold=0.02,
15
+ ):
16
+ """
17
+ Shows the channel features side by side with a snippet of raw data
18
+ :param raw: (nc, ns) array of raw voltage traces used to compute the features
19
+ :return: matplotlib figure and axes handles
20
+ """
21
+ nc, ns = raw.shape
22
+ raw = raw - np.mean(raw, axis=-1)[:, np.newaxis] # removes DC offset
23
+ ns_plot = np.minimum(ns, 3000)
24
+ fig, ax = plt.subplots(
25
+ 1, 5, figsize=(18, 6), gridspec_kw={"width_ratios": [1, 1, 1, 8, 0.2]}
26
+ )
27
+ ax[0].plot(xfeats["xcor_hf"], np.arange(nc))
28
+ ax[0].plot( # plot channels below the similarity threshold as dead, in black
29
+ xfeats["xcor_hf"][(iko := channel_labels == 1)], np.arange(nc)[iko], "k*"
30
+ )
31
+ ax[0].plot( # plot the values above the similarity threshold as noisy in red
32
+ xfeats["xcor_hf"][
33
+ (iko := np.where(xfeats["xcor_hf"] > similarity_threshold[1]))
34
+ ],
35
+ np.arange(nc)[iko],
36
+ "r*",
37
+ )
38
+ ax[0].plot(similarity_threshold[0] * np.ones(2), [0, nc], "k--")
39
+ ax[0].plot(similarity_threshold[1] * np.ones(2), [0, nc], "r--")
40
+ ax[0].set(
41
+ ylabel="channel #",
42
+ xlabel="high coherence",
43
+ ylim=[0, nc],
44
+ title="a) dead channel",
45
+ )
46
+ ax[1].plot(xfeats["psd_hf"], np.arange(nc))
47
+ ax[1].plot(
48
+ xfeats["psd_hf"][(iko := xfeats["psd_hf"] > psd_hf_threshold)],
49
+ np.arange(nc)[iko],
50
+ "r*",
51
+ )
52
+ ax[1].plot(psd_hf_threshold * np.array([1, 1]), [0, nc], "r--")
53
+ ax[1].set(yticklabels=[], xlabel="PSD", ylim=[0, nc], title="b) noisy channel")
54
+ ax[1].sharey(ax[0])
55
+ ax[2].plot(xfeats["xcor_lf"], np.arange(nc))
56
+ ax[2].plot(
57
+ xfeats["xcor_lf"][(iko := channel_labels == 3)], np.arange(nc)[iko], "y*"
58
+ )
59
+ ax[2].plot([-0.75, -0.75], [0, nc], "y--")
60
+ ax[2].set(yticklabels=[], xlabel="LF coherence", ylim=[0, nc], title="c) outside")
61
+ ax[2].sharey(ax[0])
62
+ voltageshow(raw[:, :ns_plot], fs, ax=ax[3], cax=ax[4])
63
+ ax[3].sharey(ax[0])
64
+ fig.tight_layout()
65
+ return fig, ax
66
+
67
+
68
+ def voltageshow(
69
+ raw,
70
+ fs,
71
+ cmap="PuOr",
72
+ ax=None,
73
+ cax=None,
74
+ cbar_label="Voltage (uV)",
75
+ scaling=1e6,
76
+ vrange=None,
77
+ **axis_kwargs,
78
+ ):
79
+ """
80
+ Visualizes electrophysiological voltage data as a heatmap.
81
+
82
+ This function displays raw voltage data as a color-coded image with appropriate
83
+ scaling based on the sampling frequency. It automatically selects voltage range
84
+ based on whether the data is low-frequency (LF) or action potential (AP) data.
85
+
86
+ Parameters
87
+ ----------
88
+ raw : numpy.ndarray
89
+ Raw voltage data array with shape (channels, samples), in Volts
90
+ fs : float
91
+ Sampling frequency in Hz, used to determine time axis scaling and voltage range.
92
+ cmap : str, optional
93
+ Matplotlib colormap name for the heatmap. Default is 'PuOr'.
94
+ ax : matplotlib.axes.Axes, optional
95
+ Axes object to plot on. If None, a new figure and axes are created.
96
+ cax : matplotlib.axes.Axes, optional
97
+ Axes object for the colorbar. If None and ax is None, a new colorbar axes is created.
98
+ cbar_label : str, optional
99
+ Label for the colorbar. Default is 'Voltage (uV)'.
100
+ vrange: float, optional
101
+ Voltage range for the colorbar. Defaults to +/- 75 uV for AP and +/- 250 uV for LF.
102
+ scaling: float, optional
103
+ Unit conversion factor applied before plotting; defaults to 1e6 (data expected in Volts, displayed in uV).
104
+ **axis_kwargs: optional
105
+ Additional keyword arguments for the axis properties, fed to the ax.set() method.
106
+ Returns
107
+ -------
108
+ matplotlib.image.AxesImage
109
+ The image object created by imshow, which can be used for further customization.
110
+ """
111
+ if ax is None:
112
+ fig, axs = plt.subplots(1, 2, gridspec_kw={"width_ratios": [1, 0.05]})
113
+ ax, cax = axs
114
+ nc, ns = raw.shape
115
+ default_vrange = LF_RANGE_UV if fs < 2600 else AP_RANGE_UV
116
+ vrange = vrange if vrange is not None else default_vrange
117
+ im = ax.imshow(
118
+ raw * scaling,
119
+ origin="lower",
120
+ cmap=cmap,
121
+ aspect="auto",
122
+ vmin=-vrange,
123
+ vmax=vrange,
124
+ extent=[0, ns / fs, 0, nc],
125
+ )
126
+ # set the axis properties: we use defaults values that can be overridden by user-provided ones
127
+ axis_kwargs = (
128
+ dict(ylim=[0, nc], xlabel="Time (s)", ylabel="Depth (μm)") | axis_kwargs
129
+ )
130
+ ax.set(**axis_kwargs)
131
+ ax.grid(False)
132
+ if cax is not None:
133
+ plt.colorbar(im, cax=cax, shrink=0.8).ax.set(ylabel=cbar_label)
134
+
135
+ return im
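
A minimal usage sketch of the new `voltageshow` helper; the synthetic array, noise level and figure handling below are illustrative assumptions, not part of the release:

```python
import numpy as np
import matplotlib.pyplot as plt

from ibldsp.plots import voltageshow

# synthetic AP-band snippet: 384 channels, 0.1 s at 30 kHz, ~10 uV of noise, expressed in Volts
fs = 30_000
raw = (np.random.rand(384, 3_000) - 0.5) * 20e-6

# fs above ~2600 Hz selects the AP colour range (+/- 75 uV); pass vrange to override it
im = voltageshow(raw, fs, cbar_label="Voltage (uV)", title="synthetic AP snippet")
plt.show()
```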
@@ -268,12 +268,64 @@ def make_channel_index(geom, radius=200.0, pad_val=None):
268
268
 
269
269
  class WindowGenerator(object):
270
270
  """
271
- `wg = WindowGenerator(ns, nswin, overlap)`
271
+ A utility class for generating sliding windows for signal processing applications.
272
272
 
273
- Provide sliding windows indices generator for signal processing applications.
274
- For straightforward spectrogram / periodogram implementation, prefer scipy methods !
273
+ WindowGenerator provides various methods to iterate through windows of a signal
274
+ with configurable window size and overlap. It's particularly useful for operations
275
+ like spectrograms, filtering, or any processing that requires windowed analysis.
275
276
 
276
- Example of implementations in test_dsp.py.
277
+ Parameters
278
+ ----------
279
+ ns : int
280
+ Total number of samples in the signal to be windowed.
281
+ nswin : int
282
+ Number of samples in each window.
283
+ overlap : int
284
+ Number of samples that overlap between consecutive windows.
285
+
286
+ Attributes
287
+ ----------
288
+ ns : int
289
+ Total number of samples in the signal.
290
+ nswin : int
291
+ Number of samples in each window.
292
+ overlap : int
293
+ Number of samples that overlap between consecutive windows.
294
+ nwin : int
295
+ Total number of windows.
296
+ iw : int or None
297
+ Current window index during iteration.
298
+
299
+ Notes
300
+ -----
301
+ For straightforward spectrogram or periodogram implementation,
302
+ scipy methods are recommended over this class.
303
+
304
+ Examples
305
+ --------
306
+ # straight windowing without overlap
307
+ >>> wg = WindowGenerator(ns=1000, nswin=111, overlap=0)
308
+ >>> signal = np.random.randn(1000)
309
+ >>> for window_slice in wg.slice:
310
+ ... window_data = signal[window_slice]
311
+ ... # Process window_data
312
+
313
+ # windowing with overlap (i.e. buffers for apodization)
314
+ >>> for win_slice, valid_slice, win_valid_slice in wg.slices_valid:
315
+ ... window = signal[win_slice]
316
+ ... # Process window
317
+ ... processed = some_function_with_edge_effect(window)
318
+ ... # Only use the valid portion for reconstruction
319
+ ... recons[valid_slice] = processed[win_valid_slice]
320
+
321
+ # splicing adds a fade-in / fade-out in the overlap so that the reconstruction has unit amplitude
322
+ >>> recons = np.zeros_like(signal)
323
+ >>> for win_slice, amplitude in wg.splice:
324
+ ... window = signal[win_slice]
325
+ ... # Process window
326
+ ... processed = some_function(window)
327
+ ... # The processed window is weighted by the amplitude and added to the reconstructed signal
328
+ ... recons[win_slice] = recons[win_slice] + processed * amplitude
277
329
  """
278
330
 
279
331
  def __init__(self, ns, nswin, overlap):
@@ -289,14 +341,35 @@ class WindowGenerator(object):
289
341
  self.iw = None
290
342
 
291
343
  @property
292
- def firstlast_splicing(self):
344
+ def splice(self):
345
+ """
346
+ Generator that yields slices and amplitude arrays for windowed signal processing with splicing.
347
+
348
+ This property provides a convenient way to iterate through all windows with their
349
+ corresponding amplitude arrays for proper signal reconstruction. The amplitude arrays
350
+ contain tapering values (from a Hann window) at the overlapping regions to ensure
351
+ unit amplitude of all samples of the original signal.
352
+
353
+ Yields
354
+ ------
355
+ tuple
356
+ A tuple containing:
357
+ - slice: A Python slice object representing the current window
358
+ - amp: A numpy array containing amplitude values for proper splicing/tapering
359
+ at overlap regions
360
+
361
+ Notes
362
+ -----
363
+ This is particularly useful for overlap-add methods where windows need to be
364
+ properly weighted before being combined in the reconstruction process.
293
365
  """
294
- Generator that yields the indices as well as an amplitude function that can be used
295
- to splice the windows together.
296
- In the overlap, the amplitude function gradually transitions the amplitude from one window
297
- to the next. The amplitudes always sum to one (ie. windows are symmetrical)
366
+ for first, last, amp in self.firstlast_splicing:
367
+ yield slice(first, last), amp
298
368
 
299
- :return: tuple of (first_index, last_index, amplitude_vector]
369
+ @property
370
+ def firstlast_splicing(self):
371
+ """
372
+ Same as `splice`, but yields (first_index, last_index, amplitude_vector) tuples rather than (slice, amplitude) pairs.
300
373
  """
301
374
  w = scipy.signal.windows.hann((self.overlap + 1) * 2 + 1, sym=True)[
302
375
  1 : self.overlap + 1
@@ -323,7 +396,7 @@ class WindowGenerator(object):
323
396
  yield (first, last, first_valid, last_valid)
324
397
 
325
398
  @property
326
- def firstlast(self, return_valid=False):
399
+ def firstlast(self):
327
400
  """
328
401
  Generator that yields first and last index of windows
329
402
 
@@ -343,13 +416,51 @@ class WindowGenerator(object):
343
416
  @property
344
417
  def slice(self):
345
418
  """
346
- Generator that yields slices of windows
347
-
348
- :return: a slice of the window
419
+ Generator that yields slice objects for each window in the signal.
420
+
421
+ This property provides a convenient way to iterate through all windows
422
+ defined by the WindowGenerator parameters. Each yielded slice can be
423
+ used directly to index into the original signal array.
424
+
425
+ Yields
426
+ ------
427
+ slice
428
+ A Python slice object representing the current window, defined by
429
+ its first and last indices. The slice can be used to extract the
430
+ corresponding window from the original signal.
349
431
  """
350
432
  for first, last in self.firstlast:
351
433
  yield slice(first, last)
352
434
 
435
+ @property
436
+ def slices_valid(self):
437
+ """
438
+ Generator that yields slices for windowed signal processing with valid regions.
439
+
440
+ This method generates tuples of slice objects that can be used to extract windows
441
+ from a signal and identify the valid (non-overlapping) portions within each window.
442
+ It's particularly useful for reconstruction operations where overlapping regions
443
+ need special handling.
444
+
445
+ Yields
446
+ ------
447
+ tuple
448
+ A tuple containing three slice objects:
449
+ - slice(first, last): The full window slice
450
+ - slice(first_valid, last_valid): The valid portion of the signal in absolute indices
451
+ - slice_window_valid: The valid portion relative to the window (for use within the window)
452
+
453
+ Notes
454
+ -----
455
+ This generator relies on the firstlast_valid property which provides the
456
+ indices for both the full windows and their valid regions.
457
+ """
458
+ for first, last, first_valid, last_valid in self.firstlast_valid:
459
+ slice_window_valid = slice(
460
+ first_valid - first, None if (lv := -(last - last_valid)) == 0 else lv
461
+ )
462
+ yield slice(first, last), slice(first_valid, last_valid), slice_window_valid
463
+
353
464
  def slice_array(self, sig, axis=-1):
354
465
  """
355
466
  Provided an array or sliceable object, generator that yields
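
To tie the new iteration interfaces together, a small sketch patterned on the updated unit tests; the signal length, window size and the median filter standing in for a real edge-affected operation are illustrative assumptions:

```python
import numpy as np
import scipy.signal

from ibldsp.utils import WindowGenerator

ns = 60_000
signal = np.random.randn(ns)
wg = WindowGenerator(ns=ns, nswin=4_096, overlap=256)

# valid-region reconstruction: process each window, keep only the valid (non-overlapping) samples
recons = np.zeros_like(signal)
for win_slice, valid_slice, win_valid_slice in wg.slices_valid:
    processed = scipy.signal.medfilt(signal[win_slice], kernel_size=5)  # any edge-affected operation
    recons[valid_slice] = processed[win_valid_slice]

# splice reconstruction: Hann-tapered overlap-add, the amplitudes sum to one across windows
recons_splice = np.zeros_like(signal)
for win_slice, amp in wg.splice:
    recons_splice[win_slice] += signal[win_slice] * amp
np.testing.assert_allclose(recons_splice, signal)  # identity processing reconstructs the input exactly
```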
@@ -3,6 +3,8 @@ Module to work with raw voltage traces. Spike sorting pre-processing functions.
3
3
  """
4
4
 
5
5
  import inspect
6
+ import joblib
7
+ import tqdm
6
8
  from pathlib import Path
7
9
 
8
10
  import numpy as np
@@ -217,6 +219,7 @@ def kfilt(
217
219
  xf, gain = agc(x, wl=lagc, si=1.0, gpu=gpu)
218
220
  if ntr_pad > 0:
219
221
  # pad the array with a mirrored version of itself and apply a cosine taper
222
+ ntr_pad = np.min([ntr_pad, xf.shape[0]])
220
223
  xf = gp.r_[gp.flipud(xf[:ntr_pad]), xf, gp.flipud(xf[-ntr_pad:])]
221
224
  if ntr_tap > 0:
222
225
  taper = fourier.fcn_cosine([0, ntr_tap], gpu=gpu)(gp.arange(nxp)) # taper up
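
The one-line change above clamps the requested pad to the number of available traces before mirroring. A standalone toy sketch of that padding behaviour (not the library's kfilt itself; the helper name here is made up for illustration):

```python
import numpy as np

def mirror_pad(x, ntr_pad):
    # pad along the trace axis with a mirrored copy of the edges;
    # clamping ntr_pad to the trace count keeps the mirrored blocks well defined
    ntr_pad = min(ntr_pad, x.shape[0])
    if ntr_pad == 0:
        return x
    return np.r_[np.flipud(x[:ntr_pad]), x, np.flipud(x[-ntr_pad:])]

x = np.arange(12, dtype=float).reshape(4, 3)    # only 4 traces
print(mirror_pad(x, ntr_pad=16).shape)          # pad request clamped to 4 -> (12, 3)
```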
@@ -266,6 +269,120 @@ def saturation(
266
269
  return saturation, mute
267
270
 
268
271
 
272
+ def saturation_samples_to_intervals(
273
+ _saturation: np.ndarray, output_file: Path = None
274
+ ) -> pd.DataFrame:
275
+ """
276
+ Convert a flat boolean array (e.g. the memory-mapped npy file) into a table of saturation intervals.
277
+ :param _saturation: np.ndarray: Boolean array with saturation samples set as True
278
+ :return: pd.DataFrame with start_sample and stop_sample columns (also written to output_file as parquet when provided)
279
+ """
280
+ assert not _saturation[0]
281
+ ind, pol = ibldsp.utils.fronts(_saturation.astype(np.int8))
282
+ # if the last sample is positive, make sure the interval is closed by providing an even number of events
283
+ if len(pol) > 0 and pol[-1] == 1:
284
+ pol = np.r_[pol, -1]
285
+ ind = np.r_[ind, _saturation.shape[0] - 1]
286
+ df_saturation = pd.DataFrame(
287
+ np.c_[ind[::2], ind[1::2]], columns=["start_sample", "stop_sample"]
288
+ )
289
+ if output_file is not None:
290
+ df_saturation.to_parquet(output_file)
291
+ return df_saturation
292
+
293
+
294
+ def saturation_cbin(
295
+ bin_file_path: Path,
296
+ file_saturation: Path = None,
297
+ max_voltage=None,
298
+ n_jobs=4,
299
+ v_per_sec=1e-8,
300
+ proportion=0.2,
301
+ mute_window_samples=7,
302
+ ) -> Path:
303
+ """
304
+ Detect saturation in a compressed binary (cbin) electrophysiology file and save the results.
305
+
306
+ This function processes a SpikeGLX binary file in chunks to identify saturated samples
307
+ and saves the results as a memory-mapped boolean array. Processing is parallelized
308
+ for improved performance.
309
+
310
+ Parameters
311
+ ----------
312
+ bin_file_path : Path | spikeglx.Reader
313
+ Path to the SpikeGLX binary file to be processed or spikeglx.Reader object
314
+ file_saturation : Path, optional
315
+ Path where the saturation data will be saved. If None, defaults to
316
+ "_iblqc_ephysSaturation.samples.npy" in the same directory as the input file
317
+ max_voltage : float, optional
318
+ One-sided maximum voltage range in Volts; if not provided, the value is read from the SpikeGLX metadata
319
+ n_jobs : int, optional
320
+ Number of parallel jobs to use for processing, defaults to 4
321
+ v_per_sec : float, optional
322
+ Maximum derivative of the voltage in V/s (or units/s), defaults to 1e-8
323
+ proportion : float, optional
324
+ Threshold proportion (0-1) of channels that must be above threshold to consider
325
+ a sample as saturated, defaults to 0.2
326
+ mute_window_samples : int, optional
327
+ Number of samples for the cosine taper applied to the saturation, defaults to 7
328
+
329
+ Returns
330
+ -------
331
+ Path
332
+ Path to the parquet file of saturation intervals (the boolean samples array is saved alongside as npy)
333
+ """
334
+ if isinstance(bin_file_path, spikeglx.Reader):
335
+ sr = bin_file_path
336
+ bin_file_path = sr.file_bin
337
+ else:
338
+ sr = spikeglx.Reader(bin_file_path)
339
+ file_saturation = (
340
+ file_saturation
341
+ if file_saturation is not None
342
+ else bin_file_path.parent.joinpath("_iblqc_ephysSaturation.samples.npy")
343
+ )
344
+ max_voltage = max_voltage if max_voltage is not None else sr.range_volts[:-1]
345
+ # Create a memory-mapped array
346
+ _saturation = np.lib.format.open_memmap(
347
+ file_saturation, dtype=bool, mode="w+", shape=(sr.ns,)
348
+ )
349
+ _saturation[:] = False # Initialize all values to False
350
+ _saturation.flush() # Make sure to flush to disk
351
+
352
+ wg = ibldsp.utils.WindowGenerator(ns=sr.ns, nswin=2**17, overlap=16)
353
+
354
+ # we can parallelize this as there is no conflict on output
355
+ def _saturation_slice(slice_win, slice_valid, slice_relative_valid):
356
+ sr = spikeglx.Reader(bin_file_path)
357
+ data = sr[slice_win, : sr.nc - sr.nsync].T
358
+ satwin, _ = ibldsp.voltage.saturation(
359
+ data,
360
+ max_voltage=max_voltage,
361
+ fs=sr.fs,
362
+ v_per_sec=v_per_sec,
363
+ proportion=proportion,
364
+ mute_window_samples=mute_window_samples,
365
+ )
366
+ _saturation[slice_valid] = satwin[slice_relative_valid]
367
+ _saturation.flush()
368
+ # building the list of delayed jobs up front and consuming joblib's generator output lets tqdm monitor progress
369
+
370
+ jobs = [
371
+ joblib.delayed(_saturation_slice)(slw, slv, slrv)
372
+ for (slw, slv, slrv) in wg.slices_valid
373
+ ]
374
+ list(
375
+ tqdm.tqdm(
376
+ joblib.Parallel(return_as="generator", n_jobs=n_jobs)(jobs), total=wg.nwin
377
+ )
378
+ )
379
+
380
+ _ = saturation_samples_to_intervals(
381
+ _saturation, output_file=file_saturation.with_suffix(".pqt")
382
+ )
383
+ return file_saturation.with_suffix(".pqt")
384
+
385
+
269
386
  def interpolate_bad_channels(
270
387
  data, channel_labels=None, x=None, y=None, p=1.3, kriging_distance_um=20, gpu=False
271
388
  ):
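
A minimal usage sketch of the new saturation QC entry point, patterned on the unit test added in this release; the file path below is a placeholder and the default thresholds are assumed:

```python
from pathlib import Path

import pandas as pd
import spikeglx
from ibldsp.voltage import saturation_cbin

# open an AP-band recording; the voltage range defaults to the SpikeGLX metadata
cbin_file = Path("/data/_spikeglx_ephysData_g0_t0.imec0.ap.cbin")  # placeholder path
sr = spikeglx.Reader(cbin_file)

# writes _iblqc_ephysSaturation.samples.npy next to the binary and returns the .pqt intervals table
file_intervals = saturation_cbin(sr, n_jobs=4)
df_sat = pd.read_parquet(file_intervals)
print(df_sat[["start_sample", "stop_sample"]].head())
```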
@@ -655,6 +772,9 @@ def decompress_destripe_cbin(
655
772
  saturation_data = np.load(file_saturation)
656
773
  assert rms_data.shape[0] == time_data.shape[0] * ncv
657
774
  rms_data = rms_data.reshape(time_data.shape[0], ncv)
775
+ # Save the rms data using the original channel index
776
+ unsort = np.argsort(sr.raw_channel_order)[: -sr.nsync]
777
+ rms_data = rms_data[:, unsort]
658
778
  output_qc_path = (
659
779
  output_file.parent if output_qc_path is None else output_qc_path
660
780
  )
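
The three added lines above write the RMS QC data back in the original channel order. As a reminder of the general trick (toy values only, independent of the spikeglx channel-map semantics), `np.argsort` inverts a permutation:

```python
import numpy as np

raw_channel_order = np.array([2, 0, 3, 1])   # order in which channels were written out
data_sorted = np.array([20., 0., 30., 10.])  # one value per channel, in written order
unsort = np.argsort(raw_channel_order)       # inverse permutation
print(data_sorted[unsort])                   # -> [ 0. 10. 20. 30.], back in original channel order
```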
@@ -781,7 +901,7 @@ def detect_bad_channels(
781
901
  window_size = 25 # Choose based on desired smoothing (e.g., 25 samples)
782
902
  kernel = np.ones(window_size) / window_size
783
903
  # Apply convolution
784
- signal_filtered = np.convolve(signal_noisy, kernel, mode='same')
904
+ signal_filtered = np.convolve(signal_noisy, kernel, mode="same")
785
905
 
786
906
  diff_x = np.diff(signal_filtered)
787
907
  indx = np.where(diff_x < -0.02)[0] # hardcoded threshold
@@ -934,16 +1054,39 @@ def stack(data, word, fcn_agg=np.nanmean, header=None):
934
1054
 
935
1055
  def current_source_density(lfp, h, n=2, method="diff", sigma=1 / 3):
936
1056
  """
937
- Compute the current source density (CSD) of a given LFP signal recorded on neuropixel 1 or 2
938
- :param data: LFP signal (n_channels, n_samples)
939
- :param h: trace header dictionary
940
- :param n: the n derivative
941
- :param method: diff (straight double difference) or kernel CSD (needs the KCSD python package)
942
- :param sigma: conductivity, defaults to 1/3 S.m-1
943
- :return:
1057
+ Compute the current source density (CSD) of a given LFP signal recorded on Neuropixel probes.
1058
+
1059
+ The CSD estimates the location of current sources and sinks in neural tissue based on
1060
+ the spatial distribution of local field potentials (LFPs). This implementation supports
1061
+ both the standard double-derivative method and kernel CSD method.
1062
+
1063
+ The CSD is computed for each column of the Neuropixel probe layout separately.
1064
+
1065
+ Parameters
1066
+ ----------
1067
+ lfp : numpy.ndarray
1068
+ LFP signal array with shape (n_channels, n_samples)
1069
+ h : dict
1070
+ Trace header dictionary containing probe geometry information with keys:
1071
+ 'x', 'y' for electrode coordinates, 'col' for column indices, and 'row' for row indices
1072
+ n : int, optional
1073
+ Order of the derivative for the 'diff' method, defaults to 2
1074
+ method : str, optional
1075
+ Method to compute CSD:
1076
+ - 'diff': standard finite difference method (default)
1077
+ - 'kcsd': kernel CSD method (requires the KCSD Python package)
1078
+ sigma : float, optional
1079
+ Tissue conductivity in Siemens per meter, defaults to 1/3 S.m-1
1080
+
1081
+ Returns
1082
+ -------
1083
+ numpy.ndarray
1084
+ Current source density with the same shape as the input LFP array.
1085
+ Positive values indicate current sources, negative values indicate sinks.
1086
+ Units are in A.m-3 (amperes per cubic meter).
944
1087
  """
945
1088
  csd = np.zeros(lfp.shape, dtype=np.float64) * np.nan
946
- xy = h["x"] + 1j * h["y"]
1089
+ xy = (h["x"] + 1j * h["y"]) / 1e6
947
1090
  for col in np.unique(h["col"]):
948
1091
  ind = np.where(h["col"] == col)[0]
949
1092
  isort = np.argsort(h["row"][ind])
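
A hedged sketch of calling the updated `current_source_density`; it assumes the `trace_header` helper in `src/neuropixel.py` returns the geometry dict with 'x', 'y', 'col', 'row' keys, and the synthetic LFP values are purely illustrative:

```python
import numpy as np
import neuropixel
from ibldsp.voltage import current_source_density

h = neuropixel.trace_header(version=1)   # assumed geometry dict, coordinates in um
nc = h["x"].size

lfp = np.random.randn(nc, 2500) * 1e-4   # 1 s of synthetic LFP at 2.5 kHz, in Volts

# default double-difference estimate per probe column; coordinates are now converted from um to m internally
csd = current_source_density(lfp, h, method="diff", sigma=1 / 3)
print(csd.shape)                          # same (nc, ns) shape as the input, units A.m-3
```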
@@ -990,7 +1133,6 @@ def _svd_denoise(datr, rank):
990
1133
 
991
1134
  def svd_denoise_npx(datr, rank=None, collection=None):
992
1135
  """
993
-
994
1136
  :param datr: [nc, ns]
995
1137
  :param rank:
996
1138
  :param collection:
@@ -280,6 +280,7 @@ def extract_wfs_cbin(
280
280
  chunksize_samples=int(30_000),
281
281
  reader_kwargs=None,
282
282
  n_jobs=None,
283
+ wfs_dtype=np.float32,
283
284
  preprocess_steps=None,
284
285
  seed=None,
285
286
  scratch_dir=None,
@@ -144,8 +144,12 @@ class Reader:
144
144
  sglx_file = str(self.file_bin)
145
145
  if self.is_mtscomp:
146
146
  self._raw = mtscomp.Reader()
147
- ch_file = self.ch_file or _get_companion_file(sglx_file, ".ch")
148
- self._raw.open(self.file_bin, ch_file)
147
+ self.ch_file = (
148
+ _get_companion_file(sglx_file, ".ch")
149
+ if self.ch_file is None
150
+ else self.ch_file
151
+ )
152
+ self._raw.open(self.file_bin, self.ch_file)
149
153
  if self._raw.shape != (self.ns, self.nc):
150
154
  ftsec = self._raw.shape[0] / self.fs
151
155
  if not self.ignore_warnings: # avoid the checks for streaming data
@@ -411,7 +415,7 @@ class Reader:
411
415
  """
412
416
  if file_meta is None:
413
417
  file_meta = Path(self.file_bin).with_suffix(".meta")
414
-
418
+ file_ch = file_ch if file_ch is not None else self.ch_file
415
419
  if scratch_dir is None:
416
420
  bin_file = Path(self.file_bin).with_suffix(".bin")
417
421
  else:
@@ -0,0 +1,30 @@
1
+ import unittest
2
+
3
+ import numpy as np
4
+
5
+ import ibldsp.plots
6
+ import ibldsp.voltage
7
+
8
+
9
+ class TestPlots(unittest.TestCase):
10
+ def test_voltage(self):
11
+ ibldsp.plots.voltageshow(
12
+ (np.random.rand(384, 2000) - 0.5) / 1e6 * 20, fs=30_000
13
+ )
14
+
15
+ def test_bad_channels(self):
16
+ np.random.seed(0)
17
+ raw = np.random.randn(384, 2000) / 1e6 * 15
18
+ raw += np.random.randn(1, 2000) / 1e6 * 2
19
+ raw[66] *= 2
20
+ raw[166] = 0
21
+ fs = 30_000
22
+ labels, features = ibldsp.voltage.detect_bad_channels(raw, fs)
23
+ ibldsp.plots.show_channels_labels(
24
+ raw=raw,
25
+ fs=30_000,
26
+ channel_labels=labels,
27
+ xfeats=features,
28
+ )
29
+ np.testing.assert_array_equal(np.argwhere(labels == 2), 66)
30
+ np.testing.assert_array_equal(np.argwhere(labels == 1), 166)
@@ -243,6 +243,8 @@ class TestsSpikeGLX_compress(unittest.TestCase):
243
243
  with spikeglx.Reader(self.file_cbin, open=False) as sc:
244
244
  self.assertTrue(sc.is_mtscomp)
245
245
  compare_data(sr_ref, sc)
246
+ # here we make sure the chunks file has been registered as a property
247
+ self.assertEqual(sc.ch_file, self.file_cbin.with_suffix(".ch"))
246
248
 
247
249
  # test decompression in-place
248
250
  sc.decompress_file(keep_original=False, overwrite=True)
@@ -7,7 +7,6 @@ import spikeglx
7
7
  import ibldsp.fourier as fourier
8
8
  import ibldsp.utils as utils
9
9
  import ibldsp.voltage as voltage
10
- import ibldsp.cadzow as cadzow
11
10
  import ibldsp.smooth as smooth
12
11
  import ibldsp.spiketrains as spiketrains
13
12
  import ibldsp.raw_metrics as raw_metrics
@@ -368,6 +367,13 @@ class TestWindowGenerator(unittest.TestCase):
368
367
  for first, last, amp in wg.firstlast_splicing:
369
368
  sig_out[first:last] = sig_out[first:last] + amp * sig_in[first:last]
370
369
  np.testing.assert_allclose(sig_out, sig_in)
370
+ # now performs the same operation with the new interface
371
+ sig_in = np.random.randn(600)
372
+ sig_out = np.zeros_like(sig_in)
373
+ wg = utils.WindowGenerator(ns=600, nswin=100, overlap=20)
374
+ for slicewin, amp in wg.splice:
375
+ sig_out[slicewin] = sig_out[slicewin] + amp * sig_in[slicewin]
376
+ np.testing.assert_allclose(sig_out, sig_in)
371
377
 
372
378
  def test_firstlast_valid(self):
373
379
  sig_in = np.random.randn(600)
@@ -377,6 +383,15 @@ class TestWindowGenerator(unittest.TestCase):
377
383
  sig_out[first_valid:last_valid] = sig_in[first_valid:last_valid]
378
384
  np.testing.assert_array_equal(sig_out, sig_in)
379
385
 
386
+ def test_slices_valid(self):
387
+ sig_in = np.random.randn(600)
388
+ sig_out = np.zeros_like(sig_in)
389
+ wg = utils.WindowGenerator(ns=600, nswin=39, overlap=20)
390
+ for slice_win, slice_valid, slice_win_valid in wg.slices_valid:
391
+ win = sig_in[slice_win]
392
+ sig_out[slice_valid] = win[slice_win_valid]
393
+ np.testing.assert_array_equal(sig_out, sig_in)
394
+
380
395
  def test_tscale(self):
381
396
  wg = utils.WindowGenerator(ns=500, nswin=100, overlap=50)
382
397
  ts = wg.tscale(fs=1000)
@@ -430,103 +445,6 @@ class TestFrontDetection(unittest.TestCase):
430
445
  np.testing.assert_array_equal(utils.rises(a, step=3, analog=True), 283)
431
446
 
432
447
 
433
- class TestVoltage(unittest.TestCase):
434
- def test_destripe_parameters(self):
435
- import inspect
436
-
437
- _, _, spatial_fcn = voltage._get_destripe_parameters(
438
- 30_000, None, None, k_filter=True
439
- )
440
- assert "kfilt" in inspect.getsource(spatial_fcn)
441
- _, _, spatial_fcn = voltage._get_destripe_parameters(
442
- 2_500, None, None, k_filter=False
443
- )
444
- assert "car" in inspect.getsource(spatial_fcn)
445
- _, _, spatial_fcn = voltage._get_destripe_parameters(
446
- 2_500, None, None, k_filter=None
447
- )
448
- assert "dat: dat" in inspect.getsource(spatial_fcn)
449
- _, _, spatial_fcn = voltage._get_destripe_parameters(
450
- 2_500, None, None, k_filter=lambda dat: 3 * dat
451
- )
452
- assert "lambda dat: 3 * dat" in inspect.getsource(spatial_fcn)
453
-
454
- def test_fk(self):
455
- """
456
- creates a couple of plane waves and separate them using the velocity HP filter
457
- """
458
- ntr, ns, sr, dx, v1, v2 = (500, 2000, 0.002, 5, 2000, 1000)
459
- data = np.zeros((ntr, ns), np.float32)
460
- data[:, :100] = utils.ricker(100, 4)
461
- offset = np.arange(ntr) * dx
462
- offset = np.abs(offset - np.mean(offset))
463
- data_v1 = fourier.fshift(data, offset / v1 / sr)
464
- data_v2 = fourier.fshift(data, offset / v2 / sr)
465
-
466
- noise = np.random.randn(ntr, ns) / 60
467
- fk = voltage.fk(
468
- data_v1 + data_v2 + noise,
469
- si=sr,
470
- dx=dx,
471
- vbounds=[1200, 1500],
472
- ntr_pad=10,
473
- ntr_tap=15,
474
- lagc=0.25,
475
- )
476
- fknoise = voltage.fk(
477
- noise, si=sr, dx=dx, vbounds=[1200, 1500], ntr_pad=10, ntr_tap=15, lagc=0.25
478
- )
479
- # at least 90% of the traces should be below 50dB and 98% below 40 dB
480
- assert np.mean(20 * np.log10(utils.rms(fk - data_v1 - fknoise)) < -50) > 0.9
481
- assert np.mean(20 * np.log10(utils.rms(fk - data_v1 - fknoise)) < -40) > 0.98
482
- # test the K option
483
- kbands = np.sin(np.arange(ns) / ns * 8 * np.pi) / 10
484
- fkk = voltage.fk(
485
- data_v1 + data_v2 + kbands,
486
- si=sr,
487
- dx=dx,
488
- vbounds=[1200, 1500],
489
- ntr_pad=40,
490
- ntr_tap=15,
491
- lagc=0.25,
492
- kfilt={"bounds": [0, 0.01], "btype": "hp"},
493
- )
494
- assert np.mean(20 * np.log10(utils.rms(fkk - data_v1)) < -40) > 0.9
495
- # from easyqc.gui import viewseis
496
- # a = viewseis(data_v1 + data_v2 + kbands, .002, title='input')
497
- # b = viewseis(fkk, .002, title='output')
498
- # c = viewseis(data_v1 - fkk, .002, title='test')
499
-
500
- def test_saturation(self):
501
- np.random.seed(7654)
502
- data = (np.random.randn(384, 30_000).astype(np.float32) + 20) * 1e-6
503
- saturated, mute = voltage.saturation(data, max_voltage=1200)
504
- np.testing.assert_array_equal(saturated, 0)
505
- np.testing.assert_array_equal(mute, 1.0)
506
- # now we stick a big waveform in the middle of the recorder and expect some saturation
507
- w = utils.ricker(100, 4)
508
- w = np.minimum(1200, w / w.max() * 1400)
509
- data[:, 13_600:13700] = data[0, 13_600:13700] + w * 1e-6
510
- saturated, mute = voltage.saturation(
511
- data,
512
- max_voltage=np.ones(
513
- 384,
514
- )
515
- * 1200
516
- * 1e-6,
517
- )
518
- self.assertGreater(np.sum(saturated), 5)
519
- self.assertGreater(np.sum(mute == 0), np.sum(saturated))
520
-
521
-
522
- class TestCadzow(unittest.TestCase):
523
- def test_trajectory_matrixes(self):
524
- assert np.all(
525
- cadzow.traj_matrix_indices(4) == np.array([[1, 0], [2, 1], [3, 2]])
526
- )
527
- assert np.all(cadzow.traj_matrix_indices(3) == np.array([[1, 0], [2, 1]]))
528
-
529
-
530
448
  class TestStack(unittest.TestCase):
531
449
  def test_simple_stack(self):
532
450
  ntr, ns = (24, 400)
@@ -0,0 +1,142 @@
1
+ import numpy as np
2
+ import tempfile
3
+ from pathlib import Path
4
+ import unittest
5
+
6
+ import pandas as pd
7
+
8
+ import spikeglx
9
+ import ibldsp.voltage
10
+ import ibldsp.fourier
11
+ import ibldsp.utils
12
+ import ibldsp.cadzow
13
+
14
+
15
+ class TestDestripe(unittest.TestCase):
16
+ def test_destripe_parameters(self):
17
+ import inspect
18
+
19
+ _, _, spatial_fcn = ibldsp.voltage._get_destripe_parameters(
20
+ 30_000, None, None, k_filter=True
21
+ )
22
+ assert "kfilt" in inspect.getsource(spatial_fcn)
23
+ _, _, spatial_fcn = ibldsp.voltage._get_destripe_parameters(
24
+ 2_500, None, None, k_filter=False
25
+ )
26
+ assert "car" in inspect.getsource(spatial_fcn)
27
+ _, _, spatial_fcn = ibldsp.voltage._get_destripe_parameters(
28
+ 2_500, None, None, k_filter=None
29
+ )
30
+ assert "dat: dat" in inspect.getsource(spatial_fcn)
31
+ _, _, spatial_fcn = ibldsp.voltage._get_destripe_parameters(
32
+ 2_500, None, None, k_filter=lambda dat: 3 * dat
33
+ )
34
+ assert "lambda dat: 3 * dat" in inspect.getsource(spatial_fcn)
35
+
36
+ def test_fk(self):
37
+ """
38
+ creates a couple of plane waves and separate them using the velocity HP filter
39
+ """
40
+ ntr, ns, sr, dx, v1, v2 = (500, 2000, 0.002, 5, 2000, 1000)
41
+ data = np.zeros((ntr, ns), np.float32)
42
+ data[:, :100] = ibldsp.utils.ricker(100, 4)
43
+ offset = np.arange(ntr) * dx
44
+ offset = np.abs(offset - np.mean(offset))
45
+ data_v1 = ibldsp.fourier.fshift(data, offset / v1 / sr)
46
+ data_v2 = ibldsp.fourier.fshift(data, offset / v2 / sr)
47
+
48
+ noise = np.random.randn(ntr, ns) / 60
49
+ fk = ibldsp.voltage.fk(
50
+ data_v1 + data_v2 + noise,
51
+ si=sr,
52
+ dx=dx,
53
+ vbounds=[1200, 1500],
54
+ ntr_pad=10,
55
+ ntr_tap=15,
56
+ lagc=0.25,
57
+ )
58
+ fknoise = ibldsp.voltage.fk(
59
+ noise, si=sr, dx=dx, vbounds=[1200, 1500], ntr_pad=10, ntr_tap=15, lagc=0.25
60
+ )
61
+ # at least 90% of the traces should be below 50dB and 98% below 40 dB
62
+ assert (
63
+ np.mean(20 * np.log10(ibldsp.utils.rms(fk - data_v1 - fknoise)) < -50) > 0.9
64
+ )
65
+ assert (
66
+ np.mean(20 * np.log10(ibldsp.utils.rms(fk - data_v1 - fknoise)) < -40)
67
+ > 0.98
68
+ )
69
+ # test the K option
70
+ kbands = np.sin(np.arange(ns) / ns * 8 * np.pi) / 10
71
+ fkk = ibldsp.voltage.fk(
72
+ data_v1 + data_v2 + kbands,
73
+ si=sr,
74
+ dx=dx,
75
+ vbounds=[1200, 1500],
76
+ ntr_pad=40,
77
+ ntr_tap=15,
78
+ lagc=0.25,
79
+ kfilt={"bounds": [0, 0.01], "btype": "hp"},
80
+ )
81
+ assert np.mean(20 * np.log10(ibldsp.utils.rms(fkk - data_v1)) < -40) > 0.9
82
+ # from easyqc.gui import viewseis
83
+ # a = viewseis(data_v1 + data_v2 + kbands, .002, title='input')
84
+ # b = viewseis(fkk, .002, title='output')
85
+ # c = viewseis(data_v1 - fkk, .002, title='test')
86
+
87
+
88
+ class TestSaturation(unittest.TestCase):
89
+ def test_saturation_cbin(self):
90
+ nsat = 252
91
+ ns, nc = (350_072, 384)
92
+ s2v = np.float32(2.34375e-06)
93
+ sat = ibldsp.utils.fcn_cosine([0, 100])(
94
+ np.arange(nsat)
95
+ ) - ibldsp.utils.fcn_cosine([150, 250])(np.arange(nsat))
96
+ range_volt = 0.0012
97
+ sat = (sat / s2v * 0.0012).astype(np.int16)
98
+
99
+ with tempfile.TemporaryDirectory() as temp_dir:
100
+ file_bin = Path(temp_dir) / "binary.bin"
101
+ data = np.memmap(file_bin, dtype=np.int16, mode="w+", shape=(ns, nc))
102
+ data[50_000 : 50_000 + nsat, :] = sat[:, np.newaxis]
103
+
104
+ _sr = spikeglx.Reader(
105
+ file_bin, fs=30_000, dtype=np.int16, nc=nc, nsync=0, s2v=s2v
106
+ )
107
+ file_saturation = ibldsp.voltage.saturation_cbin(
108
+ _sr, max_voltage=range_volt, n_jobs=1
109
+ )
110
+ df_sat = pd.read_parquet(file_saturation)
111
+ assert np.sum(df_sat["stop_sample"] - df_sat["start_sample"]) == 67
112
+
113
+ def test_saturation(self):
114
+ np.random.seed(7654)
115
+ data = (np.random.randn(384, 30_000).astype(np.float32) + 20) * 1e-6
116
+ saturated, mute = ibldsp.voltage.saturation(data, max_voltage=1200)
117
+ np.testing.assert_array_equal(saturated, 0)
118
+ np.testing.assert_array_equal(mute, 1.0)
119
+ # now we stick a big waveform in the middle of the recorder and expect some saturation
120
+ w = ibldsp.utils.ricker(100, 4)
121
+ w = np.minimum(1200, w / w.max() * 1400)
122
+ data[:, 13_600:13700] = data[0, 13_600:13700] + w * 1e-6
123
+ saturated, mute = ibldsp.voltage.saturation(
124
+ data,
125
+ max_voltage=np.ones(
126
+ 384,
127
+ )
128
+ * 1200
129
+ * 1e-6,
130
+ )
131
+ self.assertGreater(np.sum(saturated), 5)
132
+ self.assertGreater(np.sum(mute == 0), np.sum(saturated))
133
+
134
+
135
+ class TestCadzow(unittest.TestCase):
136
+ def test_trajectory_matrixes(self):
137
+ assert np.all(
138
+ ibldsp.cadzow.traj_matrix_indices(4) == np.array([[1, 0], [2, 1], [3, 2]])
139
+ )
140
+ assert np.all(
141
+ ibldsp.cadzow.traj_matrix_indices(3) == np.array([[1, 0], [2, 1]])
142
+ )
@@ -1,58 +0,0 @@
1
- import numpy as np
2
- import matplotlib.pyplot as plt
3
-
4
-
5
- def show_channels_labels(
6
- raw, fs, channel_labels, xfeats, similarity_threshold, psd_hf_threshold=0.02
7
- ):
8
- """
9
- Shows the features side by side a snippet of raw data
10
- :param sr:
11
- :return:
12
- """
13
- nc, ns = raw.shape
14
- raw = raw - np.mean(raw, axis=-1)[:, np.newaxis] # removes DC offset
15
- ns_plot = np.minimum(ns, 3000)
16
- vaxis_uv = 250 if fs < 2600 else 75
17
- fig, ax = plt.subplots(
18
- 1, 5, figsize=(18, 6), gridspec_kw={"width_ratios": [1, 1, 1, 8, 0.2]}
19
- )
20
- ax[0].plot(xfeats["xcor_hf"], np.arange(nc))
21
- ax[0].plot(
22
- xfeats["xcor_hf"][(iko := channel_labels == 1)], np.arange(nc)[iko], "k*"
23
- )
24
- ax[0].plot(similarity_threshold[0] * np.ones(2), [0, nc], "k--")
25
- ax[0].plot(similarity_threshold[1] * np.ones(2), [0, nc], "r--")
26
- ax[0].set(
27
- ylabel="channel #",
28
- xlabel="high coherence",
29
- ylim=[0, nc],
30
- title="a) dead channel",
31
- )
32
- ax[1].plot(xfeats["psd_hf"], np.arange(nc))
33
- ax[1].plot(xfeats["psd_hf"][(iko := channel_labels == 2)], np.arange(nc)[iko], "r*")
34
- ax[1].plot(psd_hf_threshold * np.array([1, 1]), [0, nc], "r--")
35
- ax[1].set(yticklabels=[], xlabel="PSD", ylim=[0, nc], title="b) noisy channel")
36
- ax[1].sharey(ax[0])
37
- ax[2].plot(xfeats["xcor_lf"], np.arange(nc))
38
- ax[2].plot(
39
- xfeats["xcor_lf"][(iko := channel_labels == 3)], np.arange(nc)[iko], "y*"
40
- )
41
- ax[2].plot([-0.75, -0.75], [0, nc], "y--")
42
- ax[2].set(yticklabels=[], xlabel="LF coherence", ylim=[0, nc], title="c) outside")
43
- ax[2].sharey(ax[0])
44
- im = ax[3].imshow(
45
- raw[:, :ns_plot] * 1e6,
46
- origin="lower",
47
- cmap="PuOr",
48
- aspect="auto",
49
- vmin=-vaxis_uv,
50
- vmax=vaxis_uv,
51
- extent=[0, ns_plot / fs * 1e3, 0, nc],
52
- )
53
- ax[3].set(yticklabels=[], title="d) Raw data", xlabel="time (ms)", ylim=[0, nc])
54
- ax[3].grid(False)
55
- ax[3].sharey(ax[0])
56
- plt.colorbar(im, cax=ax[4], shrink=0.8).ax.set(ylabel="(uV)")
57
- fig.tight_layout()
58
- return fig, ax