ibl-neuropixel 1.8.0.tar.gz → 1.9.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. {ibl_neuropixel-1.8.0/src/ibl_neuropixel.egg-info → ibl_neuropixel-1.9.0}/PKG-INFO +2 -20
  2. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/README.md +1 -19
  3. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/setup.py +1 -1
  4. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0/src/ibl_neuropixel.egg-info}/PKG-INFO +2 -20
  5. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/SOURCES.txt +3 -1
  6. ibl_neuropixel-1.9.0/src/ibldsp/plots.py +135 -0
  7. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/utils.py +125 -14
  8. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/voltage.py +175 -14
  9. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/waveform_extraction.py +1 -0
  10. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/spikeglx.py +7 -3
  11. ibl_neuropixel-1.9.0/src/tests/unit/test_plots.py +30 -0
  12. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/unit/test_spikeglx.py +2 -0
  13. ibl_neuropixel-1.8.0/src/tests/unit/test_ibldsp.py → ibl_neuropixel-1.9.0/src/tests/unit/test_utils.py +16 -98
  14. ibl_neuropixel-1.9.0/src/tests/unit/test_voltage.py +142 -0
  15. ibl_neuropixel-1.8.0/src/ibldsp/plots.py +0 -58
  16. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/LICENSE +0 -0
  17. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/MANIFEST.in +0 -0
  18. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/setup.cfg +0 -0
  19. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/dependency_links.txt +0 -0
  20. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/requires.txt +0 -0
  21. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibl_neuropixel.egg-info/top_level.txt +0 -0
  22. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/__init__.py +0 -0
  23. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/cadzow.py +0 -0
  24. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/cuda_tools.py +0 -0
  25. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/destripe_gpu.py +0 -0
  26. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/filter_gpu.py +0 -0
  27. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/fourier.py +0 -0
  28. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/icsd.py +0 -0
  29. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/raw_metrics.py +0 -0
  30. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/smooth.py +0 -0
  31. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/spiketrains.py +0 -0
  32. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/ibldsp/waveforms.py +0 -0
  33. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/neuropixel.py +0 -0
  34. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/neurowaveforms/__init__.py +0 -0
  35. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/neurowaveforms/model.py +0 -0
  36. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/NP1_meta/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  37. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/NP21_meta/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  38. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/NP24_meta/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  39. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/_spikeglx_ephysData_g0_t0.imec0.ap.ch +0 -0
  40. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/np2split/_spikeglx_ephysData_g0_t0.imec0.ap.meta +0 -0
  41. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_376_channels.ap.meta +0 -0
  42. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_g0_t0.imec.ap.meta +0 -0
  43. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_g0_t0.imec.lf.meta +0 -0
  44. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_g0_t0.imec.wiring.json +0 -0
  45. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3A_short_g0_t0.imec.ap.meta +0 -0
  46. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B2_exported.imec0.ap.meta +0 -0
  47. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_catgt.ap.meta +0 -0
  48. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.imec1.ap.meta +0 -0
  49. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.imec1.lf.meta +0 -0
  50. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.nidq.meta +0 -0
  51. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_g0_t0.nidq.wiring.json +0 -0
  52. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sample3B_version202304.ap.meta +0 -0
  53. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.1_g0_t0.imec.ap.meta +0 -0
  54. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.1_prototype.ap.meta +0 -0
  55. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_1shank_g0_t0.imec.ap.meta +0 -0
  56. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_4shanks_appVersion20230905.ap.meta +0 -0
  57. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_4shanks_g0_t0.imec.ap.meta +0 -0
  58. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNP2.4_4shanks_while_acquiring_incomplete.ap.meta +0 -0
  59. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/sampleNPultra_g0_t0.imec0.ap.meta +0 -0
  60. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_arr_in.npy +0 -0
  61. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_arr_peak.npy +0 -0
  62. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_df.csv +0 -0
  63. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/fixtures/waveform_sample/test_df_wavinfo.csv +0 -0
  64. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/integration/__init__.py +0 -0
  65. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/integration/csd_experiments.py +0 -0
  66. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/integration/test_destripe.py +0 -0
  67. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/unit/__init__.py +0 -0
  68. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/unit/test_ephys_np2.py +0 -0
  69. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/unit/test_neuropixel.py +0 -0
  70. {ibl_neuropixel-1.8.0 → ibl_neuropixel-1.9.0}/src/tests/unit/test_waveforms.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ibl-neuropixel
- Version: 1.8.0
+ Version: 1.9.0
  Summary: Collection of tools for Neuropixel 1.0 and 2.0 probes data
  Home-page: https://github.com/int-brain-lab/ibl-neuropixel
  Author: The International Brain Laboratory
@@ -71,22 +71,4 @@ The following describes the methods implemented in this repository.
  https://doi.org/10.6084/m9.figshare.19705522

  ## Contribution
- Contribution checklist:
- - run tests
- - ruff format
- - PR to main
-
-
- Pypi Release checklist:
- - Edit the version number in `setup.py`
- - add release notes in `release_notes.md`
-
-
- ```shell
- ruff format
- tag=X.Y.Z
- git tag -a $tag
- git push origin $tag
- ```
-
- Create new release with tag X.Y.Z (will automatically publish to PyPI)
+ Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to contribute to this project.
@@ -39,22 +39,4 @@ The following describes the methods implemented in this repository.
  https://doi.org/10.6084/m9.figshare.19705522

  ## Contribution
- Contribution checklist:
- - run tests
- - ruff format
- - PR to main
-
-
- Pypi Release checklist:
- - Edit the version number in `setup.py`
- - add release notes in `release_notes.md`
-
-
- ```shell
- ruff format
- tag=X.Y.Z
- git tag -a $tag
- git push origin $tag
- ```
-
- Create new release with tag X.Y.Z (will automatically publish to PyPI)
+ Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to contribute to this project.
@@ -8,7 +8,7 @@ with open("requirements.txt") as f:

  setuptools.setup(
      name="ibl-neuropixel",
-     version="1.8.0",
+     version="1.9.0",
      author="The International Brain Laboratory",
      description="Collection of tools for Neuropixel 1.0 and 2.0 probes data",
      long_description=long_description,
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ibl-neuropixel
- Version: 1.8.0
+ Version: 1.9.0
  Summary: Collection of tools for Neuropixel 1.0 and 2.0 probes data
  Home-page: https://github.com/int-brain-lab/ibl-neuropixel
  Author: The International Brain Laboratory
@@ -71,22 +71,4 @@ The following describes the methods implemented in this repository.
  https://doi.org/10.6084/m9.figshare.19705522

  ## Contribution
- Contribution checklist:
- - run tests
- - ruff format
- - PR to main
-
-
- Pypi Release checklist:
- - Edit the version number in `setup.py`
- - add release notes in `release_notes.md`
-
-
- ```shell
- ruff format
- tag=X.Y.Z
- git tag -a $tag
- git push origin $tag
- ```
-
- Create new release with tag X.Y.Z (will automatically publish to PyPI)
+ Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to contribute to this project.
@@ -59,7 +59,9 @@ src/tests/integration/csd_experiments.py
  src/tests/integration/test_destripe.py
  src/tests/unit/__init__.py
  src/tests/unit/test_ephys_np2.py
- src/tests/unit/test_ibldsp.py
  src/tests/unit/test_neuropixel.py
+ src/tests/unit/test_plots.py
  src/tests/unit/test_spikeglx.py
+ src/tests/unit/test_utils.py
+ src/tests/unit/test_voltage.py
  src/tests/unit/test_waveforms.py
@@ -0,0 +1,135 @@
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ AP_RANGE_UV = 75
+ LF_RANGE_UV = 250
+
+
+ def show_channels_labels(
+     raw,
+     fs,
+     channel_labels,
+     xfeats,
+     similarity_threshold=(-0.5, 1),
+     psd_hf_threshold=0.02,
+ ):
+     """
+     Shows the features side by side a snippet of raw data
+     :param sr:
+     :return:
+     """
+     nc, ns = raw.shape
+     raw = raw - np.mean(raw, axis=-1)[:, np.newaxis]  # removes DC offset
+     ns_plot = np.minimum(ns, 3000)
+     fig, ax = plt.subplots(
+         1, 5, figsize=(18, 6), gridspec_kw={"width_ratios": [1, 1, 1, 8, 0.2]}
+     )
+     ax[0].plot(xfeats["xcor_hf"], np.arange(nc))
+     ax[0].plot(  # plot channel below the similarity threshold as dead in black
+         xfeats["xcor_hf"][(iko := channel_labels == 1)], np.arange(nc)[iko], "k*"
+     )
+     ax[0].plot(  # plot the values above the similarity threshold as noisy in red
+         xfeats["xcor_hf"][
+             (iko := np.where(xfeats["xcor_hf"] > similarity_threshold[1]))
+         ],
+         np.arange(nc)[iko],
+         "r*",
+     )
+     ax[0].plot(similarity_threshold[0] * np.ones(2), [0, nc], "k--")
+     ax[0].plot(similarity_threshold[1] * np.ones(2), [0, nc], "r--")
+     ax[0].set(
+         ylabel="channel #",
+         xlabel="high coherence",
+         ylim=[0, nc],
+         title="a) dead channel",
+     )
+     ax[1].plot(xfeats["psd_hf"], np.arange(nc))
+     ax[1].plot(
+         xfeats["psd_hf"][(iko := xfeats["psd_hf"] > psd_hf_threshold)],
+         np.arange(nc)[iko],
+         "r*",
+     )
+     ax[1].plot(psd_hf_threshold * np.array([1, 1]), [0, nc], "r--")
+     ax[1].set(yticklabels=[], xlabel="PSD", ylim=[0, nc], title="b) noisy channel")
+     ax[1].sharey(ax[0])
+     ax[2].plot(xfeats["xcor_lf"], np.arange(nc))
+     ax[2].plot(
+         xfeats["xcor_lf"][(iko := channel_labels == 3)], np.arange(nc)[iko], "y*"
+     )
+     ax[2].plot([-0.75, -0.75], [0, nc], "y--")
+     ax[2].set(yticklabels=[], xlabel="LF coherence", ylim=[0, nc], title="c) outside")
+     ax[2].sharey(ax[0])
+     voltageshow(raw[:, :ns_plot], fs, ax=ax[3], cax=ax[4])
+     ax[3].sharey(ax[0])
+     fig.tight_layout()
+     return fig, ax
+
+
+ def voltageshow(
+     raw,
+     fs,
+     cmap="PuOr",
+     ax=None,
+     cax=None,
+     cbar_label="Voltage (uV)",
+     scaling=1e6,
+     vrange=None,
+     **axis_kwargs,
+ ):
+     """
+     Visualizes electrophysiological voltage data as a heatmap.
+
+     This function displays raw voltage data as a color-coded image with appropriate
+     scaling based on the sampling frequency. It automatically selects voltage range
+     based on whether the data is low-frequency (LF) or action potential (AP) data.
+
+     Parameters
+     ----------
+     raw : numpy.ndarray
+         Raw voltage data array with shape (channels, samples), in Volts
+     fs : float
+         Sampling frequency in Hz, used to determine time axis scaling and voltage range.
+     cmap : str, optional
+         Matplotlib colormap name for the heatmap. Default is 'PuOr'.
+     ax : matplotlib.axes.Axes, optional
+         Axes object to plot on. If None, a new figure and axes are created.
+     cax : matplotlib.axes.Axes, optional
+         Axes object for the colorbar. If None and ax is None, a new colorbar axes is created.
+     cbar_label : str, optional
+         Label for the colorbar. Default is 'Voltage (uV)'.
+     vrange: float, optional
+         Voltage range for the colorbar. Defaults to +/- 75 uV for AP and +/- 250 uV for LF.
+     scaling: float, optional
+         Unit transform: default is 1e6: we expect Volts but plot uV.
+     **axis_kwargs: optional
+         Additional keyword arguments for the axis properties, fed to the ax.set() method.
+     Returns
+     -------
+     matplotlib.image.AxesImage
+         The image object created by imshow, which can be used for further customization.
+     """
+     if ax is None:
+         fig, axs = plt.subplots(1, 2, gridspec_kw={"width_ratios": [1, 0.05]})
+         ax, cax = axs
+     nc, ns = raw.shape
+     default_vrange = LF_RANGE_UV if fs < 2600 else AP_RANGE_UV
+     vrange = vrange if vrange is not None else default_vrange
+     im = ax.imshow(
+         raw * scaling,
+         origin="lower",
+         cmap=cmap,
+         aspect="auto",
+         vmin=-vrange,
+         vmax=vrange,
+         extent=[0, ns / fs, 0, nc],
+     )
+     # set the axis properties: we use defaults values that can be overridden by user-provided ones
+     axis_kwargs = (
+         dict(ylim=[0, nc], xlabel="Time (s)", ylabel="Depth (μm)") | axis_kwargs
+     )
+     ax.set(**axis_kwargs)
+     ax.grid(False)
+     if cax is not None:
+         plt.colorbar(im, cax=cax, shrink=0.8).ax.set(ylabel=cbar_label)
+
+     return im
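For orientation, a minimal usage sketch of the new `voltageshow` helper from `src/ibldsp/plots.py` follows. The `spikeglx.Reader` slicing and the `fs` / `nc` / `nsync` attributes are taken from how they are used elsewhere in this diff; the recording path is hypothetical.

```python
# Minimal sketch, assuming the spikeglx.Reader attributes used elsewhere in this diff.
import spikeglx
from ibldsp.plots import voltageshow

sr = spikeglx.Reader("ephysData_g0_t0.imec0.ap.cbin")  # hypothetical recording
raw = sr[10_000:13_000, : sr.nc - sr.nsync].T  # (channels, samples), in Volts
# with an AP-band sampling rate (fs >= 2600 Hz) the color scale defaults to +/- 75 uV
im = voltageshow(raw, sr.fs, cbar_label="Voltage (uV)")
```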
@@ -268,12 +268,64 @@ def make_channel_index(geom, radius=200.0, pad_val=None):

  class WindowGenerator(object):
      """
-     `wg = WindowGenerator(ns, nswin, overlap)`
+     A utility class for generating sliding windows for signal processing applications.

-     Provide sliding windows indices generator for signal processing applications.
-     For straightforward spectrogram / periodogram implementation, prefer scipy methods !
+     WindowGenerator provides various methods to iterate through windows of a signal
+     with configurable window size and overlap. It's particularly useful for operations
+     like spectrograms, filtering, or any processing that requires windowed analysis.

-     Example of implementations in test_dsp.py.
+     Parameters
+     ----------
+     ns : int
+         Total number of samples in the signal to be windowed.
+     nswin : int
+         Number of samples in each window.
+     overlap : int
+         Number of samples that overlap between consecutive windows.
+
+     Attributes
+     ----------
+     ns : int
+         Total number of samples in the signal.
+     nswin : int
+         Number of samples in each window.
+     overlap : int
+         Number of samples that overlap between consecutive windows.
+     nwin : int
+         Total number of windows.
+     iw : int or None
+         Current window index during iteration.
+
+     Notes
+     -----
+     For straightforward spectrogram or periodogram implementation,
+     scipy methods are recommended over this class.
+
+     Examples
+     --------
+     # straight windowing without overlap
+     >>> wg = WindowGenerator(ns=1000, nwin=111)
+     >>> signal = np.random.randn(1000)
+     >>> for window_slice in wg.slice:
+     ...     window_data = signal[window_slice]
+     ...     # Process window_data
+
+     # windowing with overlap (ie. buffers for apodization)
+     >>> for win_slice, valid_slice, win_valid_slice in wg.slices_valid:
+     ...     window = signal[win_slice]
+     ...     # Process window
+     ...     processed = some_function_with_edge_effect(window)
+     ...     # Only use the valid portion for reconstruction
+     ...     recons[valid_slice] = processed[win_valid_slice]
+
+     # splicing add a fade-in / fade-out in the overlap so that reconstruction has unit amplitude
+     >>> recons = np.zeros_like(signal)
+     >>> for win_slice, amplitude in wg.splice:
+     ...     window = signal[win_slice]
+     ...     # Process window
+     ...     processed = some_function(window)
+     ...     # The processed windows is weighted with the amplitude and added to the reconstructed signal
+     ...     recons[win_slice] = recons[win_slice] + processed * amplitude
      """

      def __init__(self, ns, nswin, overlap):
@@ -289,14 +341,35 @@ class WindowGenerator(object):
          self.iw = None

      @property
-     def firstlast_splicing(self):
+     def splice(self):
+         """
+         Generator that yields slices and amplitude arrays for windowed signal processing with splicing.
+
+         This property provides a convenient way to iterate through all windows with their
+         corresponding amplitude arrays for proper signal reconstruction. The amplitude arrays
+         contain tapering values (from a Hann window) at the overlapping regions to ensure
+         unit amplitude of all samples of the original signal
+
+         Yields
+         ------
+         tuple
+             A tuple containing:
+             - slice: A Python slice object representing the current window
+             - amp: A numpy array containing amplitude values for proper splicing/tapering
+               at overlap regions
+
+         Notes
+         -----
+         This is particularly useful for overlap-add methods where windows need to be
+         properly weighted before being combined in the reconstruction process.
          """
-         Generator that yields the indices as well as an amplitude function that can be used
-         to splice the windows together.
-         In the overlap, the amplitude function gradually transitions the amplitude from one window
-         to the next. The amplitudes always sum to one (ie. windows are symmetrical)
+         for first, last, amp in self.firstlast_splicing:
+             yield slice(first, last), amp

-         :return: tuple of (first_index, last_index, amplitude_vector]
+     @property
+     def firstlast_splicing(self):
+         """
+         cf. self.splice
          """
          w = scipy.signal.windows.hann((self.overlap + 1) * 2 + 1, sym=True)[
              1 : self.overlap + 1
@@ -323,7 +396,7 @@ class WindowGenerator(object):
              yield (first, last, first_valid, last_valid)

      @property
-     def firstlast(self, return_valid=False):
+     def firstlast(self):
          """
          Generator that yields first and last index of windows

@@ -343,13 +416,51 @@
      @property
      def slice(self):
          """
-         Generator that yields slices of windows
-
-         :return: a slice of the window
+         Generator that yields slice objects for each window in the signal.
+
+         This property provides a convenient way to iterate through all windows
+         defined by the WindowGenerator parameters. Each yielded slice can be
+         used directly to index into the original signal array.
+
+         Yields
+         ------
+         slice
+             A Python slice object representing the current window, defined by
+             its first and last indices. The slice can be used to extract the
+             corresponding window from the original signal.
          """
          for first, last in self.firstlast:
              yield slice(first, last)

+     @property
+     def slices_valid(self):
+         """
+         Generator that yields slices for windowed signal processing with valid regions.
+
+         This method generates tuples of slice objects that can be used to extract windows
+         from a signal and identify the valid (non-overlapping) portions within each window.
+         It's particularly useful for reconstruction operations where overlapping regions
+         need special handling.
+
+         Yields
+         ------
+         tuple
+             A tuple containing three slice objects:
+             - slice(first, last): The full window slice
+             - slice(first_valid, last_valid): The valid portion of the signal in absolute indices
+             - slice_window_valid: The valid portion relative to the window (for use within the window)
+
+         Notes
+         -----
+         This generator relies on the firstlast_valid property which provides the
+         indices for both the full windows and their valid regions.
+         """
+         for first, last, first_valid, last_valid in self.firstlast_valid:
+             slice_window_valid = slice(
+                 first_valid - first, None if (lv := -(last - last_valid)) == 0 else lv
+             )
+             yield slice(first, last), slice(first_valid, last_valid), slice_window_valid
+
      def slice_array(self, sig, axis=-1):
          """
          Provided an array or sliceable object, generator that yields
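The new `slices_valid` property bundles the full-window slice, the valid slice in absolute indices, and the window-relative valid slice, which makes chunked processing with edge trimming a one-liner per window. A minimal sketch of that pattern, where `process` is a hypothetical stand-in for any per-window operation with edge effects:

```python
# Sketch of chunked processing with WindowGenerator.slices_valid; `process` is a
# hypothetical stand-in for any per-window operation with edge effects.
import numpy as np
from ibldsp.utils import WindowGenerator


def process(x):
    return np.convolve(x, np.ones(5) / 5, mode="same")  # toy smoother


signal = np.random.randn(10_000)
recons = np.zeros_like(signal)
wg = WindowGenerator(ns=signal.size, nswin=1_000, overlap=32)
for win_slice, valid_slice, win_valid_slice in wg.slices_valid:
    out = process(signal[win_slice])
    # keep only the valid (non-overlapping) part of each processed window
    recons[valid_slice] = out[win_valid_slice]
```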
@@ -3,6 +3,8 @@ Module to work with raw voltage traces. Spike sorting pre-processing functions.
  """

  import inspect
+ import joblib
+ import tqdm
  from pathlib import Path

  import numpy as np
@@ -217,6 +219,7 @@ def kfilt(
          xf, gain = agc(x, wl=lagc, si=1.0, gpu=gpu)
      if ntr_pad > 0:
          # pad the array with a mirrored version of itself and apply a cosine taper
+         ntr_pad = np.min([ntr_pad, xf.shape[0]])
          xf = gp.r_[gp.flipud(xf[:ntr_pad]), xf, gp.flipud(xf[-ntr_pad:])]
      if ntr_tap > 0:
          taper = fourier.fcn_cosine([0, ntr_tap], gpu=gpu)(gp.arange(nxp))  # taper up
@@ -266,6 +269,120 @@ def saturation(
      return saturation, mute


+ def saturation_samples_to_intervals(
+     _saturation: np.ndarray, output_file: Path = None
+ ) -> pd.DataFrame:
+     """
+     Convert a flat npy file to a table with saturation intervals.
+     :param _saturation: np.ndarray: Boolean array with saturation samples set as True
+     :return:
+     """
+     assert not _saturation[0]
+     ind, pol = ibldsp.utils.fronts(_saturation.astype(np.int8))
+     # if the last sample is positive, make sure the interval is closed by providing an even number of events
+     if len(pol) > 0 and pol[-1] == 1:
+         pol = np.r_[pol, -1]
+         ind = np.r_[ind, _saturation.shape[0] - 1]
+     df_saturation = pd.DataFrame(
+         np.c_[ind[::2], ind[1::2]], columns=["start_sample", "stop_sample"]
+     )
+     if output_file is not None:
+         df_saturation.to_parquet(output_file)
+     return df_saturation
+
+
+ def saturation_cbin(
+     bin_file_path: Path,
+     file_saturation: Path = None,
+     max_voltage=None,
+     n_jobs=4,
+     v_per_sec=1e-8,
+     proportion=0.2,
+     mute_window_samples=7,
+ ) -> Path:
+     """
+     Detect saturation in a compressed binary (cbin) electrophysiology file and save the results.
+
+     This function processes a SpikeGLX binary file in chunks to identify saturated samples
+     and saves the results as a memory-mapped boolean array. Processing is parallelized
+     for improved performance.
+
+     Parameters
+     ----------
+     bin_file_path : Path | spikeglx.Reader
+         Path to the SpikeGLX binary file to be processed or spikeglx.Reader object
+     file_saturation : Path, optional
+         Path where the saturation data will be saved. If None, defaults to
+         "_iblqc_ephysSaturation.samples.npy" in the same directory as the input file
+     max_voltage : np.float, optional
+         one-sided maximum voltage range (V), if not provided will use the spikeglx metadata
+     n_jobs : int, optional
+         Number of parallel jobs to use for processing, defaults to 4
+     v_per_sec : float, optional
+         Maximum derivative of the voltage in V/s (or units/s), defaults to 1e-8
+     proportion : float, optional
+         Threshold proportion (0-1) of channels that must be above threshold to consider
+         a sample as saturated, defaults to 0.2
+     mute_window_samples : int, optional
+         Number of samples for the cosine taper applied to the saturation, defaults to 7
+
+     Returns
+     -------
+     Path
+         Path to the file where the saturation data was saved
+     """
+     if isinstance(bin_file_path, spikeglx.Reader):
+         sr = bin_file_path
+         bin_file_path = sr.file_bin
+     else:
+         sr = spikeglx.Reader(bin_file_path)
+     file_saturation = (
+         file_saturation
+         if file_saturation is not None
+         else bin_file_path.parent.joinpath("_iblqc_ephysSaturation.samples.npy")
+     )
+     max_voltage = max_voltage if max_voltage is not None else sr.range_volts[:-1]
+     # Create a memory-mapped array
+     _saturation = np.lib.format.open_memmap(
+         file_saturation, dtype=bool, mode="w+", shape=(sr.ns,)
+     )
+     _saturation[:] = False  # Initialize all values to False
+     _saturation.flush()  # Make sure to flush to disk
+
+     wg = ibldsp.utils.WindowGenerator(ns=sr.ns, nswin=2**17, overlap=16)
+
+     # we can parallelize this as there is no conflict on output
+     def _saturation_slice(slice_win, slice_valid, slice_relative_valid):
+         sr = spikeglx.Reader(bin_file_path)
+         data = sr[slice_win, : sr.nc - sr.nsync].T
+         satwin, _ = ibldsp.voltage.saturation(
+             data,
+             max_voltage=max_voltage,
+             fs=sr.fs,
+             v_per_sec=v_per_sec,
+             proportion=proportion,
+             mute_window_samples=mute_window_samples,
+         )
+         _saturation[slice_valid] = satwin[slice_relative_valid]
+         _saturation.flush()
+     # getting the list of jobs as a generator allows running tqdm to monitor progress
+
+     jobs = [
+         joblib.delayed(_saturation_slice)(slw, slv, slrv)
+         for (slw, slv, slrv) in wg.slices_valid
+     ]
+     list(
+         tqdm.tqdm(
+             joblib.Parallel(return_as="generator", n_jobs=n_jobs)(jobs), total=wg.nwin
+         )
+     )
+
+     _ = saturation_samples_to_intervals(
+         _saturation, output_file=file_saturation.with_suffix(".pqt")
+     )
+     return file_saturation.with_suffix(".pqt")
+
+
  def interpolate_bad_channels(
      data, channel_labels=None, x=None, y=None, p=1.3, kriging_distance_um=20, gpu=False
  ):
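The new `saturation_cbin` entry point writes a per-sample boolean memmap (`_iblqc_ephysSaturation.samples.npy`) and a parquet table of saturation intervals next to the recording, and returns the path to the latter. A minimal usage sketch; the recording path is hypothetical and the output naming follows the defaults above:

```python
# Sketch: detect saturation on a cbin recording and read back the intervals table.
from pathlib import Path
import pandas as pd
from ibldsp.voltage import saturation_cbin

cbin_file = Path("ephysData_g0_t0.imec0.ap.cbin")  # hypothetical recording
pqt_file = saturation_cbin(cbin_file, n_jobs=4)    # returns the .pqt intervals file
intervals = pd.read_parquet(pqt_file)              # columns: start_sample, stop_sample
print(intervals.head())
```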
@@ -456,8 +573,9 @@ def decompress_destripe_cbin(
      :param nbatch: (optional) batch size
      :param nprocesses: (optional) number of parallel processes to run, defaults to number or processes detected with joblib
       interp 3:outside of brain and discard
-     :param reject_channels: (True) detects noisy or bad channels and interpolate them. Channels outside of the brain are left
-      untouched
+     :param reject_channels: (True) True | False | np.array()
+      If True, detects noisy or bad channels and interpolate them, zero out the channels outside the brain.
+      If the labels are already computed, they can be provided as a numpy array.
      :param k_kwargs: (None) arguments for the kfilter function
      :param reader_kwargs: (None) optional arguments for the spikeglx Reader instance
      :param k_filter: (True) True | False | None | custom function.
@@ -475,8 +593,11 @@
      # handles input parameters
      reader_kwargs = {} if reader_kwargs is None else reader_kwargs
      sr = spikeglx.Reader(sr_file, open=True, **reader_kwargs)
-     if reject_channels:  # get bad channels if option is on
+     if reject_channels is True:  # get bad channels if option is on
          channel_labels = detect_bad_channels_cbin(sr)
+     elif isinstance(reject_channels, np.ndarray):
+         channel_labels = reject_channels
+         reject_channels = True
      assert isinstance(sr_file, str) or isinstance(sr_file, Path)
      butter_kwargs, k_kwargs, spatial_fcn = _get_destripe_parameters(
          sr.fs, butter_kwargs, k_kwargs, k_filter
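With this change, `reject_channels` also accepts a precomputed per-channel label vector, so bad-channel detection does not have to be re-run on every call. A sketch under the assumption that `detect_bad_channels_cbin` returns the label array consumed above (both names come from this diff); the file paths and the `output_file` keyword are illustrative:

```python
# Sketch: reuse precomputed channel labels with decompress_destripe_cbin.
import spikeglx
from ibldsp.voltage import detect_bad_channels_cbin, decompress_destripe_cbin

sr = spikeglx.Reader("ephysData_g0_t0.imec0.ap.cbin")  # hypothetical recording
channel_labels = detect_bad_channels_cbin(sr)  # per-channel labels (1: dead, 3: outside the brain)
decompress_destripe_cbin(
    "ephysData_g0_t0.imec0.ap.cbin",
    output_file="destriped.bin",        # illustrative keyword / path
    reject_channels=channel_labels,     # numpy array path added in this release
)
```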
@@ -502,7 +623,6 @@
      DEPHAS = np.exp(
          1j * np.angle(fft_object(dephas)) * h["sample_shift"][:, np.newaxis]
      )
-
      # if we want to compute the rms ap across the session as well as the saturation
      if compute_rms:
          # creates a saturation memmap, this is a nsamples vector of booleans
@@ -652,6 +772,9 @@
          saturation_data = np.load(file_saturation)
          assert rms_data.shape[0] == time_data.shape[0] * ncv
          rms_data = rms_data.reshape(time_data.shape[0], ncv)
+         # Save the rms data using the original channel index
+         unsort = np.argsort(sr.raw_channel_order)[: -sr.nsync]
+         rms_data = rms_data[:, unsort]
          output_qc_path = (
              output_file.parent if output_qc_path is None else output_qc_path
          )
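The added `unsort` line relies on the fact that `np.argsort` of a permutation returns its inverse permutation, which maps the RMS columns back to the original channel order. A small self-contained check:

```python
# np.argsort of a permutation is its inverse permutation.
import numpy as np

order = np.array([2, 0, 3, 1])                 # a raw_channel_order-like permutation
unsort = np.argsort(order)                     # -> [1, 3, 0, 2]
values = np.array([10.0, 20.0, 30.0, 40.0])    # indexed by original channel
assert np.array_equal(values[order][unsort], values)
```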
@@ -772,7 +895,23 @@ def detect_bad_channels(
          )
      )[0]
      # the channels outside of the brains are the contiguous channels below the threshold on the trend coherency
-     ioutside = np.where(xfeats["xcor_lf"] < -0.75)[0]  # fixme: hardcoded threshold
+
+     signal_noisy = xfeats["xcor_lf"]
+     # Filter signal
+     window_size = 25  # Choose based on desired smoothing (e.g., 25 samples)
+     kernel = np.ones(window_size) / window_size
+     # Apply convolution
+     signal_filtered = np.convolve(signal_noisy, kernel, mode="same")
+
+     diff_x = np.diff(signal_filtered)
+     indx = np.where(diff_x < -0.02)[0]  # hardcoded threshold
+     if indx.size > 0:
+         indx_threshold = np.floor(np.median(indx)).astype(int)
+         threshold = signal_noisy[indx_threshold]
+         ioutside = np.where(signal_noisy < threshold)[0]
+     else:
+         ioutside = np.array([])
+
      if ioutside.size > 0 and ioutside[-1] == (nc - 1):
          a = np.cumsum(np.r_[0, np.diff(ioutside) - 1])
          ioutside = ioutside[a == np.max(a)]
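The new heuristic replaces the fixed -0.75 cutoff on the low-frequency coherence with a data-driven threshold: smooth the per-channel coherence with a 25-sample moving average, locate the channels where its derivative drops sharply, and take the coherence value at the median drop index as the cutoff. A standalone sketch of that logic on a synthetic coherence profile (a toy example, not the package's test data):

```python
# Standalone sketch of the data-driven "outside the brain" threshold on a
# synthetic LF-coherence profile (mirrors the logic added above).
import numpy as np

xcor_lf = np.r_[np.full(344, 0.2), np.linspace(0.2, -1.0, 40)]  # coherence drops above the brain surface
xcor_lf += 0.01 * np.random.randn(xcor_lf.size)

kernel = np.ones(25) / 25
smoothed = np.convolve(xcor_lf, kernel, mode="same")
drop = np.where(np.diff(smoothed) < -0.02)[0]
if drop.size > 0:
    threshold = xcor_lf[int(np.floor(np.median(drop)))]
    ioutside = np.where(xcor_lf < threshold)[0]
else:
    ioutside = np.array([])
```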
@@ -915,16 +1054,39 @@ def stack(data, word, fcn_agg=np.nanmean, header=None):

  def current_source_density(lfp, h, n=2, method="diff", sigma=1 / 3):
      """
-     Compute the current source density (CSD) of a given LFP signal recorded on neuropixel 1 or 2
-     :param data: LFP signal (n_channels, n_samples)
-     :param h: trace header dictionary
-     :param n: the n derivative
-     :param method: diff (straight double difference) or kernel CSD (needs the KCSD python package)
-     :param sigma: conductivity, defaults to 1/3 S.m-1
-     :return:
+     Compute the current source density (CSD) of a given LFP signal recorded on Neuropixel probes.
+
+     The CSD estimates the location of current sources and sinks in neural tissue based on
+     the spatial distribution of local field potentials (LFPs). This implementation supports
+     both the standard double-derivative method and kernel CSD method.
+
+     The CSD is computed for each column of the Neuropixel probe layout separately.
+
+     Parameters
+     ----------
+     lfp : numpy.ndarray
+         LFP signal array with shape (n_channels, n_samples)
+     h : dict
+         Trace header dictionary containing probe geometry information with keys:
+         'x', 'y' for electrode coordinates, 'col' for column indices, and 'row' for row indices
+     n : int, optional
+         Order of the derivative for the 'diff' method, defaults to 2
+     method : str, optional
+         Method to compute CSD:
+         - 'diff': standard finite difference method (default)
+         - 'kcsd': kernel CSD method (requires the KCSD Python package)
+     sigma : float, optional
+         Tissue conductivity in Siemens per meter, defaults to 1/3 S.m-1
+
+     Returns
+     -------
+     numpy.ndarray
+         Current source density with the same shape as the input LFP array.
+         Positive values indicate current sources, negative values indicate sinks.
+         Units are in A.m-3 (amperes per cubic meter).
      """
      csd = np.zeros(lfp.shape, dtype=np.float64) * np.nan
-     xy = h["x"] + 1j * h["y"]
+     xy = (h["x"] + 1j * h["y"]) / 1e6
      for col in np.unique(h["col"]):
          ind = np.where(h["col"] == col)[0]
          isort = np.argsort(h["row"][ind])
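The `/ 1e6` change converts the electrode coordinates from micrometers (as stored in the trace header) to meters, which is what makes the result come out in A.m-3 as the new docstring states. For reference, a minimal sketch of the underlying double-difference approximation, CSD ≈ -σ ∂²V/∂z², for a single probe column; this is the textbook formula with an illustrative 20 µm pitch, not necessarily the exact implementation in `ibldsp.voltage.current_source_density`:

```python
# Double-difference CSD estimate for one probe column (textbook approximation).
import numpy as np

sigma = 1 / 3                                # conductivity, S/m
dz = 20e-6                                   # illustrative 20 um vertical pitch, in meters
lfp_col = 1e-4 * np.random.randn(96, 2500)   # (channels in column, samples), Volts
csd = -sigma * np.diff(lfp_col, n=2, axis=0) / dz**2  # A.m-3, shape (94, samples)
```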
@@ -971,7 +1133,6 @@ def _svd_denoise(datr, rank):

  def svd_denoise_npx(datr, rank=None, collection=None):
      """
-
      :param datr: [nc, ns]
      :param rank:
      :param collection:
@@ -280,6 +280,7 @@ def extract_wfs_cbin(
      chunksize_samples=int(30_000),
      reader_kwargs=None,
      n_jobs=None,
+     wfs_dtype=np.float32,
      preprocess_steps=None,
      seed=None,
      scratch_dir=None,