ibl-neuropixel 1.9.3__py3-none-any.whl → 1.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ibl_neuropixel-1.9.3.dist-info → ibl_neuropixel-1.10.0.dist-info}/METADATA +2 -2
- {ibl_neuropixel-1.9.3.dist-info → ibl_neuropixel-1.10.0.dist-info}/RECORD +17 -15
- {ibl_neuropixel-1.9.3.dist-info → ibl_neuropixel-1.10.0.dist-info}/WHEEL +1 -1
- ibldsp/fourier.py +2 -0
- ibldsp/sync.py +147 -0
- ibldsp/utils.py +23 -58
- ibldsp/voltage.py +197 -74
- neuropixel.py +136 -88
- spikeglx.py +39 -12
- tests/unit/test_ephys_np2.py +76 -106
- tests/unit/test_neuropixel.py +37 -0
- tests/unit/test_spikeglx.py +20 -7
- tests/unit/test_sync.py +104 -0
- tests/unit/test_utils.py +0 -49
- tests/unit/test_voltage.py +159 -19
- {ibl_neuropixel-1.9.3.dist-info → ibl_neuropixel-1.10.0.dist-info}/licenses/LICENSE +0 -0
- {ibl_neuropixel-1.9.3.dist-info → ibl_neuropixel-1.10.0.dist-info}/top_level.txt +0 -0
neuropixel.py
CHANGED
@@ -5,13 +5,18 @@ from typing import Any
 import warnings
 import traceback
 import numbers
+import requests
+import json
+from functools import lru_cache
 
 import scipy.signal
 import numpy as np
+import pandas as pd
 
 import spikeglx
 from ibldsp.utils import WindowGenerator
 
+
 _logger = logging.getLogger("ibllib")
 
 # hardware pin to channel mapping
@@ -57,7 +62,7 @@ SYNC_PIN_OUT = {
 # sample to volt conversion factors
 S2V_AP = 2.34375e-06
 S2V_LFP = 4.6875e-06
-TIP_SIZE_UM = 200
+TIP_SIZE_UM = 200  # 209 / 206 NP1 / NP2
 NC = 384
 SITES_COORDINATES: np.array
 # channel layouts for neuropixel probes as a function of the major version (1 or 2)
@@ -65,6 +70,8 @@ CHANNEL_GRID = {
     1: dict(DX=16, X0=11, DY=20, Y0=20),
     2: dict(DX=32, X0=27, DY=15, Y0=20),
     "NPultra": dict(DX=6, X0=0, DY=6, Y0=0),
+    "NP2QB": dict(DX=32, X0=27, DY=15, Y0=20),
+    "NHPlong": dict(DX=87 / 2, X0=27, DY=15, Y0=20),
 }
 
 
@@ -124,47 +131,63 @@ def rc2xy(row, col, version=1):
     return {"x": x, "y": y}
 
 
-def dense_layout(version=1, nshank=1):
+def dense_layout(version=1, nshank=1, nc=NC):
     """
     Returns a dense layout indices map for neuropixel, as used at IBL
     :param version: major version number: 1 or 2 or 2.4
     :return: dictionary with keys 'ind', 'col', 'row', 'x', 'y'
     """
     ch = {
-        "ind": np.arange(NC),
-        "row": np.floor(np.arange(NC) / 2),
-        "shank": np.zeros(NC),
+        "ind": np.arange(nc),
+        "row": np.floor(np.arange(nc) / 2),
+        "shank": np.zeros(nc),
     }
-
-
-        ch.update({"col": np.tile(np.array([2, 0, 3, 1]), int(NC / 4))})
+    if version in [1, "NHPlong"]:  # version 1 has a dense layout, checkerboard pattern
+        ch.update({"col": np.tile(np.array([2, 0, 3, 1]), int(nc / 4))})
     elif version == "NPultra":  # NPultra has 8 columns with square grid spacing
-        ch.update({"row": np.floor(np.arange(NC) / 8)})
-        ch.update({"col": np.tile(np.arange(8), int(NC / 8))})
+        ch.update({"row": np.floor(np.arange(nc) / 8)})
+        ch.update({"col": np.tile(np.arange(8), int(nc / 8))})
+    elif version == "NP2QB":
+        # each of the 4 Neuropixel quad shanks are built like individual NP2.1 probes
+        _ch = dense_layout(version=2, nshank=1)
+        ch = {k: [] for k in _ch.keys()}
+        # we concatenate the 4 shanks, only the shanks key is updated
+        for k in _ch.keys():
+            for s in range(4):
+                if k == "shank":
+                    _ch[k] = _ch[k] * 0 + s
+                ch[k].append([_ch[k]])
+        # output a dictionary with each key being the concatenated vector
+        ch = {k: np.squeeze(np.concatenate(ch[k], axis=1)) for k in ch.keys()}
+        return ch
     elif (
-
-    ):  # single shank NP1 has 2 columns in a dense
-        ch.update({"col": np.tile(np.array([0, 1]), int(NC / 2))})
+        version in [2, 2.1, 2.4] and nshank == 1
+    ):  # single shank NP1 has 2 columns in a dense pattern
+        ch.update({"col": np.tile(np.array([0, 1]), int(nc / 2))})
     elif (
-
+        version in [2, 2.1, 2.4] and nshank == 4
     ):  # the 4 shank version default is rather complicated
-        shank_row = np.tile(np.arange(NC / 16), (2, 1)).T[:, np.newaxis].flatten()
+        shank_row = np.tile(np.arange(nc / 16), (2, 1)).T[:, np.newaxis].flatten()
         shank_row = np.tile(shank_row, 8)
         shank_row += (
             np.tile(
-                np.array([0, 0, 1, 1, 0, 0, 1, 1])[:, np.newaxis], (1, int(NC / 8))
+                np.array([0, 0, 1, 1, 0, 0, 1, 1])[:, np.newaxis], (1, int(nc / 8))
             ).flatten()
             * 24
         )
         ch.update(
             {
-                "col": np.tile(np.array([0, 1]), int(NC / 2)),
+                "col": np.tile(np.array([0, 1]), int(nc / 2)),
                 "shank": np.tile(
-                    np.array([0, 1, 0, 1, 2, 3, 2, 3])[:, np.newaxis], (1, int(NC / 8))
+                    np.array([0, 1, 0, 1, 2, 3, 2, 3])[:, np.newaxis], (1, int(nc / 8))
                 ).flatten(),
                 "row": shank_row,
             }
         )
+    else:
+        raise ValueError(
+            f"Invalid version {version}. Supported versions are 1, 2.X, NP2QB, NHPlong, NPultra."
+        )
     # for all, get coordinates
     ch.update(rc2xy(ch["row"], ch["col"], version=version))
     return ch
@@ -197,16 +220,22 @@ def adc_shifts(version=1, nc=NC):
     these are listed in the snsSaveChannelSubset field.
 
     :param version: neuropixel major version 1 or 2
-    :param nc: number of channels
     """
-
+    # version 1 uses 32 ADC that sample 12 channels each
+    if version in [1, "NPultra", "NHPlong"]:
         adc_channels = 12
         n_cycles = 13
-
-    elif
-        # version 2 uses 24 ADC that sample 16 channels each
+    # version 2 uses 24 ADC that sample 16 channels each
+    elif version == "NP2QB":
        adc_channels = n_cycles = 16
-
+        nc = 384 * 4
+    elif version in [2, 2.1, 2.4]:
+        adc_channels = n_cycles = 16
+    else:
+        raise ValueError(
+            f"Invalid version {version}. Supported versions are 1, 2.X, NP2QB, NHPlong, NPultra."
+        )
+    adc = np.floor(np.arange(nc) / (adc_channels * 2)) * 2 + np.mod(np.arange(nc), 2)
     sample_shift = np.zeros_like(adc)
     for a in adc:
         sample_shift[adc == a] = np.arange(adc_channels) / n_cycles
@@ -308,9 +337,9 @@ class NP2Converter:
         self.sos_lp = scipy.signal.butter(**butter_lp_kwargs, output="sos")
 
         # Number of ap channels
-        self.napch =
+        self.napch = self.sr.nc - self.sr.nsync
         # Position of start of sync channels in the raw data
-        self.idxsyncch =
+        self.idxsyncch = np.arange(self.sr.nsync) + self.sr.nc - self.sr.nsync
 
         self.extra = extra or ""
         self.nshank = nshank or None
@@ -343,8 +372,12 @@ class NP2Converter:
             status = self._process_NP21(overwrite=overwrite)
         elif self.np_version == "NP2.1":
             status = self._process_NP21(overwrite=overwrite)
+        elif self.np_version == "NP2QB":
+            status = self._process_NP24(overwrite=overwrite)
         else:
-            _logger.warning(
+            _logger.warning(
+                f"Probe version {self.np_version} unknown. Should be NP2.1, NP2.4 or NP2QB."
+            )
             status = -1
         return status
@@ -377,11 +410,9 @@ class NP2Converter:
 
         for first, last in wg.firstlast:
             chunk_ap = self.sr._raw[first:last, : self.napch].T
-            chunk_ap_sync = self.sr._raw[first:last, self.idxsyncch :].T
+            chunk_ap_sync = self.sr._raw[first:last, self.idxsyncch].T
             chunk_lf = self.extract_lfp(self.sr[first:last, : self.napch].T)
-            chunk_lf_sync = self.extract_lfp_sync(
-                self.sr[first:last, self.idxsyncch :].T
-            )
+            chunk_lf_sync = self.extract_lfp_sync(self.sr[first:last, self.idxsyncch].T)
 
             chunk_ap2save = self._ind2save(
                 chunk_ap, chunk_ap_sync, wg, ratio=1, etype="ap"
@@ -396,8 +427,8 @@ class NP2Converter:
         self._closefiles(etype="ap")
         self._closefiles(etype="lf")
 
-        self._writemetadata_ap()
-        self._writemetadata_lf()
+        self._writemetadata(etype="ap")
+        self._writemetadata(etype="lf")
 
         if self.post_check:
             self.check_NP24()
@@ -424,14 +455,23 @@ class NP2Converter:
         shank_info = {}
         self.already_exists = False
 
-        for sh in n_shanks:
+        for i, sh in enumerate(n_shanks):
             _shank_info = {}
             # channels for individual shank + sync channel
+            # NP2QB has a sync channel per shank
+            if self.np_version == "NP2.4":
+                isync = self.idxsyncch
+            elif self.np_version == "NP2QB":
+                isync = self.idxsyncch[i]
+            else:
+                raise ValueError(
+                    f"Probe version {self.np_version} should be 'NP2.4' or 'NP2QB'"
+                )
             _shank_info["chns"] = np.r_[
                 np.where(chn_info["shank"] == sh)[0],
-
+                isync,
             ]
-
+            # we name the probe folder by appending a, b, c etc..
             probe_path = self.ap_file.parent.parent.joinpath(
                 label + chr(97 + int(sh)) + self.extra
             )
@@ -484,9 +524,7 @@ class NP2Converter:
             last = last + offset
 
             chunk_lf = self.extract_lfp(self.sr[first:last, : self.napch].T)
-            chunk_lf_sync = self.extract_lfp_sync(
-                self.sr[first:last, self.idxsyncch :].T
-            )
+            chunk_lf_sync = self.extract_lfp_sync(self.sr[first:last, self.idxsyncch].T)
 
             chunk_lf2save = self._ind2save(
                 chunk_lf, chunk_lf_sync, wg, ratio=self.ratio, etype="lf"
@@ -496,7 +534,7 @@ class NP2Converter:
 
         self._closefiles(etype="lf")
 
-        self._writemetadata_lf()
+        self._writemetadata(etype="lf")
 
         if self.compress:
             self.compress_NP21(overwrite=overwrite)
@@ -570,19 +608,12 @@ class NP2Converter:
             chunk = np.zeros_like(expected)
             for ish, sh in enumerate(self.shank_info.keys()):
                 srs = self.shank_info[sh]["sr"]
-
-
-                else:
-                    chunk[:, self.shank_info[sh]["chns"][:-1]] = srs[first:last, :-1]
-            assert np.array_equal(expected, chunk), (
-                "data in original file and split files do no match"
-            )
-
+                chunk[:, self.shank_info[sh]["chns"]] = srs[first:last, :]
+            np.testing.assert_array_equal(expected, chunk)
         # close the sglx instances once we are done checking
         for sh in self.shank_info.keys():
             sr = self.shank_info[sh].pop("sr")
             sr.close()
-
         self.check_completed = True
 
     def compress_NP24(self, overwrite=False, **kwargs):
@@ -652,7 +683,7 @@ class NP2Converter:
 
     def _split2shanks(self, chunk, etype="ap"):
         """
-        Splits the signal on the 384 channels into the individual shanks and saves to file
+        Splits the signal on the full 384 / 1536 channels into the individual shanks and saves to file
 
         :param chunk: portion of signal with all 384 channels
        :param type: ephys type, either 'ap' or 'lf'
@@ -691,7 +722,7 @@ class NP2Converter:
                     chunk[:, slice(*ind2save)].T
                     / self.sr.channel_conversion_sample2v[etype][: self.napch],
                     chunk_sync[:, slice(*ind2save)].T
-                    / self.sr.channel_conversion_sample2v[etype][self.idxsyncch :],
+                    / self.sr.channel_conversion_sample2v[etype][self.idxsyncch],
                 ]
             ).astype(np.int16)
         else:
@@ -740,33 +771,7 @@ class NP2Converter:
             open = self.shank_info[sh].pop(f"{etype}_open_file")
             open.close()
 
-    def _writemetadata_ap(self):
-        """
-        Function to create ap meta data file. Adapts the relevant keys in the spikeglx meta file
-        to contain the correct number of channels. Also adds key to indicate that this is not an
-        original meta data file, but one that has been adapted
-
-        :return:
-        """
-
-        for sh in self.shank_info.keys():
-            n_chns = len(self.shank_info[sh]["chns"])
-            # First for the ap file
-            meta_shank = copy.deepcopy(self.sr.meta)
-            meta_shank["acqApLfSy"][0] = n_chns - 1
-            meta_shank["snsApLfSy"][0] = n_chns - 1
-            meta_shank["nSavedChans"] = n_chns
-            meta_shank["fileSizeBytes"] = self.shank_info[sh]["ap_file"].stat().st_size
-            meta_shank["snsSaveChanSubset_orig"] = spikeglx._get_savedChans_subset(
-                self.shank_info[sh]["chns"]
-            )
-            meta_shank["snsSaveChanSubset"] = f"0:{n_chns - 1}"
-            meta_shank["original_meta"] = False
-            meta_shank[f"{self.np_version}_shank"] = int(sh[-1])
-            meta_file = self.shank_info[sh]["ap_file"].with_suffix(".meta")
-            spikeglx.write_meta_data(meta_shank, meta_file)
-
-    def _writemetadata_lf(self):
+    def _writemetadata(self, etype="ap"):
         """
         Function to create lf meta data file. Adapts the relevant keys in the spikeglx meta file
         to contain the correct number of channels. Also adds key to indicate that this is not an
@@ -774,25 +779,34 @@ class NP2Converter:
 
         :return:
        """
-
+        if etype == "ap":
+            ifull, iempty, fkey, fs = (0, 1, "ap_file", self.sr.fs)  # ap
+        elif etype == "lf":
+            ifull, iempty, fkey, fs = (1, 0, "lf_file", self.fs_lf)  # lf
+        else:
+            ValueError(f"Unsupported etype: {etype}")
         for sh in self.shank_info.keys():
             n_chns = len(self.shank_info[sh]["chns"])
             meta_shank = copy.deepcopy(self.sr.meta)
-            meta_shank["acqApLfSy"][
-            meta_shank["acqApLfSy"][
-            meta_shank["
-            meta_shank["snsApLfSy"][
-            meta_shank["
-            meta_shank["
-
+            meta_shank["acqApLfSy"][iempty] = 0
+            meta_shank["acqApLfSy"][ifull] = n_chns - 1
+            meta_shank["acqApLfSy"][2] = 1  # for NP2QB this goes from 4 to 1
+            meta_shank["snsApLfSy"][iempty] = 0
+            meta_shank["snsApLfSy"][ifull] = n_chns - 1
+            meta_shank["snsApLfSy"][2] = 1  # for NP2QB this goes from 4 to 1
+            meta_shank["fileSizeBytes"] = self.shank_info[sh][fkey].stat().st_size
+            meta_shank["imSampRate"] = fs
+            if self.np_version in ["NP2.4", "NP2QB"]:
                 meta_shank["snsSaveChanSubset_orig"] = spikeglx._get_savedChans_subset(
                     self.shank_info[sh]["chns"]
                 )
                 meta_shank["snsSaveChanSubset"] = f"0:{n_chns - 1}"
                 meta_shank["nSavedChans"] = n_chns
+                meta_shank["NP2.4_shank"] = int(sh[-1])
+            else:
+                meta_shank[f"{self.np_version}_shank"] = int(sh[-1])
             meta_shank["original_meta"] = False
-
-            meta_file = self.shank_info[sh]["lf_file"].with_suffix(".meta")
+            meta_file = self.shank_info[sh][fkey].with_suffix(".meta")
             spikeglx.write_meta_data(meta_shank, meta_file)
 
     def get_processed_files_NP24(self):
@@ -1024,3 +1038,37 @@ class NP2Reconstructor:
         sr.close()
         self.save_file.unlink()
         self.save_file = cbin_file
+
+
+@lru_cache(maxsize=1)
+def load_spike_glx_probe_table(force_download=False, cache_dir=None):
+    """
+    Load ProbeTable.json, downloading if necessary.
+
+    Parameters
+    ----------
+    force_download : bool, default: False
+        If True, download the file even if cached version exists
+
+    Returns
+    -------
+    dict
+        The probe table data
+    """
+    PROBE_TABLE_URL = "https://raw.githubusercontent.com/billkarsh/ProbeTable/refs/heads/main/Tables/probe_features.json"
+    cache_dir = Path.home().joinpath(".ibldsp") if cache_dir is None else cache_dir
+    cache_file = cache_dir / "ProbeTable.json"
+
+    if not cache_file.exists() or force_download:
+        cache_dir.mkdir(parents=True, exist_ok=True)
+        response = requests.get(PROBE_TABLE_URL)
+        response.raise_for_status()
+
+        with open(cache_file, "w") as f:
+            json.dump(response.json(), f, indent=2)
+
+    with open(cache_file, "r") as f:
+        probe_dict = json.load(f)
+
+    df_tables = pd.DataFrame(probe_dict["neuropixels_probes"]).T
+    return df_tables, probe_dict
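Hedged usage sketches for the neuropixel.py additions above (not part of the package diff; shapes, key names and return order are inferred from the code shown in this release):

import numpy as np
import neuropixel

# NP2QB quad-base layout: four NP2.1-like shanks concatenated, i.e. 4 * 384 = 1536 sites
ch = neuropixel.dense_layout(version="NP2QB")
assert ch["shank"].size == 4 * 384
assert np.array_equal(np.unique(ch["shank"]), np.arange(4))

# adc_shifts returns a (sample_shift, adc) pair, as used by spikeglx.geometry_from_meta;
# for NP2QB the channel count is forced to 384 * 4 internally
sample_shift, adc = neuropixel.adc_shifts(version="NP2QB")
assert sample_shift.size == 4 * 384

# the new SpikeGLX probe-table helper caches ProbeTable.json under ~/.ibldsp and, despite
# its docstring, returns a (DataFrame, dict) pair; results are memoized by lru_cache per argument set
df_tables, probe_dict = neuropixel.load_spike_glx_probe_table()
df_tables, probe_dict = neuropixel.load_spike_glx_probe_table(force_download=True)  # re-download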
spikeglx.py
CHANGED

@@ -14,6 +14,7 @@ import one.alf.path
 
 import neuropixel
 
+
 SAMPLE_SIZE = 2  # int16
 DEFAULT_BATCH_SIZE = 1e6
 _logger = logging.getLogger("ibllib")
@@ -459,6 +460,12 @@ class Reader:
             log_func(f"SHA1 computed: {sc}")
         return sm == sc
 
+    def extract_sync_files(self, output_path=None, save=False, parts=""):
+        """
+        Extracts sync.times, sync.channels and sync.polarities from binary ephys dataset
+        """
+        pass
+
     def _parse_ch_file(self, ch_file=None):
         ch_file = (
             _get_companion_file(self.file_bin, ".ch") if ch_file is None else ch_file
@@ -567,6 +574,8 @@ def _get_neuropixel_major_version_from_meta(md):
         "NP2.1": 2,
         "NP2.4": 2.4,
         "NPultra": "NPultra",
+        "NP2QB": "NP2QB",
+        "NHPlong": "NHPlong",
     }
     version = _get_neuropixel_version_from_meta(md)
     if version is not None:
@@ -591,6 +600,19 @@ def _get_max_int_from_meta(md, neuropixel_version=None):
     return int(md.get("imMaxInt", 32768))
 
 
+def _get_gain_from_meta(md):
+    version = _get_neuropixel_version_from_meta(md)
+    if version == 1:
+        return 500
+    else:
+        if md.get("imChan0apGain"):
+            return float(md["imChan0apGain"])
+        elif md["imDatPrb_pn"] in ["NP2010", "NP2000"]:
+            return 80
+        else:
+            return 100
+
+
 def _get_neuropixel_version_from_meta(md):
     """
     Get neuropixel version tag (3A, 3B1, 3B2) from the metadata dictionary
@@ -607,13 +629,19 @@ def _get_neuropixel_version_from_meta(md):
         else:
             return "3B1"
     # Neuropixel 2.0 single shank
-    elif prb_type
+    elif prb_type in [21]:
         return "NP2.1"
     # Neuropixel 2.0 four shank
-    elif prb_type
+    elif prb_type in [24, 2013]:
         return "NP2.4"
-    elif prb_type
+    elif prb_type in [1100]:
         return "NPultra"
+    elif prb_type in [2020]:
+        return "NP2QB"
+    elif prb_type in [1030]:
+        return "NHPlong"
+    else:
+        ValueError(f"Unknown neuropixel probe type {prb_type}")
 
 
 def _get_sync_trace_indices_from_meta(md):
@@ -722,12 +750,10 @@ def geometry_from_meta(meta_data, return_index=False, nc=384, sort=True):
         th.update(neuropixel.xy2rc(th["x"], th["y"], version=major_version))
     else:
         # the spike sorting channel maps have a flipped version of the channel map
-        if major_version
+        if major_version in [1, "NHPlong"]:
            th["col"] = -cm["col"] * 2 + 2 + np.mod(cm["row"], 2)
         th.update(neuropixel.rc2xy(th["row"], th["col"], version=major_version))
-    th["sample_shift"], th["adc"] = neuropixel.adc_shifts(
-        version=major_version, nc=th["col"].size
-    )
+    th["sample_shift"], th["adc"] = neuropixel.adc_shifts(version=major_version)
     th = _split_geometry_into_shanks(th, meta_data)
     th["ind"] = np.arange(th["col"].size)
     if sort:
@@ -807,14 +833,15 @@ def _conversion_sample2v_from_meta(meta_data):
         _get_sync_trace_indices_from_meta(meta_data)
     )
     if "NP2" in version:
-        # NP
-        #
+        # NP 2010; APGain = 80 for all AP
+        # NP 2013, 2014, 2020, 2021; APGain = 100 for all AP
+        gain = _get_gain_from_meta(meta_data)
         out = {
             "lf": np.hstack(
-                (int2volt /
+                (int2volt / gain * np.ones(n_chn).astype(np.float32), sy_gain)
             ),
             "ap": np.hstack(
-                (int2volt /
+                (int2volt / gain * np.ones(n_chn).astype(np.float32), sy_gain)
             ),
         }
     else:
@@ -997,7 +1024,7 @@ def glob_ephys_files(
 
 def _mock_spikeglx_file(
     mock_bin_file,
-    meta_file,
+    meta_file,  # the read-only fixture file that will be copied over
     ns,
     nc,
     sync_depth=16,