eegdash 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of eegdash might be problematic.
- eegdash/SignalStore/__init__.py +0 -0
- eegdash/SignalStore/signalstore/__init__.py +3 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/abstract_read_adapter.py +13 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/domain_modeling/schema_read_adapter.py +16 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/domain_modeling/vocabulary_read_adapter.py +19 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/handmade_records/excel_study_organizer_read_adapter.py +114 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/axona/axona_read_adapter.py +912 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/ReadIntanSpikeFile.py +140 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/intan_read_adapter.py +29 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/__init__.py +0 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/data_to_result.py +62 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/get_bytes_per_data_block.py +36 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/notch_filter.py +50 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/qstring.py +41 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_header.py +135 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_one_data_block.py +45 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/load_intan_rhd_format.py +204 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/__init__.py +0 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/data_to_result.py +60 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/get_bytes_per_data_block.py +37 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/notch_filter.py +50 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/qstring.py +41 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/read_header.py +153 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/read_one_data_block.py +47 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/load_intan_rhs_format.py +213 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/neurodata_without_borders/neurodata_without_borders_read_adapter.py +14 -0
- eegdash/SignalStore/signalstore/operations/__init__.py +4 -0
- eegdash/SignalStore/signalstore/operations/handler_executor.py +22 -0
- eegdash/SignalStore/signalstore/operations/handler_factory.py +41 -0
- eegdash/SignalStore/signalstore/operations/handlers/base_handler.py +44 -0
- eegdash/SignalStore/signalstore/operations/handlers/domain/property_model_handlers.py +79 -0
- eegdash/SignalStore/signalstore/operations/handlers/domain/schema_handlers.py +3 -0
- eegdash/SignalStore/signalstore/operations/helpers/abstract_helper.py +17 -0
- eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_extractor.py +33 -0
- eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_rawio.py +165 -0
- eegdash/SignalStore/signalstore/operations/helpers/spikeinterface_helper.py +100 -0
- eegdash/SignalStore/signalstore/operations/helpers/wrappers/neo_wrappers.py +21 -0
- eegdash/SignalStore/signalstore/operations/helpers/wrappers/nwb_wrappers.py +27 -0
- eegdash/SignalStore/signalstore/store/__init__.py +8 -0
- eegdash/SignalStore/signalstore/store/data_access_objects.py +1181 -0
- eegdash/SignalStore/signalstore/store/datafile_adapters.py +131 -0
- eegdash/SignalStore/signalstore/store/repositories.py +928 -0
- eegdash/SignalStore/signalstore/store/store_errors.py +68 -0
- eegdash/SignalStore/signalstore/store/unit_of_work.py +97 -0
- eegdash/SignalStore/signalstore/store/unit_of_work_provider.py +67 -0
- eegdash/SignalStore/signalstore/utilities/data_adapters/spike_interface_adapters/si_recording.py +1 -0
- eegdash/SignalStore/signalstore/utilities/data_adapters/spike_interface_adapters/si_sorter.py +1 -0
- eegdash/SignalStore/signalstore/utilities/testing/data_mocks.py +513 -0
- eegdash/SignalStore/signalstore/utilities/tools/dataarrays.py +49 -0
- eegdash/SignalStore/signalstore/utilities/tools/mongo_records.py +25 -0
- eegdash/SignalStore/signalstore/utilities/tools/operation_response.py +78 -0
- eegdash/SignalStore/signalstore/utilities/tools/purge_orchestration_response.py +21 -0
- eegdash/SignalStore/signalstore/utilities/tools/quantities.py +15 -0
- eegdash/SignalStore/signalstore/utilities/tools/strings.py +38 -0
- eegdash/SignalStore/signalstore/utilities/tools/time.py +17 -0
- eegdash/SignalStore/tests/conftest.py +799 -0
- eegdash/SignalStore/tests/data/valid_data/data_arrays/make_fake_data.py +59 -0
- eegdash/SignalStore/tests/unit/store/conftest.py +0 -0
- eegdash/SignalStore/tests/unit/store/test_data_access_objects.py +1235 -0
- eegdash/SignalStore/tests/unit/store/test_repositories.py +1309 -0
- eegdash/SignalStore/tests/unit/store/test_unit_of_work.py +7 -0
- eegdash/SignalStore/tests/unit/test_ci_cd.py +8 -0
- eegdash/__init__.py +1 -0
- eegdash/aws_ingest.py +29 -0
- eegdash/data_utils.py +213 -0
- eegdash/main.py +17 -0
- eegdash/signalstore_data_utils.py +280 -0
- eegdash-0.0.1.dist-info/LICENSE +20 -0
- eegdash-0.0.1.dist-info/METADATA +72 -0
- eegdash-0.0.1.dist-info/RECORD +72 -0
- eegdash-0.0.1.dist-info/WHEEL +5 -0
- eegdash-0.0.1.dist-info/top_level.txt +1 -0
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/ReadIntanSpikeFile.py
@@ -0,0 +1,140 @@
import os, struct

import tkinter as tk
from tkinter import filedialog

import matplotlib.pyplot as plt

# Version 3.0, 11 February 2021

# Reads spike.dat files generated by Intan Technologies RHX data acquisition
# software. Data are parsed and placed into variables within the Python workspace.
# Therefore, it is recommended to add code that either saves these variables to
# disk or plots/processes the data at the end of readIntanSpikeFile, before
# those variables are removed when execution completes.

# Spike data from N channels are loaded into an N x M list named
# 'spikes', where M = 5 if spike snapshots were saved, otherwise M = 4.
# The first column of spikes contains native channel names. The second
# column contains custom channel names. The third column contains spike
# timestamps. The fourth column contains spike ID numbers (128 = likely
# artifact due to amplitude exceeding the threshold set in Spike Scope). All
# normal spikes have a spike ID of 1. Future versions of the RHX software
# may support realtime spike sorting, in which case the spike ID will
# denote different identified units (1, 2, 3, etc.). If spike snapshots
# were saved, they are contained in the fifth column.

def readString(fid):
    # Read a null-terminated UTF-8 string from the open file.
    resultStr = ""
    ch, = struct.unpack('<c', fid.read(1))
    while ch != b'\0':
        resultStr = resultStr + str(ch, "utf-8")
        ch, = struct.unpack('<c', fid.read(1))
    return resultStr

def readIntanSpikeFile(option):

    noArtifacts = 0
    if option == "noartifacts":
        noArtifacts = 1

    print("Select a Spike Data File")
    root = tk.Tk()
    root.withdraw()
    fullFileName = filedialog.askopenfilename()
    if not fullFileName:
        print("Canceled")
        return

    # Open data file
    fid = open(fullFileName, 'rb')
    filesize = os.path.getsize(fullFileName)

    # Check 'magic number' at beginning of file to make sure this is an Intan
    # Technologies spike data file.
    multichannel = 0
    magicNumber, = struct.unpack('<I', fid.read(4))
    if magicNumber == int('18f8474b', 16):
        multichannel = 1
    elif magicNumber == int('18f88c00', 16):
        multichannel = 0
    else:
        raise Exception('Unrecognized file type.')

    spikeFileVersionNumber, = struct.unpack('<H', fid.read(2))

    if spikeFileVersionNumber > 1:
        print("Warning: This spike file version is not supported by this file reader.")
        print("Check the Intan Technologies website for a more recent version.")

    filename = readString(fid)
    channelList = readString(fid).split(",")
    customChannelList = readString(fid).split(",")

    sampleRate, = struct.unpack('<f', fid.read(4))

    samplesPreDetect, = struct.unpack('<I', fid.read(4))
    samplesPostDetect, = struct.unpack('<I', fid.read(4))
    nSamples = samplesPreDetect + samplesPostDetect

    if nSamples == 0:
        snapshotsPresent = 0
    else:
        snapshotsPresent = 1

    N = len(channelList)

    spikes = [[] for _ in range(N)]
    for i in range(N):
        spikes[i].append(channelList[i])        # 0: native channel name
        spikes[i].append(customChannelList[i])  # 1: custom channel name
        spikes[i].append([])                    # 2: single-float timestamp
        spikes[i].append([])                    # 3: uint8 spike ID
        if snapshotsPresent:
            spikes[i].append([])                # 4: single-float snapshot

    while filesize - fid.tell() > 0:
        if multichannel:
            channelName = ""
            for charIndex in range(5):
                thisChar, = struct.unpack('<c', fid.read(1))
                channelName = channelName + str(thisChar, "utf-8")
            for i in range(N):
                if spikes[i][0] == channelName:
                    index = i
                    break
        else:
            index = 0  # single-channel files contain only one channel

        timestamp, = struct.unpack('<i', fid.read(4))
        spikeID, = struct.unpack('<B', fid.read(1))

        if snapshotsPresent:
            snapshot = list(struct.unpack("<%dH" % nSamples, fid.read(2 * nSamples)))

        if spikeID == 128 and noArtifacts:
            continue

        timestampSeconds = timestamp / sampleRate

        spikes[index][2].append(timestampSeconds)
        spikes[index][3].append(spikeID)
        if snapshotsPresent:
            snapshotMicroVolts = [0.195 * (float(snapshotSample) - 32768.0) for snapshotSample in snapshot]
            spikes[index][4].append(snapshotMicroVolts)

    # Close data file
    fid.close()

    if snapshotsPresent:
        tSnapshot = [(sample - samplesPreDetect) / sampleRate for sample in range(nSamples)]

        # Just for demonstration, plot the 2nd channel's (list index N = 1) snapshots
        # (snapshots are always in the fifth column, M = 4). Grab the 6th snapshot
        # present (list index = 5) and plot it.
        secondChannelSnapshots = spikes[1][4]
        plt.plot(tSnapshot, secondChannelSnapshots[5])
        plt.show()

# If the function is called with the "noartifacts" parameter, all spikes with spike ID = 128 are ignored.
readIntanSpikeFile("artifacts")
#readIntanSpikeFile("noartifacts")
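Note: the 'spikes' list built above is discarded when readIntanSpikeFile returns, as its own header comment warns. The sketch below is purely illustrative: it assumes the script were modified to end with 'return spikes' instead of the demonstration plot, and shows how the documented N x M layout (columns 0/1: channel names, 2: timestamps in seconds, 3: spike IDs, 4: snapshot waveforms in microvolts) could then be consumed.

# Hypothetical usage sketch -- assumes readIntanSpikeFile() has been changed to
# end with `return spikes` rather than plotting and discarding the list.
spikes = readIntanSpikeFile("noartifacts")

for channel in spikes:
    native_name, custom_name = channel[0], channel[1]    # columns 0 and 1: names
    timestamps, spike_ids = channel[2], channel[3]       # columns 2 and 3: events
    n_real = sum(1 for sid in spike_ids if sid != 128)   # ID 128 = likely artifact
    print(f"{native_name} ({custom_name}): {len(timestamps)} events, {n_real} non-artifact")
    if len(channel) == 5 and channel[4]:                 # column 4 only if snapshots saved
        first_snapshot_uv = channel[4][0]                # snapshots already scaled to microvolts
        print(f"  first snapshot: {len(first_snapshot_uv)} samples")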
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/intan_read_adapter.py
@@ -0,0 +1,29 @@
from signalstore.operations.importers.adapters.abstract_read_adapter import AbstractReadAdapter


class IntanReadAdapter(AbstractReadAdapter):
    def __init__(self, path):
        self.path = path

    def read(self):
        """Reads a set of Intan files (RHD, RHS, Spike) and converts each data
        object into an xarray.DataArray with the appropriate dimensions,
        coordinates and metadata attributes for the Neuroscikit data model.
        """
        pass


# ================================================================================
# RHD read helper functions
# ================================================================================


# ================================================================================
# RHS helper functions
# ================================================================================


# ================================================================================
# Spike File helper functions
# ================================================================================
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/__init__.py
File without changes
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/data_to_result.py
@@ -0,0 +1,62 @@
#! /bin/env python
#
# Michael Gibson 27 April 2015
# Modified Adrian Foy Sep 2018

def data_to_result(header, data, data_present):
    """Moves the header and data (if present) into a common object."""

    result = {}
    if header['num_amplifier_channels'] > 0 and data_present:
        result['t_amplifier'] = data['t_amplifier']
    if header['num_aux_input_channels'] > 0 and data_present:
        result['t_aux_input'] = data['t_aux_input']
    if header['num_supply_voltage_channels'] > 0 and data_present:
        result['t_supply_voltage'] = data['t_supply_voltage']
    if header['num_board_adc_channels'] > 0 and data_present:
        result['t_board_adc'] = data['t_board_adc']
    if (header['num_board_dig_in_channels'] > 0 or header['num_board_dig_out_channels'] > 0) and data_present:
        result['t_dig'] = data['t_dig']
    if header['num_temp_sensor_channels'] > 0 and data_present:
        result['t_temp_sensor'] = data['t_temp_sensor']

    if header['num_amplifier_channels'] > 0:
        result['spike_triggers'] = header['spike_triggers']

    result['notes'] = header['notes']
    result['frequency_parameters'] = header['frequency_parameters']

    if header['version']['major'] > 1:
        result['reference_channel'] = header['reference_channel']

    if header['num_amplifier_channels'] > 0:
        result['amplifier_channels'] = header['amplifier_channels']
        if data_present:
            result['amplifier_data'] = data['amplifier_data']

    if header['num_aux_input_channels'] > 0:
        result['aux_input_channels'] = header['aux_input_channels']
        if data_present:
            result['aux_input_data'] = data['aux_input_data']

    if header['num_supply_voltage_channels'] > 0:
        result['supply_voltage_channels'] = header['supply_voltage_channels']
        if data_present:
            result['supply_voltage_data'] = data['supply_voltage_data']

    if header['num_board_adc_channels'] > 0:
        result['board_adc_channels'] = header['board_adc_channels']
        if data_present:
            result['board_adc_data'] = data['board_adc_data']

    if header['num_board_dig_in_channels'] > 0:
        result['board_dig_in_channels'] = header['board_dig_in_channels']
        if data_present:
            result['board_dig_in_data'] = data['board_dig_in_data']

    if header['num_board_dig_out_channels'] > 0:
        result['board_dig_out_channels'] = header['board_dig_out_channels']
        if data_present:
            result['board_dig_out_data'] = data['board_dig_out_data']

    return result
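Note: the IntanReadAdapter.read() stub earlier in this package is documented as producing xarray.DataArray objects, while data_to_result() packs everything into a plain dict keyed by 'amplifier_data', 't_amplifier', 'amplifier_channels', and so on. A minimal sketch of the bridge between the two could look like the following; the helper name and the choice of dims/attrs are illustrative assumptions, not part of the package.

import numpy as np
import xarray as xr

def amplifier_result_to_dataarray(result):
    """Hypothetical helper: wrap the amplifier block of a data_to_result() dict
    as an xarray.DataArray with channel and time coordinates."""
    channel_names = [ch['native_channel_name'] for ch in result['amplifier_channels']]
    return xr.DataArray(
        np.asarray(result['amplifier_data']),
        dims=('channel', 'time'),
        coords={'channel': channel_names, 'time': np.asarray(result['t_amplifier'])},
        attrs={'sample_rate': result['frequency_parameters']['amplifier_sample_rate'],
               'units': 'amplifier ADC counts'},
    )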
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/get_bytes_per_data_block.py
@@ -0,0 +1,36 @@
#! /bin/env python
#
# Michael Gibson 23 April 2015
# Modified Adrian Foy Sep 2018


def get_bytes_per_data_block(header):
    """Calculates the number of bytes in each 60 or 128 sample datablock."""

    # Each data block contains 60 or 128 amplifier samples.
    bytes_per_block = header['num_samples_per_data_block'] * 4  # timestamp data
    bytes_per_block = bytes_per_block + header['num_samples_per_data_block'] * 2 * header['num_amplifier_channels']

    # Auxiliary inputs are sampled 4x slower than amplifiers
    bytes_per_block = bytes_per_block + (header['num_samples_per_data_block'] / 4) * 2 * header['num_aux_input_channels']

    # Supply voltage is sampled 60 or 128x slower than amplifiers
    bytes_per_block = bytes_per_block + 1 * 2 * header['num_supply_voltage_channels']

    # Board analog inputs are sampled at same rate as amplifiers
    bytes_per_block = bytes_per_block + header['num_samples_per_data_block'] * 2 * header['num_board_adc_channels']

    # Board digital inputs are sampled at same rate as amplifiers
    if header['num_board_dig_in_channels'] > 0:
        bytes_per_block = bytes_per_block + header['num_samples_per_data_block'] * 2

    # Board digital outputs are sampled at same rate as amplifiers
    if header['num_board_dig_out_channels'] > 0:
        bytes_per_block = bytes_per_block + header['num_samples_per_data_block'] * 2

    # Temp sensor is sampled 60 or 128x slower than amplifiers
    if header['num_temp_sensor_channels'] > 0:
        bytes_per_block = bytes_per_block + 1 * 2 * header['num_temp_sensor_channels']

    return bytes_per_block
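Note: get_bytes_per_data_block() gives the fixed size of one data block, so the number of blocks in a recording follows from simple arithmetic on the bytes remaining after the header. A hedged sketch of that bookkeeping (the helper name and variable names are illustrative, not from the package) is:

import os

def count_data_blocks(filename, fid, header):
    """Hypothetical helper: number of fixed-size data blocks following the header.
    Assumes read_header(fid) has already consumed the header bytes."""
    bytes_per_block = get_bytes_per_data_block(header)
    bytes_remaining = os.path.getsize(filename) - fid.tell()
    if bytes_remaining % bytes_per_block != 0:
        raise Exception('File size should contain a whole number of data blocks.')
    return int(bytes_remaining / bytes_per_block)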
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/notch_filter.py
@@ -0,0 +1,50 @@
#! /bin/env python
#
# Michael Gibson 27 April 2015

import math
import numpy as np

def notch_filter(input, fSample, fNotch, Bandwidth):
    """Implements a notch filter (e.g., for 50 or 60 Hz) on vector 'input'.

    fSample = sample rate of data (in Hz or Samples/sec)
    fNotch = filter notch frequency (in Hz)
    Bandwidth = notch 3-dB bandwidth (in Hz). A bandwidth of 10 Hz is
    recommended for 50 or 60 Hz notch filters; narrower bandwidths lead to
    poor time-domain properties with an extended ringing response to
    transient disturbances.

    Example: If neural data was sampled at 30 kSamples/sec
    and you wish to implement a 60 Hz notch filter:

    out = notch_filter(input, 30000, 60, 10);
    """

    tstep = 1.0/fSample
    Fc = fNotch*tstep

    L = len(input)

    # Calculate IIR filter parameters
    d = math.exp(-2.0*math.pi*(Bandwidth/2.0)*tstep)
    b = (1.0 + d*d) * math.cos(2.0*math.pi*Fc)
    a0 = 1.0
    a1 = -b
    a2 = d*d
    a = (1.0 + d*d)/2.0
    b0 = 1.0
    b1 = -2.0 * math.cos(2.0*math.pi*Fc)
    b2 = 1.0

    out = np.zeros(len(input))
    out[0] = input[0]
    out[1] = input[1]
    # (If filtering a continuous data stream, change out[0:1] to the
    # previous final two values of out.)

    # Run filter
    for i in range(2, L):
        out[i] = (a*b2*input[i-2] + a*b1*input[i-1] + a*b0*input[i] - a2*out[i-2] - a1*out[i-1])/a0

    return out
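Note: the docstring's example can be made concrete with synthetic data. The sketch below (sample rate and tone frequencies are assumed for illustration, not taken from the package) builds a 30 kS/s signal with 60 Hz interference and removes it with a 10 Hz-wide notch.

import numpy as np

fs = 30000                                   # assumed sample rate, samples/sec
t = np.arange(fs) / fs                       # one second of data
clean = np.sin(2 * np.pi * 7 * t)            # 7 Hz "signal" of interest
hum = 0.5 * np.sin(2 * np.pi * 60 * t)       # 60 Hz mains interference
filtered = notch_filter(clean + hum, fs, 60, 10)

# With a 1 s window, FFT bin 60 corresponds to 60 Hz; it should be strongly
# attenuated after filtering.
print(np.abs(np.fft.rfft(clean + hum))[60], np.abs(np.fft.rfft(filtered))[60])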
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/qstring.py
@@ -0,0 +1,41 @@
#! /bin/env python
#
# Michael Gibson 23 April 2015


import sys, struct, os

def read_qstring(fid):
    """Read Qt style QString.

    The first 32-bit unsigned number indicates the length of the string (in bytes).
    If this number equals 0xFFFFFFFF, the string is null.

    Strings are stored as unicode.
    """

    length, = struct.unpack('<I', fid.read(4))
    if length == int('ffffffff', 16): return ""

    if length > (os.fstat(fid.fileno()).st_size - fid.tell() + 1):
        print(length)
        raise Exception('Length too long.')

    # convert length from bytes to 16-bit Unicode words
    length = int(length / 2)

    data = []
    for i in range(0, length):
        c, = struct.unpack('<H', fid.read(2))
        data.append(c)

    if sys.version_info >= (3, 0):
        a = ''.join([chr(c) for c in data])
    else:
        a = ''.join([unichr(c) for c in data])

    return a

if __name__ == '__main__':
    a = read_qstring(open(sys.argv[1], 'rb'))
    print(a)
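Note: to make the QString layout described in the docstring tangible, the snippet below (purely illustrative, using an in-memory buffer rather than a real Intan file) builds the byte sequence for the string "ABC" -- a little-endian uint32 byte length followed by UTF-16 code units -- and decodes it with the same logic read_qstring uses.

import io
import struct

text = "ABC"
payload = text.encode('utf-16-le')                 # 16-bit code units, little endian
buf = io.BytesIO(struct.pack('<I', len(payload)) + payload)

# read_qstring expects a real file object for its os.fstat() sanity check,
# so for this in-memory demo we parse the same layout directly:
length, = struct.unpack('<I', buf.read(4))
decoded = ''.join(chr(struct.unpack('<H', buf.read(2))[0]) for _ in range(length // 2))
print(decoded)   # -> ABC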
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_header.py
@@ -0,0 +1,135 @@
#! /bin/env python
#
# Michael Gibson 23 April 2015
# Modified Adrian Foy Sep 2018

import sys, struct
from intanutil.qstring import read_qstring

def read_header(fid):
    """Reads the Intan File Format header from the given file."""

    # Check 'magic number' at beginning of file to make sure this is an Intan
    # Technologies RHD2000 data file.
    magic_number, = struct.unpack('<I', fid.read(4))
    if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.')

    header = {}
    # Read version number.
    version = {}
    (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4))
    header['version'] = version

    print('')
    print('Reading Intan Technologies RHD2000 Data File, Version {}.{}'.format(version['major'], version['minor']))
    print('')

    freq = {}

    # Read information of sampling rate and amplifier frequency settings.
    header['sample_rate'], = struct.unpack('<f', fid.read(4))
    (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'],
     freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26))


    # This tells us if a software 50/60 Hz notch filter was enabled during
    # the data acquisition.
    notch_filter_mode, = struct.unpack('<h', fid.read(2))
    header['notch_filter_frequency'] = 0
    if notch_filter_mode == 1:
        header['notch_filter_frequency'] = 50
    elif notch_filter_mode == 2:
        header['notch_filter_frequency'] = 60
    freq['notch_filter_frequency'] = header['notch_filter_frequency']

    (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8))

    note1 = read_qstring(fid)
    note2 = read_qstring(fid)
    note3 = read_qstring(fid)
    header['notes'] = {'note1': note1, 'note2': note2, 'note3': note3}

    # If data file is from GUI v1.1 or later, see if temperature sensor data was saved.
    header['num_temp_sensor_channels'] = 0
    if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1):
        header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2))

    # If data file is from GUI v1.3 or later, load eval board mode.
    header['eval_board_mode'] = 0
    if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1):
        header['eval_board_mode'], = struct.unpack('<h', fid.read(2))


    header['num_samples_per_data_block'] = 60
    # If data file is from v2.0 or later (Intan Recording Controller), load name of digital reference channel
    if version['major'] > 1:
        header['reference_channel'] = read_qstring(fid)
        header['num_samples_per_data_block'] = 128

    # Place frequency-related information in data structure. (Note: much of this structure is set above)
    freq['amplifier_sample_rate'] = header['sample_rate']
    freq['aux_input_sample_rate'] = header['sample_rate'] / 4
    freq['supply_voltage_sample_rate'] = header['sample_rate'] / header['num_samples_per_data_block']
    freq['board_adc_sample_rate'] = header['sample_rate']
    freq['board_dig_in_sample_rate'] = header['sample_rate']

    header['frequency_parameters'] = freq

    # Create structure arrays for each type of data channel.
    header['spike_triggers'] = []
    header['amplifier_channels'] = []
    header['aux_input_channels'] = []
    header['supply_voltage_channels'] = []
    header['board_adc_channels'] = []
    header['board_dig_in_channels'] = []
    header['board_dig_out_channels'] = []

    # Read signal summary from data file header.

    number_of_signal_groups, = struct.unpack('<h', fid.read(2))

    for signal_group in range(1, number_of_signal_groups + 1):
        signal_group_name = read_qstring(fid)
        signal_group_prefix = read_qstring(fid)
        (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6))

        if (signal_group_num_channels > 0) and (signal_group_enabled > 0):
            for signal_channel in range(0, signal_group_num_channels):
                new_channel = {'port_name': signal_group_name, 'port_prefix': signal_group_prefix, 'port_number': signal_group}
                new_channel['native_channel_name'] = read_qstring(fid)
                new_channel['custom_channel_name'] = read_qstring(fid)
                (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12))
                new_trigger_channel = {}
                (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8))
                (new_channel['channel_impedance_magnitude'], new_channel['channel_impedance_phase']) = struct.unpack('<ff', fid.read(8))

                if channel_enabled:
                    if signal_type == 0:
                        header['amplifier_channels'].append(new_channel)
                        header['spike_triggers'].append(new_trigger_channel)
                    elif signal_type == 1:
                        header['aux_input_channels'].append(new_channel)
                    elif signal_type == 2:
                        header['supply_voltage_channels'].append(new_channel)
                    elif signal_type == 3:
                        header['board_adc_channels'].append(new_channel)
                    elif signal_type == 4:
                        header['board_dig_in_channels'].append(new_channel)
                    elif signal_type == 5:
                        header['board_dig_out_channels'].append(new_channel)
                    else:
                        raise Exception('Unknown channel type.')

    # Summarize contents of data file.
    header['num_amplifier_channels'] = len(header['amplifier_channels'])
    header['num_aux_input_channels'] = len(header['aux_input_channels'])
    header['num_supply_voltage_channels'] = len(header['supply_voltage_channels'])
    header['num_board_adc_channels'] = len(header['board_adc_channels'])
    header['num_board_dig_in_channels'] = len(header['board_dig_in_channels'])
    header['num_board_dig_out_channels'] = len(header['board_dig_out_channels'])

    return header

if __name__ == '__main__':
    h = read_header(open(sys.argv[1], 'rb'))
    print(h)
eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_one_data_block.py
@@ -0,0 +1,45 @@
#! /bin/env python
#
# Michael Gibson 23 April 2015
# Modified Adrian Foy Sep 2018

import sys, struct
import numpy as np

def read_one_data_block(data, header, indices, fid):
    """Reads one 60 or 128 sample data block from fid into data, at the location indicated by indices."""

    # In version 1.2, we moved from saving timestamps as unsigned
    # integers to signed integers to accommodate negative (adjusted)
    # timestamps for pretrigger data.
    if (header['version']['major'] == 1 and header['version']['minor'] >= 2) or (header['version']['major'] > 1):
        data['t_amplifier'][indices['amplifier']:(indices['amplifier'] + header['num_samples_per_data_block'])] = np.array(struct.unpack('<' + 'i' * header['num_samples_per_data_block'], fid.read(4 * header['num_samples_per_data_block'])))
    else:
        data['t_amplifier'][indices['amplifier']:(indices['amplifier'] + header['num_samples_per_data_block'])] = np.array(struct.unpack('<' + 'I' * header['num_samples_per_data_block'], fid.read(4 * header['num_samples_per_data_block'])))

    if header['num_amplifier_channels'] > 0:
        tmp = np.fromfile(fid, dtype='uint16', count=header['num_samples_per_data_block'] * header['num_amplifier_channels'])
        data['amplifier_data'][range(header['num_amplifier_channels']), (indices['amplifier']):(indices['amplifier'] + header['num_samples_per_data_block'])] = tmp.reshape(header['num_amplifier_channels'], header['num_samples_per_data_block'])

    if header['num_aux_input_channels'] > 0:
        tmp = np.fromfile(fid, dtype='uint16', count=int((header['num_samples_per_data_block'] / 4) * header['num_aux_input_channels']))
        data['aux_input_data'][range(header['num_aux_input_channels']), indices['aux_input']:int(indices['aux_input'] + (header['num_samples_per_data_block'] / 4))] = tmp.reshape(header['num_aux_input_channels'], int(header['num_samples_per_data_block'] / 4))

    if header['num_supply_voltage_channels'] > 0:
        tmp = np.fromfile(fid, dtype='uint16', count=1 * header['num_supply_voltage_channels'])
        data['supply_voltage_data'][range(header['num_supply_voltage_channels']), indices['supply_voltage']:(indices['supply_voltage'] + 1)] = tmp.reshape(header['num_supply_voltage_channels'], 1)

    if header['num_temp_sensor_channels'] > 0:
        tmp = np.fromfile(fid, dtype='uint16', count=1 * header['num_temp_sensor_channels'])
        data['temp_sensor_data'][range(header['num_temp_sensor_channels']), indices['supply_voltage']:(indices['supply_voltage'] + 1)] = tmp.reshape(header['num_temp_sensor_channels'], 1)

    if header['num_board_adc_channels'] > 0:
        tmp = np.fromfile(fid, dtype='uint16', count=(header['num_samples_per_data_block']) * header['num_board_adc_channels'])
        data['board_adc_data'][range(header['num_board_adc_channels']), indices['board_adc']:(indices['board_adc'] + header['num_samples_per_data_block'])] = tmp.reshape(header['num_board_adc_channels'], header['num_samples_per_data_block'])

    if header['num_board_dig_in_channels'] > 0:
        data['board_dig_in_raw'][indices['board_dig_in']:(indices['board_dig_in'] + header['num_samples_per_data_block'])] = np.array(struct.unpack('<' + 'H' * header['num_samples_per_data_block'], fid.read(2 * header['num_samples_per_data_block'])))

    if header['num_board_dig_out_channels'] > 0:
        data['board_dig_out_raw'][indices['board_dig_out']:(indices['board_dig_out'] + header['num_samples_per_data_block'])] = np.array(struct.unpack('<' + 'H' * header['num_samples_per_data_block'], fid.read(2 * header['num_samples_per_data_block'])))
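Note: read_one_data_block() fills preallocated arrays in place, indexed by a dict of per-stream write positions. A condensed, hypothetical sketch of how a caller might drive it for the amplifier stream only is shown below; the function name, array dtypes, and the num_data_blocks value (derived from the file size and get_bytes_per_data_block(), as sketched earlier) are assumptions, and it presumes only amplifier channels are enabled so the other streams' arrays can be elided.

import numpy as np

def read_amplifier_stream(fid, header, num_data_blocks):
    """Hypothetical driver for read_one_data_block(), amplifier stream only."""
    spb = header['num_samples_per_data_block']
    num_samples = spb * num_data_blocks
    data = {
        't_amplifier': np.zeros(num_samples, dtype=np.int64),
        'amplifier_data': np.zeros((header['num_amplifier_channels'], num_samples), dtype=np.uint16),
        # aux_input_data, supply_voltage_data, board_adc_data, temp_sensor_data,
        # board_dig_in_raw and board_dig_out_raw would be preallocated similarly.
    }
    indices = {'amplifier': 0, 'aux_input': 0, 'supply_voltage': 0,
               'board_adc': 0, 'board_dig_in': 0, 'board_dig_out': 0}
    for _ in range(num_data_blocks):
        read_one_data_block(data, header, indices, fid)
        # Advance each stream's write position by the samples it just received.
        indices['amplifier'] += spb
        indices['aux_input'] += spb // 4
        indices['supply_voltage'] += 1
        indices['board_adc'] += spb
        indices['board_dig_in'] += spb
        indices['board_dig_out'] += spb
    return data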