NeuroDOT-py 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2021 ythackerCS
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,44 @@
1
+ Metadata-Version: 2.1
2
+ Name: NeuroDOT_py
3
+ Version: 1.0.0
4
+ Summary: An extensible Python toolbox for efficient optical brain mapping
5
+ Author-email: "Adam T. Eggebrecht" <aeggebre@wustl.edu>, Emma Speh <espeh@wustl.edu>, Ari Segel <ari@wustl.edu>, Yash Thacker <ythacker@wustl.edu>
6
+ Project-URL: Homepage, https://github.com/WUSTL-ORL/NeuroDOT_py
7
+ Project-URL: Issues, https://github.com/WUSTL-ORL/NeuroDOT_py/issues
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.8
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+
15
+ NeuroDOT_py README
16
+
17
+
18
+ 1. Installation:
19
+
20
+ 1. First, download Python. NeuroDOT_py is optimized for Python version 3.8.8: https://www.python.org/downloads/
21
+
22
+ 2. Download VSCode: https://code.visualstudio.com
23
+
24
+ 3. Download the Jupyter notebook extension for VSCode: launch your VS Code and type “jupyter notebook” in the extension search box. Select the first result (Jupyter) and click 'Install'.
25
+
26
+ 4. Install NeuroDOT_py using Pip: pip install neuro_dot
27
+
28
+
29
+ 2. Getting Started
30
+
31
+ 1. The toolbox contains 4 folders: Data, neuro_dot, Support Files, and outputfiles/output_Images.
32
+
33
+ 1. The Data folder contains 10 data samples including both retinotopic mapping of visual cortex and mapping of hierarchical language processing with HD-DOT. There are also two example parameter files, 'params.txt,' and 'params2.txt' to be used with 'getting_started' (the NeuroDOT Preprocessing script).
34
+
35
+ 2. The neuro_dot folder contains the library, consisting of modules for each category of function involved in NeuroDOT_py (Analysis, File_IO, Light Modeling, Matlab Equivalent Functions, Reconstruction, Spatial Transforms, Temporal Transforms, and Visualizations). There is also a function named DynamicFilter, which is used in 'getting_started.ipynb' to simplify visualizations for data pre-processing. There is also 'requirements.txt' which contains all of the necessary libraries to be installed to use NeuroDOT_py.
36
+
37
+ 3. The Support Files folder contains necessary files for running NeuroDOT pipelines.
38
+ - The A matrix file required for Reconstruction is too large to be posted on GitHub, so it can be downloaded from: https://www.nitrc.org/projects/neurodot/. Other A matrices will be added in the future.
39
+
40
+ 4. The 'outputfiles' folder is created after running 'getting_started' and is where all of the images (.png) generated will be saved to.
41
+
42
+ 2. 'getting_started.ipynb' is the Jupyter notebook for getting acquainted with NeuroDOT_Py. This is the file that you will open in VSCode/Jupyter Notebook to run and manipulate the code.
43
+
44
+
@@ -0,0 +1,30 @@
1
+ NeuroDOT_py README
2
+
3
+
4
+ 1. Installation:
5
+
6
+ 1. First, download Python. NeuroDOT_py is optimized for Python version 3.8.8: https://www.python.org/downloads/
7
+
8
+ 2. Download VSCode: https://code.visualstudio.com
9
+
10
+ 3. Download the Jupyter notebook extension for VSCode: launch your VS Code and type “jupyter notebook” in the extension search box. Select the first result (Jupyter) and click 'Install'.
11
+
12
+ 4. Install NeuroDOT_py using Pip: pip install neuro_dot
13
+
14
+
15
+ 2. Getting Started
16
+
17
+ 1. The toolbox contains 4 folders: Data, neuro_dot, Support Files, and outputfiles/output_Images.
18
+
19
+ 1. The Data folder contains 10 data samples including both retinotopic mapping of visual cortex and mapping of hierarchical language processing with HD-DOT. There are also two example parameter files, 'params.txt,' and 'params2.txt' to be used with 'getting_started' (the NeuroDOT Preprocessing script).
20
+
21
+ 2. The neuro_dot folder contains the library, consisting of modules for each category of function involved in NeuroDOT_py (Analysis, File_IO, Light Modeling, Matlab Equivalent Functions, Reconstruction, Spatial Transforms, Temporal Transforms, and Visualizations). There is also a function named DynamicFilter, which is used in 'getting_started.ipynb' to simplify visualizations for data pre-processing. There is also 'requirements.txt' which contains all of the necessary libraries to be installed to use NeuroDOT_py.
22
+
23
+ 3. The Support Files folder contains necessary files for running NeuroDOT pipelines.
24
+ - The A matrix file required for Reconstruction is too large to be posted on GitHub, so it can be downloaded from: https://www.nitrc.org/projects/neurodot/. Other A matrices will be added in the future.
25
+
26
+ 4. The 'outputfiles' folder is created after running 'getting_started' and is where all of the images (.png) generated will be saved to.
27
+
28
+ 2. 'getting_started.ipynb' is the Jupyter notebook for getting acquainted with NeuroDOT_Py. This is the file that you will open in VSCode/Jupyter Notebook to run and manipulate the code.
29
+
30
+
@@ -0,0 +1,26 @@
1
+ [project]
2
+ name = "NeuroDOT_py"
3
+ version = "1.0.0"
4
+ authors = [
5
+ { name = "Adam T. Eggebrecht", email = "aeggebre@wustl.edu"},
6
+ { name="Emma Speh", email="espeh@wustl.edu" },
7
+ { name ="Ari Segel", email = "ari@wustl.edu"},
8
+ { name = "Yash Thacker", email = "ythacker@wustl.edu"},
9
+
10
+ ]
11
+ description = "An extensible Python toolbox for efficient optical brain mapping"
12
+ readme = "README.md"
13
+ requires-python = ">=3.8"
14
+ classifiers = [
15
+ "Programming Language :: Python :: 3",
16
+ "License :: OSI Approved :: MIT License",
17
+ "Operating System :: OS Independent",
18
+ ]
19
+
20
+ [build-system]
21
+ requires = ["setuptools", "wheel"]
22
+
23
+ [project.urls]
24
+ Homepage = "https://github.com/WUSTL-ORL/NeuroDOT_py"
25
+ Issues = "https://github.com/WUSTL-ORL/NeuroDOT_py/issues"
26
+
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,44 @@
1
+ Metadata-Version: 2.1
2
+ Name: NeuroDOT_py
3
+ Version: 1.0.0
4
+ Summary: An extensible Python toolbox for efficient optical brain mapping
5
+ Author-email: "Adam T. Eggebrecht" <aeggebre@wustl.edu>, Emma Speh <espeh@wustl.edu>, Ari Segel <ari@wustl.edu>, Yash Thacker <ythacker@wustl.edu>
6
+ Project-URL: Homepage, https://github.com/WUSTL-ORL/NeuroDOT_py
7
+ Project-URL: Issues, https://github.com/WUSTL-ORL/NeuroDOT_py/issues
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.8
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+
15
+ NeuroDOT_py README
16
+
17
+
18
+ 1. Installation:
19
+
20
+ 1. First, download Python. NeuroDOT_py is optimized for Python version 3.8.8: https://www.python.org/downloads/
21
+
22
+ 2. Download VSCode: https://code.visualstudio.com
23
+
24
+ 3. Download the Jupyter notebook extension for VSCode: launch your VS Code and type “jupyter notebook” in the extension search box. Select the first result (Jupyter) and click 'Install'.
25
+
26
+ 4. Install NeuroDOT_py using Pip: pip install neuro_dot
27
+
28
+
29
+ 2. Getting Started
30
+
31
+ 1. The toolbox contains 4 folders: Data, neuro_dot, Support Files, and outputfiles/output_Images.
32
+
33
+ 1. The Data folder contains 10 data samples including both retinotopic mapping of visual cortex and mapping of hierarchical language processing with HD-DOT. There are also two example parameter files, 'params.txt,' and 'params2.txt' to be used with 'getting_started' (the NeuroDOT Preprocessing script).
34
+
35
+ 2. The neuro_dot folder contains the library, consisting of modules for each category of function involved in NeuroDOT_py (Analysis, File_IO, Light Modeling, Matlab Equivalent Functions, Reconstruction, Spatial Transforms, Temporal Transforms, and Visualizations). There is also a function named DynamicFilter, which is used in 'getting_started.ipynb' to simplify visualizations for data pre-processing. There is also 'requirements.txt' which contains all of the necessary libraries to be installed to use NeuroDOT_py.
36
+
37
+ 3. The Support Files folder contains necessary files for running NeuroDOT pipelines.
38
+ - The A matrix file required for Reconstruction is too large to be posted on GitHub, so it can be downloaded from: https://www.nitrc.org/projects/neurodot/. Other A matrices will be added in the future.
39
+
40
+ 4. The 'outputfiles' folder is created after running 'getting_started' and is where all of the images (.png) generated will be saved to.
41
+
42
+ 2. 'getting_started.ipynb' is the Jupyter notebook for getting acquainted with NeuroDOT_Py. This is the file that you will open in VSCode/Jupyter Notebook to run and manipulate the code.
43
+
44
+
@@ -0,0 +1,17 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ src/NeuroDOT_py.egg-info/PKG-INFO
5
+ src/NeuroDOT_py.egg-info/SOURCES.txt
6
+ src/NeuroDOT_py.egg-info/dependency_links.txt
7
+ src/NeuroDOT_py.egg-info/top_level.txt
8
+ src/neuro_dot/Analysis.py
9
+ src/neuro_dot/DynamicFilter.py
10
+ src/neuro_dot/File_IO.py
11
+ src/neuro_dot/Light_Modeling.py
12
+ src/neuro_dot/Matlab_Equivalent_Functions.py
13
+ src/neuro_dot/Reconstruction.py
14
+ src/neuro_dot/Spatial_Transforms.py
15
+ src/neuro_dot/Temporal_Transforms.py
16
+ src/neuro_dot/Visualizations.py
17
+ src/neuro_dot/__init__.py
@@ -0,0 +1 @@
1
+ neuro_dot
@@ -0,0 +1,237 @@
1
+ # General imports
2
+ import numpy as np
3
+ import numpy.linalg as lna
4
+
5
+ import neuro_dot as ndot
6
+
7
+
8
+
9
def BlockAverage(data_in, pulse, dt, Tkeep = 0):
    """
    BLOCKAVERAGE Averages data by stimulus blocks.

    Takes a data array "data_in" (MEAS x TIME, or N-D with time last) and
    uses the stimulus onset indices "pulse" and block length "dt" to cut the
    data timewise into blocks of equal length, which are then averaged
    together.

    Parameters:
        data_in : array, measurements in the leading axes, time last.
        pulse   : sequence of 0-based block-start time indices.
        dt      : int, block length in time points.
        Tkeep   : temporal mask; time points that are zero/False are set to
                  NaN before averaging. The default 0 means "keep all".

    Returns:
        BA_out   : block average (leading dims x dt).
        BSTD_out : std across blocks (ddof=1).
        BT_out   : mean-centered average divided by BSTD_out (t-like), with
                   infinities zeroed.
        blocks   : the individual blocks (leading dims x dt x Nbl).
    """
    ## Parameters and Initialization.
    dims = np.shape(data_in)
    Nt = dims[-1]                       # time is assumed to be the last dimension
    NDtf = np.ndim(data_in) > 2        # flag: N-D (e.g. voxel-space) input
    Nbl = len(pulse)

    # The scalar default 0 means "keep every time point".
    if np.isscalar(Tkeep) and Tkeep == 0:
        Tkeep = np.ones(shape=(Nt, 1)) == 1

    # Drop the last block if it would start too close to the end of the run.
    if (dt + pulse[-1] - 1) > Nt:
        Nbl = Nbl - 1

    ## N-D Input (for 3-D or N-D voxel spaces): flatten leading axes.
    if NDtf:
        # FIX: original called np.reshape(data_in, [], Nt), which is invalid
        # in NumPy (MATLAB translation artifact); (-1, Nt) infers the rest.
        data_in = np.reshape(data_in, (-1, Nt))

    ## Incorporate Tkeep. Work on a float copy so masked points can hold NaN
    ## and the caller's array is never mutated (the original modified it).
    data_in = np.array(data_in, dtype=float, copy=True)
    mask_out = np.ravel(np.asarray(Tkeep) == False)
    data_in[:, mask_out] = np.nan

    ## Cut data into blocks.
    Nm = np.shape(data_in)[0]
    blocks = np.zeros((Nm, dt, Nbl))

    # FIX: shift pulse to 1-based (MATLAB-style) indices on a local copy;
    # the original incremented the caller's pulse array in place.
    pulse1 = [int(p) + 1 for p in pulse]

    for k in range(Nbl):
        pk = pulse1[k]
        if (pk + dt - 1) <= Nt:
            # Subtract 1 from the start index to account for 0-based indexing.
            blocks[:, :, k] = data_in[:, pk - 1:pk + dt - 1]
        else:
            # Block runs past the end of the run: pad the tail with NaNs.
            dtb = (pk - 1) + dt - Nt
            pad = np.full((Nm, dtb), np.nan)
            blocks[:, :, k] = np.concatenate((data_in[:, pk - 1:Nt], pad), axis=1)

    ## Average blocks, remove each channel's block-mean, and form a t-like
    ## statistic (mean / std across blocks).
    BA_out = np.nanmean(blocks, axis=2)
    BSTD_out = np.nanstd(blocks, axis=2, ddof=1)
    BA_out = BA_out - np.nanmean(BA_out, axis=1, keepdims=True)
    BT_out = np.divide(BA_out, BSTD_out)
    # FIX: boolean mask; the original indexed with np.argwhere's integer
    # output, which fancy-indexes rows instead of the infinite entries.
    BT_out[np.isinf(BT_out)] = 0

    ## N-D Output: restore the original leading axes.
    if NDtf:
        newshape = tuple(dims[:-1]) + (dt,)
        BA_out = np.reshape(BA_out, newshape)
        BSTD_out = np.reshape(BSTD_out, newshape)
        BT_out = np.reshape(BT_out, newshape)
        blocks = np.reshape(blocks, tuple(dims[:-1]) + (dt, Nbl))

    return BA_out, BSTD_out, BT_out, blocks
83
+
84
+
85
def CalcGVTD(data):
    """
    CalcGVTD calculates the root-mean-square across measurements (log-mean
    light levels or voxels) of the temporal derivative.

    The data is assumed to have measurements in the first and time in the
    last dimension. Any selection of measurement type or voxel index must
    be done outside of this function.

    Parameters:
        data : array, measurements x ... x time.

    Returns:
        GVTD : 1-D array with one value per time point; the first entry is 0
               because the derivative is undefined there.
    """
    # Flatten any leading spatial axes into a single measurement axis.
    Dsizes = np.shape(data)
    if len(Dsizes) > 2:
        # FIX: original called np.reshape(data, [], Dsizes[-1]), which is
        # invalid in NumPy; (-1, Nt) infers the measurement dimension.
        data = np.reshape(data, (-1, Dsizes[-1]))

    # First temporal derivative: data[:, t] - data[:, t + 1] (the circular
    # wrap-around column is discarded below by slicing to [:-1]).
    Ddata = data - np.roll(data, -1, axis=-1)

    # RMS across measurements at each time point, with a leading 0 so the
    # output has the same number of time points as the input.
    GVTD = np.concatenate(([0], ndot.rms_py(Ddata[:, 0:-1])), axis=0)

    return GVTD
109
+
110
+
111
def FindGoodMeas(data, info_in, bthresh = 0.075):

    """
    FINDGOODMEAS Performs "Good Measurements" analysis to return indices of
    measurements within a chosen threshold.

    info_out = FINDGOODMEAS(data, info_in) takes a light-level array "data"
    in the MEAS x TIME format, and calculates the std of each channel as its
    noise level. These are thresholded by "bthresh" (default 0.075) to
    create a logical array; both are stored under info_out['MEAS'].

    The std is computed over a "quiet" window found by sliding a GVwin-long
    window over an RMS noise trace of close, WL-2 channels; if the recording
    is too short for that, synch-point information in
    info_in['paradigm']['synchpts'] (when present) is used to crop instead.

    info_out = FINDGOODMEAS(data, info_in, bthresh) allows the user to
    specify a threshold value. (FIX: the original reset bthresh to 0.075
    unconditionally via a broken `bthresh in locals()` membership test,
    discarding any caller-supplied value.)

    See Also: PLOTCAPGOODMEAS, PLOTHISTOGRAMSTD.
    """
    ## Parameters and Initialization.
    # Required metadata: FGM will not run if these fields are nonexistent.
    try:
        info_in['system']['framerate']
    except KeyError:
        print('info_in["system"]["framerate"] does not exist and is required')
        print('exiting FindGoodMeas')
        return
    try:
        info_in['pairs']
    except KeyError:
        print('info_in["pairs"] does not exist and is required')
        print('exiting FindGoodMeas')
        return

    info_out = info_in.copy()  # create info_out
    GVwin = 600                # window length (frames) for the quiet-period search
    if not 'paradigm' in info_out:
        info_out['paradigm'] = {}

    dims = data.shape
    Nt = dims[-1]              # Assumes time is always the last dimension
    NDtf = np.ndim(data) > 2
    if GVwin > (Nt - 1):
        GVwin = Nt - 1

    # N-D Input: flatten leading axes into one measurement axis.
    if NDtf:
        # FIX: original called np.reshape(data, [], Nt), which is invalid.
        data = np.reshape(data, (-1, Nt))

    # Noise trace from close (r2d < 20), second-wavelength channels.
    keep = np.logical_and(info_in['pairs']['r2d'] < 20, info_in['pairs']['WL'] == 2)
    foo = np.squeeze(data[keep, :])
    foo = ndot.highpass(foo, 0.02, info_in['system']['framerate'])  # bandpass, omega_hp = 0.02
    foo = ndot.lowpass(foo, 1, info_in['system']['framerate'])      # bandpass, omega_lp = 1
    foo = foo - np.roll(foo, 1, 1)  # first temporal difference
    foo[:, 0] = 0
    foob = ndot.rms_py(foo)         # RMS per time point across kept channels

    NtGV = Nt - GVwin

    if NtGV > 1:
        # Sliding window to grab a meaningful set for 'quiet'.
        GVTD_win_means = np.zeros(NtGV, order='F')
        for i in range(NtGV):
            GVTD_win_means[i] = np.mean(foob[i:i + GVwin])
        # First window with minimal mean noise defines the quiet period.
        t0 = int(np.argmin(GVTD_win_means))
        tF = t0 + GVwin
        # ddof=1 so np.std matches MATLAB's std.
        STD = np.std(data[:, t0:tF], 1, ddof=1)
    elif 'synchpts' in info_out['paradigm']:
        # FIX: the original condition was inverted ("not ... in"), so this
        # branch read a key it had just verified to be missing.
        synchpts = info_out['paradigm']['synchpts']
        NsynchPts = len(synchpts)
        if NsynchPts > 2:
            tF = synchpts[-1]
            t0 = synchpts[0]
        elif NsynchPts == 2:
            tF = synchpts[1]
            t0 = synchpts[0]
        else:
            # FIX: original left t0 undefined on this path.
            t0 = 0
            tF = data.shape[1]
        STD = np.std(data[:, t0:tF], 1, ddof=1)  # Calculate STD.
    else:
        # FIX: original left t0/tF undefined on this path, crashing below.
        t0 = 0
        tF = data.shape[1]
        STD = np.std(data, 1, ddof=1)

    # Populate table of on-the-fly calculated stuff.
    info_out['GVTDparams'] = {'t0': t0, 'tF': tF}

    if not 'MEAS' in info_out:
        info_out['MEAS'] = {}
    info_out['MEAS']['STD'] = STD
    info_out['MEAS']['GI'] = (STD <= bthresh).astype(np.uint8)
    if 'Clipped' in info_out['MEAS']:
        # Good = low-noise AND not clipped. FIX: the original zeroed GI first
        # and then applied Python 'and'/'not' to arrays, which raises
        # ValueError (ambiguous truth value) and could never set anything.
        clipped = np.asarray(info_out['MEAS']['Clipped'], dtype=bool)
        good = np.logical_and(info_out['MEAS']['GI'].astype(bool),
                              np.logical_not(clipped))
        info_out['MEAS']['GI'] = good.astype(np.uint8)

    return info_out
217
+
218
+
219
def normcND(data):
    """
    NORMCND returns a column-normed matrix: every column is scaled by its
    Euclidean (L2) norm. It is assumed that the matrix is 2D.
    """
    # L2 norm of each column; division broadcasts across rows.
    column_norms = lna.norm(data, ord=2, axis=0)
    normalized = data / column_norms
    return normalized
227
+
228
+
229
def normrND(data):
    """
    NORMRND scales the input by the Euclidean norm of ALL of its elements
    (a global / Frobenius-style norm), then zeroes any non-finite entries
    of the result. Updated for broader compatibility.

    NOTE(review): despite the name, this does NOT norm each row
    independently — np.sum(data**2) has no axis argument, so the whole
    array shares a single normalizer. That existing behavior is preserved
    here; confirm against callers before changing it to per-row norms.
    """
    dataNorm = np.sqrt(np.sum(data**2))
    data = data / dataNorm
    # FIX: use a boolean mask to zero non-finite entries. The original
    # indexed with np.argwhere(...), whose (k, ndim) integer output
    # fancy-indexes whole rows and raises IndexError for any non-finite
    # entry outside row 0 of a 2-D input (e.g. an all-zero matrix).
    data[~np.isfinite(data)] = 0

    return data