py-neuromodulation 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in one of the supported registries. It is provided for informational purposes only.
Files changed (109) — the renames below move the flat nm_* modules into subpackages; an import-migration sketch follows the list.
  1. py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
  2. py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
  3. py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
  4. py_neuromodulation/__init__.py +80 -13
  5. py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +496 -531
  6. py_neuromodulation/analysis/__init__.py +4 -0
  7. py_neuromodulation/{nm_decode.py → analysis/decode.py} +918 -992
  8. py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +994 -1074
  9. py_neuromodulation/{nm_plots.py → analysis/plots.py} +627 -612
  10. py_neuromodulation/{nm_stats.py → analysis/stats.py} +458 -480
  11. py_neuromodulation/data/README +6 -6
  12. py_neuromodulation/data/dataset_description.json +8 -8
  13. py_neuromodulation/data/participants.json +32 -32
  14. py_neuromodulation/data/participants.tsv +2 -2
  15. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
  16. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
  17. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
  18. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
  19. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
  20. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
  21. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
  22. py_neuromodulation/default_settings.yaml +241 -0
  23. py_neuromodulation/features/__init__.py +31 -0
  24. py_neuromodulation/features/bandpower.py +165 -0
  25. py_neuromodulation/features/bispectra.py +157 -0
  26. py_neuromodulation/features/bursts.py +297 -0
  27. py_neuromodulation/features/coherence.py +255 -0
  28. py_neuromodulation/features/feature_processor.py +121 -0
  29. py_neuromodulation/features/fooof.py +142 -0
  30. py_neuromodulation/features/hjorth_raw.py +57 -0
  31. py_neuromodulation/features/linelength.py +21 -0
  32. py_neuromodulation/features/mne_connectivity.py +148 -0
  33. py_neuromodulation/features/nolds.py +94 -0
  34. py_neuromodulation/features/oscillatory.py +249 -0
  35. py_neuromodulation/features/sharpwaves.py +432 -0
  36. py_neuromodulation/filter/__init__.py +3 -0
  37. py_neuromodulation/filter/kalman_filter.py +67 -0
  38. py_neuromodulation/filter/kalman_filter_external.py +1890 -0
  39. py_neuromodulation/filter/mne_filter.py +128 -0
  40. py_neuromodulation/filter/notch_filter.py +93 -0
  41. py_neuromodulation/grid_cortex.tsv +40 -40
  42. py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
  43. py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
  44. py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
  45. py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
  46. py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
  47. py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
  48. py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
  49. py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
  50. py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
  51. py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
  52. py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
  53. py_neuromodulation/processing/__init__.py +10 -0
  54. py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +29 -25
  55. py_neuromodulation/processing/data_preprocessor.py +77 -0
  56. py_neuromodulation/processing/filter_preprocessing.py +78 -0
  57. py_neuromodulation/processing/normalization.py +175 -0
  58. py_neuromodulation/{nm_projection.py → processing/projection.py} +370 -394
  59. py_neuromodulation/{nm_rereference.py → processing/rereference.py} +97 -95
  60. py_neuromodulation/{nm_resample.py → processing/resample.py} +56 -50
  61. py_neuromodulation/stream/__init__.py +3 -0
  62. py_neuromodulation/stream/data_processor.py +325 -0
  63. py_neuromodulation/stream/generator.py +53 -0
  64. py_neuromodulation/stream/mnelsl_player.py +94 -0
  65. py_neuromodulation/stream/mnelsl_stream.py +120 -0
  66. py_neuromodulation/stream/settings.py +292 -0
  67. py_neuromodulation/stream/stream.py +427 -0
  68. py_neuromodulation/utils/__init__.py +2 -0
  69. py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +305 -302
  70. py_neuromodulation/utils/database.py +149 -0
  71. py_neuromodulation/utils/io.py +378 -0
  72. py_neuromodulation/utils/keyboard.py +52 -0
  73. py_neuromodulation/utils/logging.py +66 -0
  74. py_neuromodulation/utils/types.py +251 -0
  75. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/METADATA +28 -33
  76. py_neuromodulation-0.0.6.dist-info/RECORD +89 -0
  77. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/WHEEL +1 -1
  78. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/licenses/LICENSE +21 -21
  79. py_neuromodulation/FieldTrip.py +0 -589
  80. py_neuromodulation/_write_example_dataset_helper.py +0 -65
  81. py_neuromodulation/nm_EpochStream.py +0 -92
  82. py_neuromodulation/nm_IO.py +0 -417
  83. py_neuromodulation/nm_across_patient_decoding.py +0 -927
  84. py_neuromodulation/nm_bispectra.py +0 -168
  85. py_neuromodulation/nm_bursts.py +0 -198
  86. py_neuromodulation/nm_coherence.py +0 -205
  87. py_neuromodulation/nm_cohortwrapper.py +0 -435
  88. py_neuromodulation/nm_eval_timing.py +0 -239
  89. py_neuromodulation/nm_features.py +0 -116
  90. py_neuromodulation/nm_features_abc.py +0 -39
  91. py_neuromodulation/nm_filter.py +0 -219
  92. py_neuromodulation/nm_filter_preprocessing.py +0 -91
  93. py_neuromodulation/nm_fooof.py +0 -159
  94. py_neuromodulation/nm_generator.py +0 -37
  95. py_neuromodulation/nm_hjorth_raw.py +0 -73
  96. py_neuromodulation/nm_kalmanfilter.py +0 -58
  97. py_neuromodulation/nm_linelength.py +0 -33
  98. py_neuromodulation/nm_mne_connectivity.py +0 -112
  99. py_neuromodulation/nm_nolds.py +0 -93
  100. py_neuromodulation/nm_normalization.py +0 -214
  101. py_neuromodulation/nm_oscillatory.py +0 -448
  102. py_neuromodulation/nm_run_analysis.py +0 -435
  103. py_neuromodulation/nm_settings.json +0 -338
  104. py_neuromodulation/nm_settings.py +0 -68
  105. py_neuromodulation/nm_sharpwaves.py +0 -401
  106. py_neuromodulation/nm_stream_abc.py +0 -218
  107. py_neuromodulation/nm_stream_offline.py +0 -359
  108. py_neuromodulation/utils/_logging.py +0 -24
  109. py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
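Taken together, the renames above show the 0.0.6 reorganization: the flat nm_* modules of 0.0.4 move into subpackages (analysis, features, filter, processing, stream, utils), and the removed nm_settings.json is replaced by default_settings.yaml. As a rough sketch of what this implies for downstream imports — inferred purely from the rename entries in this list, since the re-exports in py_neuromodulation/__init__.py are not visible in this diff — old module imports would map approximately like this:

    # Hypothetical import migration, derived only from the file renames listed above.
    # 0.0.4 (flat modules):
    #   from py_neuromodulation import nm_analysis, nm_decode, nm_plots, nm_rereference
    # 0.0.6 (subpackages), assuming modules are importable at their new file paths:
    from py_neuromodulation.analysis import feature_reader  # was nm_analysis.py
    from py_neuromodulation.analysis import decode          # was nm_decode.py
    from py_neuromodulation.analysis import plots           # was nm_plots.py
    from py_neuromodulation.processing import rereference   # was nm_rereference.py
    from py_neuromodulation.utils import channels           # was nm_define_nmchannels.py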
py_neuromodulation/nm_cohortwrapper.py (deleted; matches entry 87 above, +0 -435)
@@ -1,435 +0,0 @@
- from re import VERBOSE
- import sys
- import os
- import numpy as np
- from pathlib import Path
- from scipy import stats
- import pandas as pd
- from multiprocessing import Pool
- from sklearn import linear_model, discriminant_analysis, ensemble, svm
- from sklearn import metrics
- from sklearn.base import clone
- from sklearn import model_selection
- from sklearn.utils import class_weight
- from scipy.ndimage import binary_dilation, binary_erosion, label
- import _pickle as cPickle
- from scipy import io
- from matplotlib import pyplot as plt
- import matplotlib
- import bids
- from bids import BIDSLayout
- from itertools import product
- import nibabel as nib
-
- import py_neuromodulation
- from py_neuromodulation import nm_decode, nm_analysis, nm_IO
- from py_neuromodulation import nm_stream_offline
-
-
- class CohortRunner:
-     def __init__(
-         self,
-         cohorts: dict = None,
-         ML_model_name="LM",
-         model=linear_model.LogisticRegression(class_weight="balanced"),
-         eval_method=metrics.balanced_accuracy_score,
-         estimate_gridpoints=False,
-         estimate_channels=True,
-         estimate_all_channels_combined=False,
-         save_coef=False,
-         TRAIN_VAL_SPLIT=False,
-         plot_features=False,
-         plot_grid_performances=False,
-         run_ML_model=True,
-         run_bids=True,
-         binarize_label=True,
-         used_types=("ecog", "dbs", "seeg"),
-         target_keywords=("mov", "squared", "label"),
-         get_movement_detection_rate=False,
-         run_pool=True,
-         VERBOSE=False,
-         LIMIT_DATA=False,
-         RUN_BAY_OPT=False,
-         STACK_FEATURES_N_SAMPLES=True,
-         cv_method=model_selection.KFold(n_splits=3, shuffle=False),
-         use_nested_cv=True,
-         outpath=r"C:\Users\ICN_admin\Documents\Decoding_Toolbox\write_out\0209_SharpWaveLimFeaturesSTFT_with_Grid",
-         PATH_SETTINGS=r"C:\Users\ICN_admin\Documents\py_neuromodulation\pyneuromodulation\nm_settings.json",
-     ) -> None:
-
-         self.ML_model_name = ML_model_name
-         self.model = model
-         self.outpath = outpath
-         self.PATH_SETTINGS = PATH_SETTINGS
-         self.estimate_gridpoints = estimate_gridpoints
-         self.estimate_channels = estimate_channels
-         self.estimate_all_channels_combined = estimate_all_channels_combined
-         self.save_coef = save_coef
-         self.plot_features = plot_features
-         self.plot_grid_performances = plot_grid_performances
-         self.run_ML_model = run_ML_model
-         self.run_bids = run_bids
-         self.run_pool = run_pool
-         self.TRAIN_VAL_SPLIT = TRAIN_VAL_SPLIT
-         self.cohorts = cohorts
-         self.VERBOSE = VERBOSE
-         self.LIMIT_DATA = LIMIT_DATA
-         self.eval_method = eval_method
-         self.cv_method = cv_method
-         self.use_nested_cv = use_nested_cv
-         self.RUN_BAY_OPT = RUN_BAY_OPT
-         self.TRAIN_VAL_SPLIT = TRAIN_VAL_SPLIT
-         self.STACK_FEATURES_N_SAMPLES = STACK_FEATURES_N_SAMPLES
-         self.model = model
-         self.binarize_label = binarize_label
-         self.used_types = used_types
-         self.target_keywords = target_keywords
-         self.get_movement_detection_rate = get_movement_detection_rate
-         self.grid_cortex = pd.read_csv(
-             os.path.join(py_neuromodulation.__path__[0], "grid_cortex.tsv"),
-             sep="\t",
-         ).to_numpy()
-
-     def init_decoder(self) -> nm_decode.Decoder:
-         return nm_decode.Decoder(
-             model=self.model,
-             TRAIN_VAL_SPLIT=self.TRAIN_VAL_SPLIT,
-             STACK_FEATURES_N_SAMPLES=self.STACK_FEATURES_N_SAMPLES,
-             get_movement_detection_rate=self.get_movement_detection_rate,
-             eval_method=self.eval_method,
-             VERBOSE=self.VERBOSE,
-             cv_method=self.cv_method,
-             use_nested_cv=self.use_nested_cv,
-             RUN_BAY_OPT=self.RUN_BAY_OPT,
-         )
-
-     def multiprocess_pipeline_run_wrapper(self, PATH_RUN):
-
-         if type(PATH_RUN) is bids.layout.models.BIDSFile:
-             PATH_RUN = PATH_RUN.path
-
-         # set BIDS PATH and out path
-         # better option: feed the output and bids path as well as a param through the pool
-
-         for cohort, PATH_COHORT in self.cohorts.items():
-             if cohort in PATH_RUN:
-                 PATH_BIDS = PATH_COHORT
-                 PATH_OUT = os.path.join(self.outpath, cohort)
-                 break
-
-         if self.run_bids:
-             stream = nm_stream_offline.Stream(
-                 PATH_RUN=PATH_RUN,
-                 PATH_BIDS=PATH_BIDS,
-                 PATH_OUT=PATH_OUT,
-                 LIMIT_DATA=self.LIMIT_DATA,
-                 LIMIT_HIGH=200000,
-                 LIMIT_LOW=0,
-                 target_keywords=self.target_keywords,
-                 used_types=self.used_types,
-                 PATH_SETTINGS=self.PATH_SETTINGS,
-                 VERBOSE=self.VERBOSE,
-             )
-             stream.run()
-
-         feature_file = os.path.basename(PATH_RUN)[:-5] # cut off ".vhdr"
-
-         feature_reader = nm_analysis.Feature_Reader(
-             feature_dir=PATH_OUT,
-             feature_file=feature_file,
-             binarize_label=self.binarize_label,
-         )
-
-         if self.plot_grid_performances:
-             feature_reader.plot_cort_projection()
-
-         if self.plot_features:
-
-             ch_to_plot = feature_reader.nm_channels.query(
-                 '(type=="ecog") and (used == 1)'
-             )["name"]
-
-             feature_used = "stft"
-
-             for ch_used in ch_to_plot:
-                 feature_reader.plot_target_averaged_channel(
-                     ch=ch_used,
-                     list_feature_keywords=[feature_used],
-                     epoch_len=4,
-                     threshold=0.5,
-                 )
-
-         # model = discriminant_analysis.LinearDiscriminantAnalysis()
-         # model = xgboost.XGBClassifier(scale_pos_weight=10) # balanced class weights
-         # model = ensemble.RandomForestClassifier(n_estimators=6, max_depth=6, class_weight='balanced')
-         # model = svm.SVC(class_weight="balanced")
-
-         if self.run_ML_model:
-             # set decoder for this specific run (using the feature_reader features)
-             feature_reader.decoder = self.init_decoder()
-
-             feature_reader.decoder.set_data(
-                 features=feature_reader.feature_arr,
-                 label=feature_reader.label,
-                 label_name=feature_reader.label_name,
-                 used_chs=feature_reader.used_chs,
-             )
-
-             performances = feature_reader.run_ML_model(
-                 estimate_channels=self.estimate_channels,
-                 estimate_gridpoints=self.estimate_gridpoints,
-                 estimate_all_channels_combined=self.estimate_all_channels_combined,
-                 save_results=True,
-                 output_name="XGBNOMP",
-             )
-
-             if self.plot_grid_performances:
-                 feature_reader.plot_subject_grid_ch_performance(
-                     performance_dict=performances, plt_grid=True
-                 )
-
-     def run_cohorts(self):
-
-         run_files_all = []
-         for _, PATH_COHORT in self.cohorts.items():
-             layout = BIDSLayout(PATH_COHORT)
-             run_files_all.append(layout.get(extension=".vhdr"))
-
-         run_files_all = list(np.concatenate(run_files_all))
-
-         if self.run_pool:
-             pool = Pool(processes=50)
-             pool.map(self.multiprocess_pipeline_run_wrapper, run_files_all)
-         else:
-             # self.multiprocess_pipeline_run_wrapper(run_files_all[11])
-             for run_file in run_files_all[:12]:
-                 self.multiprocess_pipeline_run_wrapper(run_file)
-
-     def read_cohort_results(self, feature_path, cohort):
-         """Read for a given path (of potentially multiple estimated runs) performance results
-         Parameters
-         ----------
-         feature_path : string
-             path where estimated runs are saved
-         cohort : string
-             used for saving output npy dictionary
-         ML_model_name : string
-             model name, by default "LM"
-         """
-
-         # Here the runs are overwritten!
-         folders_path = [x[0] for x in os.walk(feature_path)]
-         feature_paths = [os.path.basename(x) for x in folders_path[1:]]
-         performance_out = {}
-
-         for feature_file in feature_paths:
-             feature_reader = nm_analysis.Feature_Reader(
-                 feature_dir=feature_path, feature_file=feature_file
-             )
-
-             performance_run = feature_reader.read_results(
-                 read_grid_points=self.estimate_gridpoints,
-                 read_channels=self.estimate_channels,
-                 read_all_combined=self.estimate_all_channels_combined,
-                 read_mov_detection_rates=True,
-             )
-
-             sub = feature_file[
-                 feature_file.find("sub-") : feature_file.find("_ses")
-             ][4:]
-             if sub not in performance_out:
-                 performance_out[sub] = {}
-             performance_out[sub][feature_file] = performance_run[
-                 sub
-             ] # get saved in performance_run
-
-         np.save(
-             os.path.join(
-                 self.outpath, self.ML_model_name + "_cohort_" + cohort + ".npy"
-             ),
-             performance_out,
-         )
-
-     def cohort_wrapper_read_cohort(self):
-         """Read results for multiple cohorts"""
-
-         for cohort in self.cohorts.keys():
-             self.read_cohort_results(
-                 os.path.join(self.outpath, cohort), cohort
-             )
-
-     def read_all_channels(
-         self,
-         channel_all,
-         feature_path,
-         feature_file,
-         cohort,
-         read_channels: bool = True,
-     ):
-         """Save for a given feature path all used grid point data. Necessary to run across patient and cohort analysis.
-         Parameters
-         ----------
-         channel_all : dictionary
-             dictionary with data, label, label_name and feature_names for each channel
-         feature_path : string
-             path to feature files
-         feature_file : string
-             feature file
-         cohort : string
-             used for indecing of grid_point_all
-         read_ch : bool
-             if True read channels, else read grid_points
-         Returns
-         -------
-         dictionary
-             ch_all
-         """
-         feature_reader = nm_analysis.Feature_Reader(
-             feature_dir=feature_path, feature_file=feature_file
-         )
-         if "Washington" in feature_path:
-             mov_starts = np.where(np.diff(feature_reader.feature_arr["mov"])>0)[0]
-             seg_cut = []
-             for mov_start in mov_starts:
-                 for i in range(5):
-                     seg_cut.append(mov_start+i)
-
-             ind_cut = np.concatenate((np.where(feature_reader.feature_arr["mov"] == 11)[0], seg_cut))
-             idx_select = set(np.arange(feature_reader.feature_arr["mov"].shape[0])) - set(ind_cut)
-             feature_reader.feature_arr = feature_reader.feature_arr.iloc[list(idx_select), :].reset_index(drop=True)
-             #analyzer.feature_arr["mov"] = analyzer.feature_arr["mov"] > 0
-             feature_reader.label = np.array(feature_reader.feature_arr["mov"] > 0, dtype=int)
-             subject_name = feature_file[:2]
-             task_name = "hand_movement"
-             run_number = 1
-         else:
-             subject_name = feature_file[
-                 feature_file.find("sub-") + 4 : feature_file.find("_ses")
-             ]
-             sess_name = feature_file[
-                 feature_file.find("ses-") + 4 : feature_file.find("_task")
-             ]
-             task_name = feature_file[
-                 feature_file.find("task-") + 5 : feature_file.find("_run")
-             ]
-             run_number = feature_file[
-                 feature_file.find("run-") + 4 : feature_file.find("_ieeg")
-             ]
-         print(feature_reader.label_name)
-         decoder = nm_decode.Decoder(
-             features=feature_reader.feature_arr,
-             label=feature_reader.label,
-             label_name=feature_reader.label_name,
-             used_chs=feature_reader.used_chs,
-         )
-
-         if read_channels is True:
-             decoder.set_data_ind_channels()
-             data_to_read = decoder.ch_ind_data
-         else:
-             decoder.set_data_grid_points()
-             data_to_read = decoder.grid_point_ind_data
-
-         for ch in list(data_to_read.keys()):
-             if cohort not in channel_all:
-                 channel_all[cohort] = {}
-             if subject_name not in channel_all[cohort]:
-                 channel_all[cohort][subject_name] = {}
-             if ch not in channel_all[cohort][subject_name]:
-                 channel_all[cohort][subject_name][ch] = {}
-             channel_all[cohort][subject_name][ch][feature_file] = {}
-
-             channel_all[cohort][subject_name][ch][feature_file][
-                 "data"
-             ] = data_to_read[ch]
-             channel_all[cohort][subject_name][ch][feature_file][
-                 "feature_names"
-             ] = [
-                 ch_[len(ch) + 1 :]
-                 for ch_ in decoder.features.columns
-                 if ch in ch_
-             ]
-             channel_all[cohort][subject_name][ch][feature_file][
-                 "label"
-             ] = decoder.label
-             channel_all[cohort][subject_name][ch][feature_file][
-                 "label_name"
-             ] = decoder.label_name
-
-             # check laterality
-             lat = "CON" # Beijing is always contralateral
-             # Pittsburgh Subjects
-             if (
-                 "LEFT" in decoder.label_name
-                 and "LEFT" in decoder.features.columns[1]
-             ) or (
-                 "RIGHT" in decoder.label_name
-                 and "RIGHT" in decoder.features.columns[1]
-             ):
-                 lat = "IPS"
-
-             # Berlin subjects
-             if (
-                 "_L_" in decoder.features.columns[1]
-                 and task_name == "SelfpacedRotationL"
-             ) or (
-                 "_R_" in decoder.features.columns[1]
-                 and task_name == "SelfpacedRotationR"
-             ):
-                 lat = "IPS"
-             channel_all[cohort][subject_name][ch][feature_file]["lat"] = lat
-         return channel_all
-
-     def cohort_wrapper_read_all_grid_points(self, read_channels=True):
-         cohorts = self.cohorts.keys()
-         grid_point_all = {}
-         for cohort in cohorts:
-             print("COHORT: " + cohort)
-             feature_path = os.path.join(self.outpath, cohort)
-             feature_list = nm_IO.get_run_list_indir(feature_path)
-             for feature_file in feature_list:
-                 print(feature_file)
-                 grid_point_all = self.read_all_channels(
-                     grid_point_all,
-                     feature_path,
-                     feature_file,
-                     cohort,
-                     read_channels=read_channels,
-                 )
-
-         if read_channels is True:
-             np.save(
-                 os.path.join(self.outpath, "channel_all.npy"), grid_point_all
-             )
-         else:
-             np.save(
-                 os.path.join(self.outpath, "grid_point_all.npy"),
-                 grid_point_all,
-             )
-
-     @staticmethod
-     def rewrite_grid_point_all(d, outpath):
-
-         # from channel_all[cohort][subject_name][ch][feature_file]
-
-         # to grid_point_all[grid_point][cohort][subject_test][run]
-
-         p = {}
-         for cohort in d.keys():
-             for sub in d[cohort].keys():
-                 for gp in d[cohort][sub].keys():
-                     for f in d[cohort][sub][gp].keys():
-                         if gp not in p.keys():
-                             p[gp] = {}
-                         if cohort not in p[gp].keys():
-                             p[gp][cohort] = {}
-                         if sub not in p[gp][cohort].keys():
-                             p[gp][cohort][sub] = {}
-                         if f not in p[gp][cohort][sub].keys():
-                             p[gp][cohort][sub][f] = {}
-                         for key_ in d[cohort][sub][gp][f].keys():
-                             p[gp][cohort][sub][f][key_] = d[cohort][sub][gp][
-                                 f
-                             ][key_]
-
-         np.save(os.path.join(outpath, "grid_point_all_re.npy"), p)
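The CohortRunner removed above parallelized whole-cohort processing with the standard library: run_cohorts collected every .vhdr recording via BIDSLayout and mapped multiprocess_pipeline_run_wrapper over a multiprocessing.Pool. A minimal, self-contained sketch of that fan-out pattern, where process_run is a hypothetical stand-in for the per-recording pipeline:

    from multiprocessing import Pool

    def process_run(path_run: str) -> str:
        # stand-in for the per-recording work (stream.run(), feature reading, decoding)
        return f"processed {path_run}"

    if __name__ == "__main__":
        run_files = ["sub-01_run-0_ieeg.vhdr", "sub-02_run-0_ieeg.vhdr"]
        # CohortRunner hard-coded Pool(processes=50); a context manager also closes the pool
        with Pool(processes=2) as pool:
            results = pool.map(process_run, run_files)
        print(results)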
py_neuromodulation/nm_eval_timing.py (deleted; matches entry 88 above, +0 -239)
@@ -1,239 +0,0 @@
- import timeit
- from . import nm_oscillatory
- import numpy as np
- from py_neuromodulation import (
-     nm_normalization,
-     nm_stft,
-     nm_bandpower,
-     nm_filter,
- )
-
-
- class NM_Timer:
-     def __init__(self, analyzer) -> None:
-         self.analyzer = analyzer
-
-         self.get_timings()
-
-     def get_timings(self, number_repeat=1000):
-
-         features_ = {}
-         ch_idx = 0
-         fs = self.analyzer.fs
-         ch_name = "ECOG_L_1_SMC_AT"
-         N_CH_BEFORE_REREF = 15 # 2
-         N_CH_AFTER_REREF = 11 # 2
-         data = np.random.random([N_CH_BEFORE_REREF, fs])
-
-         dict_timings = {}
-
-         if self.analyzer.settings["methods"]["notch_filter"]:
-             dict_timings["time_rereference"] = (
-                 timeit.timeit(
-                     lambda: self.analyzer.reference.rereference(data),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         data = np.random.random([N_CH_AFTER_REREF, fs])
-
-         if self.analyzer.settings["methods"]["raw_resampling"]:
-             dict_timings["time_resample"] = (
-                 timeit.timeit(
-                     lambda: self.analyzer.resample.raw_resampling(data),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         data = np.random.random(
-             [
-                 N_CH_AFTER_REREF,
-                 self.analyzer.settings["raw_resampling_settings"][
-                     "resample_freq"
-                 ],
-             ]
-         )
-
-         if self.analyzer.settings["methods"]["notch_filter"]:
-             dict_timings["time_notchfilter"] = (
-                 timeit.timeit(
-                     lambda: self.analyzer.notch_filter.filter_data(data),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["raw_normalization"]:
-             dict_timings["time_norm_raw"] = (
-                 timeit.timeit(
-                     lambda: nm_normalization.normalize_raw(
-                         current=data,
-                         previous=data.T,
-                         normalize_samples=int(
-                             self.analyzer.settings[
-                                 "raw_normalization_settings"
-                             ]["normalization_time"]
-                             * self.analyzer.fs
-                         ),
-                         sample_add=int(self.analyzer.fs / self.analyzer.fs_new),
-                         method=self.analyzer.settings[
-                             "raw_normalization_settings"
-                         ]["normalization_method"],
-                         clip=self.analyzer.settings[
-                             "raw_normalization_settings"
-                         ]["clip"],
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         features_previous = self.analyzer.features_previous
-         features_current = self.analyzer.features_current.iloc[
-             : features_previous.shape[1]
-         ]
-
-         if self.analyzer.settings["methods"]["feature_normalization"]:
-             dict_timings["time_feature_norm"] = (
-                 timeit.timeit(
-                     lambda: nm_normalization.normalize_features(
-                         current=features_current.to_numpy(),
-                         previous=features_previous,
-                         normalize_samples=self.analyzer.feat_normalize_samples,
-                         method=self.analyzer.settings[
-                             "feature_normalization_settings"
-                         ]["normalization_method"],
-                         clip=self.analyzer.settings[
-                             "feature_normalization_settings"
-                         ]["clip"],
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["project_cortex"]:
-             dict_timings["time_projection"] = (
-                 timeit.timeit(
-                     lambda: self.analyzer.projection.project_features(
-                         features_current
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["bandpass_filter"]:
-             dict_timings["time_applyfilterband"] = (
-                 timeit.timeit(
-                     lambda: self.analyzer.features.bandpass_filter.filter_data(
-                         data,
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["sharpwave_analysis"]:
-             dict_timings["time_sw"] = (
-                 timeit.timeit(
-                     lambda: self.analyzer.features.sw_features.get_sharpwave_features(
-                         features_, data[ch_idx, -100:], ch_name
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["stft"]:
-             dict_timings["time_stft"] = (
-                 timeit.timeit(
-                     lambda: nm_stft.get_stft_features(
-                         features_,
-                         self.analyzer.features.s,
-                         self.analyzer.features.fs,
-                         data[ch_idx, :],
-                         self.analyzer.features.KF_dict,
-                         ch_name + "-avgref",
-                         self.analyzer.features.f_ranges,
-                         self.analyzer.features.fband_names,
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["fft"]:
-             dict_timings["time_fft"] = (
-                 timeit.timeit(
-                     lambda: nm_oscillatory.get_fft_features(
-                         features_,
-                         self.analyzer.features.s,
-                         self.analyzer.features.fs,
-                         data[ch_idx, :],
-                         self.analyzer.features.KF_dict,
-                         ch_name,
-                         self.analyzer.features.f_ranges,
-                         self.analyzer.features.fband_names,
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["bandpass_filter"]:
-             seglengths = np.floor(
-                 self.analyzer.fs
-                 / 1000
-                 * np.array(
-                     [
-                         value
-                         for value in self.analyzer.features.s[
-                             "bandpass_filter_settings"
-                         ]["segment_lengths"].values()
-                     ]
-                 )
-             ).astype(int)
-
-             dat_filtered = nm_filter.apply_filter(
-                 data, self.analyzer.features.filter_fun
-             ) # shape (bands, time)
-             dict_timings["time_bandpass_filter"] = (
-                 timeit.timeit(
-                     lambda: nm_bandpower.get_bandpower_features(
-                         features_,
-                         self.analyzer.features.s,
-                         seglengths,
-                         dat_filtered,
-                         self.analyzer.features.KF_dict,
-                         ch_name,
-                         ch_idx,
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         if self.analyzer.settings["methods"]["coherence"]:
-             coh_obj = self.analyzer.features.coherence_objects[0]
-             dict_timings["time_coherence"] = (
-                 timeit.timeit(
-                     lambda: coh_obj.get_coh(
-                         features_,
-                         data[coh_obj.ch_1_idx, :],
-                         data[coh_obj.ch_2_idx, :],
-                     ),
-                     number=number_repeat,
-                 )
-                 / number_repeat
-             )
-
-         print("Average duration per function:")
-         for key, val in dict_timings.items():
-             print(f" {key} : {np.round(val*1000, 2)}ms")
-
-         print(
-             "fft, sw, bandpass, coherence and stft are timings for an individual channel"
-         )
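The NM_Timer class removed above benchmarked every enabled pipeline step with one stdlib idiom: wrap the call in a lambda, execute it number_repeat times via timeit.timeit, and divide the total by the repeat count to get the mean per-call duration. A minimal standalone sketch of that pattern, with a dummy workload in place of the pipeline methods:

    import timeit

    import numpy as np

    def workload(data: np.ndarray) -> np.ndarray:
        # dummy stand-in for a pipeline step such as rereferencing or filtering
        return data - data.mean(axis=1, keepdims=True)

    data = np.random.random((11, 1000))  # (channels, samples), as in get_timings
    number_repeat = 1000
    mean_s = timeit.timeit(lambda: workload(data), number=number_repeat) / number_repeat
    print(f"workload: {np.round(mean_s * 1000, 2)} ms per call")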