py-neuromodulation 0.1.3__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- py_neuromodulation/default_settings.yaml +5 -4
- py_neuromodulation/features/fooof.py +9 -5
- py_neuromodulation/features/sharpwaves.py +31 -11
- py_neuromodulation/gui/frontend/assets/{index-_6V8ZfAS.js → index-B53U6dwc.js} +6 -2
- py_neuromodulation/gui/frontend/index.html +1 -1
- py_neuromodulation/lsl_api.cfg +2 -2
- py_neuromodulation/processing/rereference.py +6 -1
- py_neuromodulation/stream/data_processor.py +7 -2
- py_neuromodulation/stream/stream.py +7 -2
- py_neuromodulation/utils/channels.py +5 -1
- py_neuromodulation/utils/file_writer.py +8 -0
- py_neuromodulation/utils/pydantic_extensions.py +9 -18
- py_neuromodulation/utils/types.py +3 -3
- {py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/METADATA +14 -5
- {py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/RECORD +18 -18
- {py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/WHEEL +1 -1
- {py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/entry_points.txt +0 -0
- {py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/licenses/LICENSE +0 -0
py_neuromodulation/default_settings.yaml CHANGED

@@ -54,10 +54,10 @@ raw_normalization_settings:
   clip: 3

 preprocessing_filter:
-  bandstop_filter:
-  lowpass_filter:
-  highpass_filter:
-  bandpass_filter:
+  bandstop_filter: false
+  lowpass_filter: false
+  highpass_filter: false
+  bandpass_filter: false
   bandstop_filter_settings: [100, 160] # [low_hz, high_hz]
   bandpass_filter_settings: [3, 200] # [hz, _hz]
   lowpass_filter_cutoff_hz: 200
@@ -245,3 +245,4 @@ bispectrum_settings:
   mean: true
   sum: true
   var: true
+
py_neuromodulation/features/fooof.py CHANGED

@@ -87,17 +87,21 @@ class FooofAnalyzer(NMFeature):

         spectra = np.abs(rfft(data[:, -self.num_samples :])) # type: ignore

-
+        try:
+            self.fm.fit(self.f_vec, spectra, self.settings.freq_range_hz)
+        except:
+            failed_fits = list(range(len(self.ch_names)))

         if not self.fm.has_model or self.fm.null_inds_ is None:
-
-
-
+            failed_fits = list(range(len(self.ch_names)))
+        else:
+            failed_fits: list[int] = self.fm.null_inds_

         feature_results = {}
         for ch_idx, ch_name in enumerate(self.ch_names):
             FIT_PASSED = ch_idx not in failed_fits
-
+            if FIT_PASSED:
+                exp = self.fm.get_params("aperiodic_params", "exponent")[ch_idx]

             for feat in self.settings.aperiodic.get_enabled():
                 f_name = f"{ch_name}_fooof_a_{self.feat_name_map[feat]}"
py_neuromodulation/features/sharpwaves.py CHANGED

@@ -85,16 +85,16 @@ class SharpwaveSettings(NMBaseModel):
             self.estimator[est] = []

     @model_validator(mode="after")
-    def test_settings(
+    def test_settings(self):
         # check if all features are also enabled via an estimator
-        estimator_list = [est for list_ in
+        estimator_list = [est for list_ in self.estimator.values() for est in list_]

-        for used_feature in
+        for used_feature in self.sharpwave_features.get_enabled():
             assert (
                 used_feature in estimator_list
             ), f"Add estimator key for {used_feature}"

-        return
+        return self


 class SharpwaveAnalyzer(NMFeature):
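For context on the `test_settings` change above: in Pydantic v2, a `@model_validator(mode="after")` method receives the model instance and must return it. The sketch below is a stand-alone illustration of that pattern with made-up fields, not the package's actual `SharpwaveSettings` model.

```python
# Minimal sketch of an "after" model validator that returns self (made-up fields).
from pydantic import BaseModel, model_validator


class DemoSettings(BaseModel):
    peak_left: bool = True
    estimator: dict[str, list[str]] = {"mean": ["peak_left"]}

    @model_validator(mode="after")
    def test_settings(self):
        # every enabled feature must appear under some estimator key
        estimator_list = [est for lst in self.estimator.values() for est in lst]
        if self.peak_left and "peak_left" not in estimator_list:
            raise ValueError("Add estimator key for peak_left")
        return self  # mode="after" validators must return the model instance


DemoSettings()  # passes validation
# DemoSettings(estimator={"mean": []})  # would raise a ValidationError
```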
@@ -144,8 +144,16 @@ class SharpwaveAnalyzer(NMFeature):
         )

         self.filter_names = [name for name, _ in self.list_filter]
-
-
+        filter_lengths = [len(f) for _, f in self.list_filter]
+
+        self.equal_filters_lengths = len(set(filter_lengths)) <= 1
+        if self.equal_filters_lengths:
+            self.filters = np.vstack([f for _, f in self.list_filter])
+            self.filters = np.tile(self.filters[None, :, :], (len(self.ch_names), 1, 1))
+        else:
+            self.filters = [
+                np.tile(f, (len(self.ch_names), 1)) for _, f in self.list_filter
+            ]

         self.used_features = self.sw_settings.sharpwave_features.get_enabled()

@@ -235,8 +243,14 @@
 
         from scipy.signal import fftconvolve

-
-
+        if self.equal_filters_lengths:
+            data = np.tile(data[:, None, :], (1, len(self.list_filter), 1))
+            data = fftconvolve(data, self.filters, axes=2, mode="same")
+        else:
+            len_data = len(data[0])
+            conv_results = [fftconvolve(data, f, mode="same") for f in self.filters]
+            data = np.concat(conv_results, axis=1)
+            data = data.reshape([len(self.ch_names), len(self.filters), len_data])

         self.filtered_data = (
             data  # TONI: Expose filtered data for example 3, need a better way
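The equal-length branch above convolves every (channel, filter) pair in a single vectorized `fftconvolve` call. A self-contained sketch of that idea follows; array shapes and variable names are illustrative, not taken from the package.

```python
# Vectorized multi-filter convolution: stack equal-length FIR filters, tile data
# and filters to matching 3-D shapes, then convolve once along the time axis.
import numpy as np
from scipy.signal import fftconvolve

n_channels, n_samples, n_taps = 4, 1000, 101
data = np.random.randn(n_channels, n_samples)
filters = np.random.randn(3, n_taps)  # three equal-length filters

data_tiled = np.tile(data[:, None, :], (1, filters.shape[0], 1))   # (4, 3, 1000)
filt_tiled = np.tile(filters[None, :, :], (n_channels, 1, 1))      # (4, 3, 101)
out = fftconvolve(data_tiled, filt_tiled, axes=2, mode="same")     # (4, 3, 1000)
print(out.shape)
```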
@@ -270,7 +284,9 @@
             if feature_name == "num_peaks":
                 key_name = f"{ch_name}_Sharpwave_{feature_name}_{filter_name}"
                 if len(waveform_results[feature_name]) == 1:
-                    dict_ch_features[key_name][key_name_pt] = waveform_results[
+                    dict_ch_features[key_name][key_name_pt] = waveform_results[
+                        feature_name
+                    ][0]
                     continue
                 else:
                     raise ValueError("num_peaks should be a list with length 1")
@@ -305,8 +321,12 @@
             for ch_name in self.ch_names:
                 for filter_name in self.filter_names:
                     key_name = f"{ch_name}_Sharpwave_num_peaks_{filter_name}"
-                    feature_results[key_name] = np_mean(
-
+                    feature_results[key_name] = np_mean(
+                        [
+                            dict_ch_features[key_name]["Peak"],
+                            dict_ch_features[key_name]["Trough"],
+                        ]
+                    )
         else:
             # otherwise, save all write all "flattened" key value pairs in feature_results
             for key, subdict in dict_ch_features.items():
py_neuromodulation/gui/frontend/assets/{index-_6V8ZfAS.js → index-B53U6dwc.js} CHANGED

@@ -13906,7 +13906,11 @@ const useSessionStore = createStore("session", (set2, get) => ({
   sourceType: null,
   // 'file' or 'lsl'
   isSourceValid: false,
-  fileSource: {
+  fileSource: {
+    name: "",
+    path: "",
+    size: 0
+  },
   // FileInfo object
   lslSource: {
     selectedStream: null,
@@ -14183,7 +14187,7 @@ const useSessionStore = createStore("session", (set2, get) => ({
   sourceType: null,
   isSourceValid: false,
   fileSource: {
-
+    path: ""
   },
   lslSource: {
     streamName: ""
py_neuromodulation/gui/frontend/index.html CHANGED

@@ -5,7 +5,7 @@
     <link rel="icon" type="image/svg+xml" href="/charite.svg" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>PyNeuromodulation</title>
-    <script type="module" crossorigin src="/assets/index-_6V8ZfAS.js"></script>
+    <script type="module" crossorigin src="/assets/index-B53U6dwc.js"></script>
     <link rel="modulepreload" crossorigin href="/assets/plotly-DTCwMlpS.js">
   </head>
   <body>
py_neuromodulation/lsl_api.cfg CHANGED

@@ -1,3 +1,3 @@
 [log]
-level =
-file = ./lsllog.txt
+level = 6
+file = ./lsllog.txt
py_neuromodulation/processing/rereference.py CHANGED

@@ -55,7 +55,7 @@ class ReReferencer(NMPreprocessor):
             # if ind not in channels_used:
             # continue
             ref = refs[ind]
-            if ref.lower() == "none" or pd.isnull(ref):
+            if ref.lower() == "none" or pd.isnull(ref) or channels["status"][ind] != "good":
                 ref_idx = None
                 continue
             if ref.lower() == "average":
@@ -78,7 +78,12 @@ class ReReferencer(NMPreprocessor):
                 )
                 ref_idx.append(ch_names.index(ref_chan))
             ref_matrix[ind, ref_idx] = -1 / len(ref_idx)
+
         self.ref_matrix = ref_matrix
+
+        # only index good channels
+        good_idxs = np.where(channels["status"] == "good")[0]
+        self.ref_matrix = self.ref_matrix[good_idxs, :][:, good_idxs]

     def process(self, data: np.ndarray) -> np.ndarray:
         """Rereference data according to the initialized ReReferencer class.
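The added lines keep only the rows and columns of the re-reference matrix that belong to channels whose status is "good". Shown in isolation below with a toy matrix and channel table; the values are illustrative.

```python
# Toy example of the good-channel subsetting added above.
import numpy as np
import pandas as pd

channels = pd.DataFrame({"status": ["good", "bad", "good"]})
ref_matrix = np.eye(3)  # stand-in for the full re-reference matrix

good_idxs = np.where(channels["status"] == "good")[0]
ref_matrix = ref_matrix[good_idxs, :][:, good_idxs]
print(ref_matrix.shape)  # (2, 2): the bad channel's row and column are gone
```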
py_neuromodulation/stream/data_processor.py CHANGED

@@ -143,12 +143,17 @@ class DataProcessor:
     ) -> tuple[list[str], list[str], list[int], np.ndarray]:
         """Get used feature and label info from channels"""
         channels = self.channels
-        ch_names_used = channels
-        ch_types_used = channels
+        ch_names_used = channels.query("used == 1 and status == 'good'")["new_name"].tolist()
+        ch_types_used = channels.query("used == 1 and status == 'good'")["type"].tolist()

         # used channels for feature estimation
         feature_idx = np.where(channels["used"] & ~channels["target"])[0].tolist()

+        # remove the idxs of status == "bad"
+        feature_idx = [
+            idx for idx in feature_idx if channels.loc[idx, "status"] == "good"
+        ]
+
         # If multiple targets exist, select only the first
         label_idx = np.where(channels["target"] == 1)[0]

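The channel filtering above relies on `DataFrame.query`. A stand-alone sketch with a made-up channels table; the column names follow the diff, the rows are invented.

```python
# Select channels that are used AND marked "good", as in the diff above.
import pandas as pd

channels = pd.DataFrame(
    {
        "new_name": ["ECOG_L_1", "ECOG_L_2", "MOV_R"],
        "type": ["ecog", "ecog", "misc"],
        "used": [1, 1, 0],
        "target": [0, 0, 1],
        "status": ["good", "bad", "good"],
    }
)

used_good = channels.query("used == 1 and status == 'good'")
print(used_good["new_name"].tolist())  # ['ECOG_L_1']: the bad channel is dropped
```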
py_neuromodulation/stream/stream.py CHANGED

@@ -39,7 +39,7 @@ class Stream:
         path_grids: _PathLike | None = None,
         coord_names: list | None = None,
         coord_list: list | None = None,
-        verbose: bool =
+        verbose: bool = False,
     ) -> None:
         """Stream initialization

@@ -208,6 +208,7 @@ class Stream:
         simulate_real_time: bool = False,
         decoder: RealTimeDecoder | None = None,
         backend_interface: StreamBackendInterface | None = None,
+        delete_ind_batch_files_after_stream: bool = True,
     ) -> "pd.DataFrame":
         self.is_stream_lsl = is_stream_lsl
         self.stream_lsl_name = stream_lsl_name
@@ -240,7 +241,8 @@ class Stream:
             verbose=self.verbose,
         )

-
+        if self.verbose:
+            nm.logger.log_to_file(out_dir)

         self.generator: Iterator
         if not is_stream_lsl and data is not None:
@@ -337,6 +339,9 @@ class Stream:
         self._save_after_stream()
         self.is_running = False

+        if delete_ind_batch_files_after_stream is True:
+            file_writer.delete_ind_files()
+
         return feature_df  # Timon: We could think of returnader instead

     def _prepare_raw_data_dict(self, data_batch: np.ndarray) -> dict[str, Any]:
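Taken together, these hunks make `Stream.run()` delete the per-batch MessagePack files after the run by default and make file logging opt-in via `verbose`. A hedged usage sketch, combining the constructor call from the README quoted further below with the new flag; the data shape is illustrative and other `run()` arguments are omitted.

```python
# Illustrative only: keep the individual batch .msgpack files after the run.
import numpy as np
import py_neuromodulation as nm

data = np.random.randn(8, 10_000)  # 8 channels of toy data
stream = nm.Stream(sfreq=1000, data=data, sampling_rate_features_hz=10)
features = stream.run(delete_ind_batch_files_after_stream=False)
```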
py_neuromodulation/utils/channels.py CHANGED

@@ -46,6 +46,7 @@ def set_channels(
         For this, the channel names must contain the substring "_L_" and/or
         "_R_" (lower or upper case). CAVE: Adjacent channels will be
         determined using the sort() function.
+        Re-reference can be also "average" for common-average-referencing
     bads : str | list of str, default: None
         channels that should be marked as bad and not be used for
         average re-referencing etc.
@@ -127,9 +128,12 @@
     if isinstance(reference, str):
         if reference.lower() == "default":
             df = _get_default_references(df=df, ch_names=ch_names, ch_types=ch_types)
+        elif reference.lower() == "average":
+            df["rereference"] = ["average" if df["used"][ch_idx] == 1 else "None"
+                                 for ch_idx in range(len(ch_names))]
         else:
             raise ValueError(
-                "`reference` must be either `default`, `None` or "
+                "`reference` must be either `default`, `None`, `average` or "
                 "an iterable of new reference channel names. "
                 f"Got: {reference}."
             )
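The new "average" branch marks every used channel for common-average re-referencing and leaves the rest untouched. The assignment in isolation, on a toy dataframe with invented values:

```python
# Used channels get "average", unused channels get "None".
import pandas as pd

df = pd.DataFrame({"name": ["ch1", "ch2", "ch3"], "used": [1, 0, 1]})
ch_names = df["name"].tolist()
df["rereference"] = [
    "average" if df["used"][ch_idx] == 1 else "None" for ch_idx in range(len(ch_names))
]
print(df["rereference"].tolist())  # ['average', 'None', 'average']
```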
py_neuromodulation/utils/file_writer.py CHANGED

@@ -108,3 +108,11 @@ class MsgPackFileWriter(AbstractFileWriter):
         with open(outpath, "rb") as f:
             data = msgpack.unpack(f)
         data.to_csv(self.csv_path, index=False)
+
+    def delete_ind_files(self,):
+        """
+        Delete individual MessagePack files.
+        """
+        files_msg_pack = list(self.out_dir.glob(f"{self.name}-*.msgpack"))
+        for file in files_msg_pack:
+            file.unlink()
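What `delete_ind_files()` does, reduced to plain `pathlib`; the directory and file-name stem below are hypothetical stand-ins for the writer's `out_dir` and `name` attributes.

```python
# Remove per-batch files matching "<name>-*.msgpack" in an output directory.
from pathlib import Path

out_dir = Path("./out")   # hypothetical output directory
name = "example_run"      # hypothetical file-name stem
for file in out_dir.glob(f"{name}-*.msgpack"):
    file.unlink()
```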
py_neuromodulation/utils/pydantic_extensions.py CHANGED

@@ -5,10 +5,11 @@ from typing import (
     get_args,
     get_type_hints,
     Literal,
+    Unpack,
+    TypedDict,
     cast,
     Sequence,
 )
-from typing_extensions import Unpack, TypedDict
 from pydantic import BaseModel, GetCoreSchemaHandler, ConfigDict

 from pydantic_core import (
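Since the package now requires Python >= 3.12 (see the METADATA change below), `Unpack` and `TypedDict` can come straight from `typing` instead of `typing_extensions`. A minimal, self-contained illustration of the pattern; the `PlotOptions` and `plot` names are made up.

```python
# Typed **kwargs with Unpack on Python 3.12+, no typing_extensions needed.
from typing import TypedDict, Unpack


class PlotOptions(TypedDict, total=False):
    color: str
    linewidth: int


def plot(**kwargs: Unpack[PlotOptions]) -> None:
    print(kwargs)


plot(color="red", linewidth=2)  # type checkers validate the keyword names
```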
@@ -118,23 +119,13 @@ class _NMExtraFieldInputs(TypedDict, total=False):
     custom_metadata: dict[str, Any]


-class _NMFieldInfoInputs(_FieldInfoInputs, _NMExtraFieldInputs, total=False):
-    """Combine pydantic FieldInfo inputs with PyNM additional inputs"""
-
-    pass
-
-
-class _NMFromFieldInfoInputs(_FromFieldInfoInputs, _NMExtraFieldInputs, total=False):
-    """Combine pydantic FieldInfo.from_field inputs with PyNM additional inputs"""
-
-    pass
-
-
 class NMFieldInfo(FieldInfo):
     # Add default values for any other custom fields here
     _default_values = {}

-    def __init__(
+    def __init__(
+        self, **kwargs: Unpack[_FieldInfoInputs | _NMExtraFieldInputs]
+    ) -> None:
         self.sequence: bool = kwargs.pop("sequence", False)  # type: ignore
         self.custom_metadata: dict[str, Any] = kwargs.pop("custom_metadata", {})
         super().__init__(**kwargs)
@@ -162,7 +153,7 @@ class NMFieldInfo(FieldInfo):
     @staticmethod
     def from_field(
         default: Any = PydanticUndefined,
-        **kwargs: Unpack[
+        **kwargs: Unpack[_FromFieldInfoInputs | _NMExtraFieldInputs],
     ) -> "NMFieldInfo":
         if "annotation" in kwargs:
             raise TypeError('"annotation" is not permitted as a Field keyword argument')
@@ -178,7 +169,7 @@ class NMFieldInfo(FieldInfo):

 def NMField(
     default: Any = PydanticUndefined,
-    **kwargs: Unpack[
+    **kwargs: Unpack[_FromFieldInfoInputs | _NMExtraFieldInputs],
 ) -> Any:
     return NMFieldInfo.from_field(default=default, **kwargs)

@@ -197,7 +188,7 @@ class NMBaseModel(BaseModel):
             super().__init__(*args, **kwargs)
             return

-        field_names = list(self.model_fields.keys())
+        field_names = list(self.__class__.model_fields.keys())
         # If we have more positional args than fields, that's an error
         if len(args) > len(field_names):
             raise ValueError(
@@ -238,7 +229,7 @@

     @property
     def fields(self) -> dict[str, FieldInfo | NMFieldInfo]:
-        return self.model_fields  # type: ignore
+        return self.__class__.model_fields  # type: ignore

     def serialize_with_metadata(self):
         result: dict[str, Any] = {"__field_type__": self.__class__.__name__}
py_neuromodulation/utils/types.py CHANGED

@@ -135,17 +135,17 @@ class BoolSelector(NMBaseModel):
     def get_enabled(self):
         return [
             f
-            for f in self.model_fields.keys()
+            for f in self.__class__.model_fields.keys()
             if (isinstance(self[f], bool) and self[f])
         ]

     def enable_all(self):
-        for f in self.model_fields.keys():
+        for f in self.__class__.model_fields.keys():
             if isinstance(self[f], bool):
                 self[f] = True

     def disable_all(self):
-        for f in self.model_fields.keys():
+        for f in self.__class__.model_fields.keys():
             if isinstance(self[f], bool):
                 self[f] = False

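The switch from `self.model_fields` to `self.__class__.model_fields` here and in pydantic_extensions.py above matches newer Pydantic v2 releases, where `model_fields` is meant to be read from the model class rather than the instance. A small illustration with a made-up model:

```python
# Read field names from the class, then read the values from the instance.
from pydantic import BaseModel


class DemoSelector(BaseModel):
    mean: bool = True
    var: bool = False


s = DemoSelector()
enabled = [name for name in type(s).model_fields if getattr(s, name) is True]
print(enabled)  # ['mean']
```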
{py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: py_neuromodulation
-Version: 0.1.3
+Version: 0.1.7
 Summary: Real-time analysis of intracranial neurophysiology recordings.
 Project-URL: Homepage, https://neuromodulation.github.io/py_neuromodulation/
 Project-URL: Documentation, https://neuromodulation.github.io/py_neuromodulation/
@@ -35,7 +35,7 @@ Classifier: Development Status :: 2 - Pre-Alpha
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: >=3.
+Requires-Python: >=3.12
 Requires-Dist: cbor2>=5.6.4
 Requires-Dist: fastapi
 Requires-Dist: fooof
@@ -92,9 +92,18 @@ Requires-Dist: pytest-xdist; extra == 'test'
 Requires-Dist: pytest>=8.0.2; extra == 'test'
 Description-Content-Type: text/x-rst

+
+
 py_neuromodulation
 ==================

+Journal of Open Source Science publication:
+
+.. image:: https://joss.theoj.org/papers/10.21105/joss.08258/status.svg
+   :target: https://doi.org/10.21105/joss.08258
+
+
+
 Documentation: https://neuromodulation.github.io/py_neuromodulation/

 Analyzing neural data can be a troublesome, trial and error prone,
@@ -130,7 +139,7 @@ Find the documentation here neuromodulation.github.io/py_neuromodulation/ for ex
 Installation
 ============

-py_neuromodulation requires at least python 3.
+py_neuromodulation requires at least python 3.12. For installation you can use pip:

 .. code-block::

@@ -140,7 +149,7 @@ Alternatively you can also clone the pacakge and install it using `uv <https://d

 .. code-block::

-    uv python install 3.
+    uv python install 3.12
     uv venv
     . .venv/bin/activate
     uv sync
@@ -170,7 +179,7 @@ Basic Usage
     stream = nm.Stream(sfreq=sfreq, data=data, sampling_rate_features_hz=sampling_rate_features_hz)
     features = stream.run()

-Check the `Usage <https://
+Check the `Usage <https://neuromodulation.github.io/py_neuromodulation/usage.html>`_ and `First examples <https://neuromodulation.github.io/py_neuromodulation/auto_examples/index.html>`_ for further introduction.

 Contact information
 -------------------
{py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/RECORD CHANGED

@@ -1,8 +1,8 @@
 py_neuromodulation/__init__.py,sha256=gu0XWs6bzSMJ7JTu8xxFMCPj6TY0nQ6Q0tuBkK7onRI,2996
-py_neuromodulation/default_settings.yaml,sha256=
+py_neuromodulation/default_settings.yaml,sha256=7e2UYayioKx85HeW43OyVS7CvWEt37yeITp0lJCPFGU,6393
 py_neuromodulation/grid_cortex.tsv,sha256=k2QOkHY1ej3lJ33LD6DOPVlTynzB3s4BYaoQaoUCyYc,643
 py_neuromodulation/grid_subcortex.tsv,sha256=oCQDYLDdYSa1DAI9ybwECfuzWulFzXqKHyf7oZ1oDBM,25842
-py_neuromodulation/lsl_api.cfg,sha256=
+py_neuromodulation/lsl_api.cfg,sha256=oKJ5S_9mJjLUCuI4i1jZVOOquNebzdCDIMQWv1gwT3U,39
 py_neuromodulation/run_gui.py,sha256=NW6mjSfgNAHoIcFYOD-kebANTvw3UKr470lLplveicI,700
 py_neuromodulation/ConnectivityDecoding/Automated Anatomical Labeling 3 (Rolls 2020).nii,sha256=Sp-cjF_AuT0Tlilb5s8lB14hVgkXJiR2uKMS9nOQOeg,902981
 py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m,sha256=2RPDGotbLsCzDJLFB2JXatJtfOMno9UUBCBnsOuse8A,714
@@ -36,13 +36,13 @@ py_neuromodulation/features/bispectra.py,sha256=BzI0UsMnBWal6qtqmTmLOR20IA_Wly7E
 py_neuromodulation/features/bursts.py,sha256=HPLZ7EHbDjDthN96bRgUjb9JDvSoZkf-aYGroI9GLdM,11574
 py_neuromodulation/features/coherence.py,sha256=RmY-KPROZbsnmjewOrZjFO4KEq7D_D5P4sgZ3W_CCaw,9238
 py_neuromodulation/features/feature_processor.py,sha256=S1hFg7Ke5CaD64sti0ZaTBL-KnOrIjliq3ytpO01Wls,3976
-py_neuromodulation/features/fooof.py,sha256=
+py_neuromodulation/features/fooof.py,sha256=YF3i_RgzJ4SMTRkOBPXB4hQNppcSKhS0S-KS0bTbpt8,5319
 py_neuromodulation/features/hjorth_raw.py,sha256=mRPioPHJdN73AGErRbJ5S1Vz__qEQ89jAKaeN5k8eXo,1845
 py_neuromodulation/features/linelength.py,sha256=8BTctvr9Zj8TEK2HLJqi73j_y2Xgt8lKK-mJhv8eAsM,641
 py_neuromodulation/features/mne_connectivity.py,sha256=lQHLIXmoyDOWf5agmGMLeq1cQLiG6ud-vYf75CpYTVI,4109
 py_neuromodulation/features/nolds.py,sha256=jNCKQlIfmcAhmzjTAJMbFPhRuPtYZF5BDzR4qHlLp1k,3374
 py_neuromodulation/features/oscillatory.py,sha256=KzQQ3EA75G-hJVmL_YBybWTnuNJFZXAM0aBHudr-dvM,7806
-py_neuromodulation/features/sharpwaves.py,sha256=
+py_neuromodulation/features/sharpwaves.py,sha256=lnX6y4JsNQ7B0iU0W9GDkqvldVHO9FyoaUF1cMHuCbs,18714
 py_neuromodulation/filter/__init__.py,sha256=ut1q8daCZoN7lhTKURGpk1X5oKiS3eSNqR7SkZyGDJw,128
 py_neuromodulation/filter/kalman_filter.py,sha256=-aSAq7KcJ8LUjUThsQtTaIcvz-Qtavik6ltk59j7O-Q,2194
 py_neuromodulation/filter/kalman_filter_external.py,sha256=_7FFq-1GQY9mNA0EvmaM4wQ46DVkHC9bYFIgiw9b6nY,61367
@@ -57,9 +57,9 @@ py_neuromodulation/gui/backend/app_socket.py,sha256=QeaD1AKd_F2oPDKtpikgcNL3v9v8
 py_neuromodulation/gui/backend/app_utils.py,sha256=KgnldjqiYosgGGtE7l7LOg-q3_U1Cpz3IlpnkzGuux8,9693
 py_neuromodulation/gui/backend/app_window.py,sha256=eOk4yjx4qIKYKZhyP8MPlnbZx6OF7DjFgyG8rXjo2vY,6207
 py_neuromodulation/gui/frontend/charite.svg,sha256=RlvOSsBUuFKePKjkuadbtI1Yv3zZ41nRbfqfEb0h_88,1112
-py_neuromodulation/gui/frontend/index.html,sha256
+py_neuromodulation/gui/frontend/index.html,sha256=-30HoBOEmsPAR1QxJj5L4TpPevYgRyf4VIDR4CtpXU4,470
 py_neuromodulation/gui/frontend/assets/Figtree-VariableFont_wght-CkXbWBDP.ttf,sha256=_tZBDRp07GKuFOiGalezi5Pedl29EpNUhoJ2lAspjXA,62868
-py_neuromodulation/gui/frontend/assets/index-
+py_neuromodulation/gui/frontend/assets/index-B53U6dwc.js,sha256=2-GEEs-bOOec81oVIwiLZyQExQW0qvQy3kSHa2DOjQ0,13847275
 py_neuromodulation/gui/frontend/assets/plotly-DTCwMlpS.js,sha256=O34l2pwjKay4L9VvYbEvRUuJ8PbvTTQGaPqZUcoao0g,1696475
 py_neuromodulation/liblsl/libpugixml.so.1.12,sha256=_bCOHUjcnGpDiROg1qjgty8ZQhcKHSnaCIP6SMgw6SY,240248
 py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so,sha256=YXFbA23CQqWg6mWhk-73WY9gSx79NtgnBr6UFVByC2I,1033592
@@ -82,28 +82,28 @@ py_neuromodulation/processing/data_preprocessor.py,sha256=4_yTKl9efCRFDicStgArQl
 py_neuromodulation/processing/filter_preprocessing.py,sha256=uIp2ewKQe3ojvGLHK26e9fLubEHOg9UF3uypTLkktFg,3611
 py_neuromodulation/processing/normalization.py,sha256=ixzl4zBotppQjJwCYdU2tGR_sJEf2mtYTfCRlRRd8E4,6150
 py_neuromodulation/processing/projection.py,sha256=JYYRG9_PcPL7a2ge3FjrCWiEIaCXGIKKegTjW1ecQ_Y,14772
-py_neuromodulation/processing/rereference.py,sha256=
+py_neuromodulation/processing/rereference.py,sha256=X8YdOGt6mVxIE6gfkP7b1OsQVY7SXE-PMHZBgu_gP4k,3526
 py_neuromodulation/processing/resample.py,sha256=J87gjTSqWM7dDB1ZkoMo9EP3iAL4bI03K9RAMzCCowQ,1354
 py_neuromodulation/stream/__init__.py,sha256=4kagKtWOLKKZ7l6Dag1EpOFBOf8vv1eu2ll7EYbFD2Y,266
 py_neuromodulation/stream/backend_interface.py,sha256=Qn2iUH4l3VNfTXXWNJykseNzZQQAZF0M-5e_nd4qjMM,1789
-py_neuromodulation/stream/data_processor.py,sha256
+py_neuromodulation/stream/data_processor.py,sha256=pBXyma25KNl1npWLEHBYXcJ35fFoByzPd8WaaT5OdUQ,12398
 py_neuromodulation/stream/generator.py,sha256=UKLuM8gz2YLBuVQnQNkkOOKhwsyW27ZgvRJ_5BK7Glo,1588
 py_neuromodulation/stream/mnelsl_player.py,sha256=KksAWzr78JPqPlECweWZ7JThoxmTVWGo1_-m-fEL0N4,6351
 py_neuromodulation/stream/mnelsl_stream.py,sha256=-uHMCNLZwIcjiT9AMGWkJit5GsZjEJYkpki_DoaLzWY,4352
 py_neuromodulation/stream/settings.py,sha256=--K4NOlR3QNmg4ISgtvC1z_Ucf7MvGChvocck3w6LoI,11832
-py_neuromodulation/stream/stream.py,sha256=
+py_neuromodulation/stream/stream.py,sha256=tcIH0tW99qxrMS3GeRF45xe9K3PO6FB8JlYESH5uNDk,16593
 py_neuromodulation/utils/__init__.py,sha256=Ok3STMpsflCTclJC9C1iQgdT-3HNGMM7U45w5Oespr4,46
-py_neuromodulation/utils/channels.py,sha256=
+py_neuromodulation/utils/channels.py,sha256=QYgDqB5lgTlPLSgpHMA_6U_AgIwWAoHxYF_1haeuggw,10934
 py_neuromodulation/utils/database.py,sha256=VEFsmbYDQWwaoZKmJCG8oyWoDTbfSiT_p0n7da9_Pn4,4755
-py_neuromodulation/utils/file_writer.py,sha256=
+py_neuromodulation/utils/file_writer.py,sha256=eJLx3hK1-1VHKFXWHl4bPZH-dJ8zplUBLVQPDOa_VgQ,3427
 py_neuromodulation/utils/io.py,sha256=uE4k3ScspRy_RuWrrV7cdosH4eKxJKAkvnr5KC8SG6A,11515
 py_neuromodulation/utils/keyboard.py,sha256=swoxYhf4Q3pj50EKALUFt6hREfXnoXq2Z2q01IahPe8,1505
 py_neuromodulation/utils/logging.py,sha256=eIBFBRaAMb3KJnoxNFiCkMrTGzWwgfeDs8m5iq6FxN8,2178
 py_neuromodulation/utils/perf.py,sha256=10LYM13iTuWA-il-EMMOyZke3-1gcFEa6WLlHsJLO50,5471
-py_neuromodulation/utils/pydantic_extensions.py,sha256=
-py_neuromodulation/utils/types.py,sha256=
-py_neuromodulation-0.1.
-py_neuromodulation-0.1.
-py_neuromodulation-0.1.
-py_neuromodulation-0.1.
-py_neuromodulation-0.1.
+py_neuromodulation/utils/pydantic_extensions.py,sha256=Z7dpGw6cKuXM1CM5AKYzlbdItGfsDT1Yx8MHSNA0VP0,10751
+py_neuromodulation/utils/types.py,sha256=3AukBYuOqZJ5Mw6ZimRIYxGRSDdmMU9lsnaZkPUNWRA,4673
+py_neuromodulation-0.1.7.dist-info/METADATA,sha256=u__cUSnRv36x-Ac1o_Imm8l3WTXjTAUKt9KtBcdkjMM,7903
+py_neuromodulation-0.1.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+py_neuromodulation-0.1.7.dist-info/entry_points.txt,sha256=hImSrCn9vJcwocoeehqNyJ-qj5Hgfrg2o6MPAnIaAa0,60
+py_neuromodulation-0.1.7.dist-info/licenses/LICENSE,sha256=EMBwuBRPBo-WkHSjqxZ55E6j95gKNBZ8x30pt-VGfrM,1118
+py_neuromodulation-0.1.7.dist-info/RECORD,,
{py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/entry_points.txt
File without changes

{py_neuromodulation-0.1.3.dist-info → py_neuromodulation-0.1.7.dist-info}/licenses/LICENSE
File without changes