cydms 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cydms-0.1.0/PKG-INFO +14 -0
- cydms-0.1.0/cydms/__init__.py +3 -0
- cydms-0.1.0/cydms/analyze.py +51 -0
- cydms-0.1.0/cydms/eeg.py +277 -0
- cydms-0.1.0/cydms/mri.py +80 -0
- cydms-0.1.0/cydms/source.py +276 -0
- cydms-0.1.0/cydms.egg-info/PKG-INFO +14 -0
- cydms-0.1.0/cydms.egg-info/SOURCES.txt +11 -0
- cydms-0.1.0/cydms.egg-info/dependency_links.txt +1 -0
- cydms-0.1.0/cydms.egg-info/requires.txt +6 -0
- cydms-0.1.0/cydms.egg-info/top_level.txt +1 -0
- cydms-0.1.0/setup.cfg +4 -0
- cydms-0.1.0/setup.py +17 -0
cydms-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cydms
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: EEG + MRI source localization library — no FreeSurfer required
|
|
5
|
+
Requires-Python: >=3.8
|
|
6
|
+
Requires-Dist: numpy
|
|
7
|
+
Requires-Dist: nibabel
|
|
8
|
+
Requires-Dist: scipy
|
|
9
|
+
Requires-Dist: scikit-image
|
|
10
|
+
Requires-Dist: mne
|
|
11
|
+
Requires-Dist: pandas
|
|
12
|
+
Dynamic: requires-dist
|
|
13
|
+
Dynamic: requires-python
|
|
14
|
+
Dynamic: summary
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""
|
|
2
|
+
cydms.analyze
|
|
3
|
+
-------------
|
|
4
|
+
Main entry point — รับ MRI + EEG path คืน dict ผลลัพธ์ครบชุด
|
|
5
|
+
"""
|
|
6
|
+
from .mri import process_mri
|
|
7
|
+
from .eeg import load_eeg
|
|
8
|
+
from .source import compute_source_localization, compute_clinical_findings
|
|
9
|
+
|
|
10
|
+
# Fixed disclaimer string attached to every analysis result (returned by
# analyze() under the 'disclaimer' key). Kept as one module-level constant so
# the wording is identical everywhere it is surfaced.
DISCLAIMER = (
    "WARNING: This is a preliminary AI-based analysis only. "
    "Reference ranges are general estimates and may vary by age, condition, "
    "and recording equipment. Must be confirmed by a qualified physician."
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def analyze(mri_path, eeg_path, tsv_elec_path=None, **kwargs):
    """Run the full MRI + EEG pipeline and return one combined result dict.

    Parameters
    ----------
    mri_path : str
        Path to the structural MRI volume; forwarded to ``process_mri``.
    eeg_path : str
        Path to the EEG recording; forwarded to ``load_eeg``.
    tsv_elec_path : str, optional
        Optional electrode-coordinate file forwarded to ``load_eeg``.
    **kwargs
        Accepted for forward compatibility; currently unused.

    Returns
    -------
    dict
        Brain mesh ('verts'/'faces'), per-band source results, per-band
        waveforms, MRI thickness/asymmetry metrics, rule-based clinical
        findings, accumulated warnings, the debug log, and the fixed
        DISCLAIMER text.
    """
    debug_log = []
    warnings = []

    # Stage 1: MRI → mesh + thickness/asymmetry metrics.
    debug_log.append("🧠 Processing MRI...")
    mri_result = process_mri(mri_path)
    debug_log.append(f"🧠 MRI voxel size: {mri_result['voxel_size']:.2f}mm → threshold: {mri_result['threshold_pct']}%")

    # Stage 2: EEG loading + montage; its logs/warnings are merged into ours.
    debug_log.append("📡 Loading EEG...")
    raw, eeg_logs, eeg_warnings = load_eeg(eeg_path, tsv_elec_path=tsv_elec_path)
    debug_log.extend(eeg_logs)
    warnings.extend(eeg_warnings)

    # Stage 3: sLORETA source localization on the prepared Raw object.
    debug_log.append("🔬 Computing source localization (sLORETA)...")
    results, waveform, src_logs = compute_source_localization(raw, debug_log=debug_log)
    debug_log.extend(src_logs)

    # Stage 4: rule-based findings combining EEG band stats and MRI metrics.
    findings = compute_clinical_findings(
        results,
        mri_result['thickness'],
        mri_result['asymmetry']
    )

    return {
        'verts': mri_result['verts'],
        'faces': mri_result['faces'],
        'results': results,
        'waveform': waveform,
        'thickness': mri_result['thickness'],
        'asymmetry': mri_result['asymmetry'],
        'clinical_findings': findings,
        'warnings': warnings,
        'debug_log': debug_log,
        'disclaimer': DISCLAIMER,
    }
|
cydms-0.1.0/cydms/eeg.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
"""
|
|
2
|
+
cydms.eeg
|
|
3
|
+
---------
|
|
4
|
+
EEG loading, montage setup, channel matching
|
|
5
|
+
"""
|
|
6
|
+
import numpy as np
|
|
7
|
+
import mne
|
|
8
|
+
import pandas as pd
|
|
9
|
+
import re
|
|
10
|
+
import os
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def load_eeg(eeg_path, tsv_elec_path=None):
    """Load an EEG recording, attach electrode positions, and preprocess it.

    Position resolution order: (1) coordinates embedded in the EEG file,
    (2) a user-supplied coordinate file (name-matched, then order-matched),
    (3) the standard_1020 template montage (name-matched, then order-matched).

    Parameters
    ----------
    eeg_path : str
        Path to a BrainVision (.vhdr), EDF (.edf), BioSemi (.bdf) or
        EEGLAB (.set) recording.
    tsv_elec_path : str, optional
        Optional electrode-coordinate table (TSV/CSV/fixed-width).

    Returns
    -------
    (raw, debug_log, warnings)
        raw: preprocessed mne Raw (1-45 Hz band-pass, EEG-only channels with
        valid positions, average reference applied).
        debug_log / warnings: lists of status messages (mixed Thai/English).

    Raises
    ------
    ValueError
        When no usable electrode coordinates can be established.
    """
    debug_log = []
    warnings = []

    # --- Format dispatch by file extension; anything unknown is tried as EEGLAB.
    eeg_ext = os.path.splitext(eeg_path)[1].lower()
    if eeg_ext == '.vhdr':
        raw = mne.io.read_raw_brainvision(eeg_path, preload=True, verbose=False)
        debug_log.append("📄 EEG format: BrainVision (.vhdr)")
    elif eeg_ext == '.edf':
        raw = mne.io.read_raw_edf(eeg_path, preload=True, verbose=False)
        debug_log.append("📄 EEG format: EDF (.edf)")
    elif eeg_ext == '.bdf':
        raw = mne.io.read_raw_bdf(eeg_path, preload=True, verbose=False)
        debug_log.append("📄 EEG format: BioSemi (.bdf)")
    else:
        try:
            raw = mne.io.read_raw_eeglab(eeg_path, preload=True, verbose=False)
            debug_log.append("📄 EEG format: EEGLAB (.set) — raw")
        except Exception as e_raw:
            # EEGLAB epoched files fail the raw reader with a trials/epochs
            # message; fall back to the epochs reader and synthesize a
            # continuous signal (the average evoked, tiled 10x in time).
            if 'number of trials' in str(e_raw).lower() or 'epochs' in str(e_raw).lower():
                epochs = mne.io.read_epochs_eeglab(eeg_path, verbose=False)
                raw = mne.EpochsArray(epochs.get_data(), epochs.info).average().interpolate_bads()
                raw = mne.io.RawArray(np.tile(raw.data, (1, 10)), raw.info)
                debug_log.append(f"📄 EEG format: EEGLAB (.set) — epoched ({len(epochs)} trials)")
                warnings.append("⚠️ Epoched data detected — converted to continuous automatically")
            else:
                raise e_raw

    # --- Check whether the recording already carries usable 3D positions
    # (non-zero, non-NaN) for at least 3 channels; if so the coordinate
    # file is not needed at all.
    has_builtin_pos = False
    try:
        builtin_pos = {ch['ch_name']: ch['loc'][:3] for ch in raw.info['chs']
                       if not np.all(ch['loc'][:3] == 0) and not np.any(np.isnan(ch['loc'][:3]))}
        if len(builtin_pos) >= 3:
            has_builtin_pos = True
            debug_log.append(f"✅ พบพิกัดในไฟล์ EEG เลย: {len(builtin_pos)} channels — ไม่ต้องใช้ TSV")
    except:
        pass

    debug_log.append(f"⏱️ ใช้ข้อมูลทั้งหมด {raw.times[-1]:.1f}s")

    # Mark obvious ocular channels so they are dropped by the EEG-only pick.
    for ch in raw.ch_names:
        if any(x in ch.lower() for x in ['heog', 'veog']):
            raw.set_channel_types({ch: 'eog'})

    applied_tsv = False
    raw_names_before = list(raw.ch_names)

    if has_builtin_pos:
        applied_tsv = True

    # --- Attempt to apply positions from the user-supplied coordinate file.
    if not applied_tsv and tsv_elec_path:
        try:
            def try_parse_no_separator(lines):
                # Last-resort parser for files without a usable delimiter:
                # regex-extract "<label> <x> <y> <z>" from each line.
                rows = []
                pat = re.compile(
                    r'([A-Za-z]+\d{1,4}|[A-Za-z]{1,4}\d{0,4})'
                    r'.*?'
                    r'(-?\d{1,3}\.\d{1,4})'
                    r'[\s,]*'
                    r'(-?\d{1,3}\.\d{1,4})'
                    r'[\s,]*'
                    r'(-?\d{1,3}\.\d{1,4})',
                    re.IGNORECASE
                )
                for line in lines:
                    line = line.strip()
                    if not line: continue
                    m = pat.search(line)
                    if m:
                        rows.append([m.group(1), m.group(2), m.group(3), m.group(4)])
                if not rows:
                    return None
                df = pd.DataFrame(rows, columns=['name', 'x', 'y', 'z'])
                return df.replace(['n/a', 'N/A', 'nan'], np.nan)

            def load_file(enc):
                # Whitespace-split parser with header auto-detection; falls
                # back to the regex parser when rows look undelimited.
                # cp874 is the Thai Windows codepage (see caller below).
                with open(tsv_elec_path, 'r', encoding=enc, errors='ignore') as f:
                    lines = [l.strip() for l in f.readlines() if l.strip()]
                parsed = [list(filter(None, re.split(r'\s+', l))) for l in lines]
                data_rows = [p for p in parsed if p and str(p[0]).upper() not in ['NAME','LABEL','N/A']]
                looks_no_sep = data_rows and all(len(p) < 3 for p in data_rows[:5])
                if looks_no_sep:
                    df_try = try_parse_no_separator(lines)
                    if df_try is not None and len(df_try) > 0:
                        return df_try
                if not parsed: return pd.DataFrame()
                df = pd.DataFrame(parsed)
                first_row = [str(x).lower() for x in parsed[0]]
                if any(c in first_row for c in ['x', 'y', 'z', 'name', 'label']):
                    df.columns = df.iloc[0]
                    df = df[1:].reset_index(drop=True)
                else:
                    df.columns = ['name', 'x', 'y', 'z'] + [f'extra_{i}' for i in range(len(df.columns)-4)]
                return df.replace(['n/a','N/A','nan'], np.nan)

            try:
                df_elec = load_file('utf-8-sig')
            except:
                df_elec = load_file('cp874')

            debug_log.append(f"📄 TSV: อ่านได้ {len(df_elec)} แถว | คอลัมน์: {', '.join(map(str, df_elec.columns.tolist()))}")

            cols_lower = [str(c).lower().strip() for c in df_elec.columns]
            has_header = any(re.fullmatch(r'x|y|z|name|label|pos_x|pos_y|pos_z', c) for c in cols_lower)

            # Re-read with pandas' sniffing when no recognizable header was
            # found, so delimiter variants (comma, tab, ...) are handled.
            if not has_header:
                df_elec = pd.read_csv(tsv_elec_path, sep=None, engine='python', header=None, na_values=['n/a','N/A','nan'])
                if len(df_elec.columns) <= 1:
                    df_elec = pd.read_csv(tsv_elec_path, sep=r'\s+', engine='python', header=None, na_values=['n/a','N/A','nan'])
                df_elec.columns = ['name', 'x', 'y', 'z'] + [f'extra_{i}' for i in range(len(df_elec.columns)-4)]

            # Resolve the name/x/y/z columns from common header aliases;
            # positional fallback assumes column order name,x,y,z.
            cols = {str(c).lower().strip(): c for c in df_elec.columns}
            name_col = next((cols[c] for c in ['name', 'label', 'channel', 'ch_name', 'electrode'] if c in cols), df_elec.columns[0])
            x_col = next((cols[c] for c in ['x', 'pos_x', 'coordinate_x', 'left-right'] if c in cols), None)
            y_col = next((cols[c] for c in ['y', 'pos_y', 'coordinate_y', 'posterior-anterior'] if c in cols), None)
            z_col = next((cols[c] for c in ['z', 'pos_z', 'coordinate_z', 'inferior-superior'] if c in cols), None)

            if x_col is None and len(df_elec.columns) >= 4:
                x_col, y_col, z_col = df_elec.columns[1], df_elec.columns[2], df_elec.columns[3]

            if not all([x_col, y_col, z_col]):
                col_names = ", ".join(map(str, df_elec.columns))
                raise ValueError(f"ระบบหาตำแหน่ง x, y, z ในไฟล์ไม่พบครับ\n\n📍 ตารางที่ตรวจพบ: {col_names}")

            def normalize(name):
                # Strip common channel-name decorations (EEG/CH/... prefixes
                # and suffixes) for tolerant name matching.
                s = str(name).upper().strip()
                s = re.sub(r'^(EEG|CH|ELE|REF|EOG)\s*0*', '', s)
                s = re.sub(r'[-_\s](EEG|CH|ELE|REF|EOG)$', '', s)
                return s

            def get_digits(name):
                # All digits of a name joined and de-zero-padded ("Ch007"->"7").
                digits = "".join(re.findall(r'\d+', str(name)))
                return str(int(digits)) if digits else ""

            final_ch_pos = {}
            rename_map = {}
            tsv_samples = []

            # Match each coordinate row to a recording channel by, in order:
            # exact case-insensitive name, normalized name, digit signature.
            for _, row in df_elec.iterrows():
                tsv_name_raw = str(row[name_col]).strip()
                if tsv_name_raw.lower() in ['n/a', 'nan', '']: continue
                tsv_samples.append(tsv_name_raw)

                matched_raw = None
                for r_name in raw_names_before:
                    if r_name.upper() == tsv_name_raw.upper():
                        matched_raw = r_name; break
                if not matched_raw:
                    tsv_norm = normalize(tsv_name_raw)
                    for r_name in raw_names_before:
                        if normalize(r_name) == tsv_norm:
                            matched_raw = r_name; break
                if not matched_raw:
                    tsv_digits = get_digits(tsv_name_raw)
                    if tsv_digits:
                        for r_name in raw_names_before:
                            if get_digits(r_name) == tsv_digits:
                                matched_raw = r_name; break

                if matched_raw:
                    try:
                        px, py, pz = float(row[x_col]), float(row[y_col]), float(row[z_col])
                    except (ValueError, TypeError):
                        continue
                    # /100: presumably converts cm to m (MNE expects meters)
                    # — TODO confirm the expected unit of the input file.
                    pos = np.array([px, py, pz]) / 100.0
                    final_ch_pos[tsv_name_raw] = pos
                    rename_map[matched_raw] = tsv_name_raw

            if rename_map:
                # Name-based matching succeeded: rename channels to the file's
                # labels and build a montage with template fiducials.
                raw.rename_channels(rename_map)
                raw.set_channel_types({ch: 'eeg' for ch in rename_map.values()})
                std_1020 = mne.channels.make_standard_montage('standard_1020')
                fid_pos = std_1020.get_positions()
                montage = mne.channels.make_dig_montage(
                    ch_pos=final_ch_pos,
                    nasion=fid_pos.get('nasion'), lpa=fid_pos.get('lpa'), rpa=fid_pos.get('rpa'),
                    coord_frame='head'
                )
                raw.set_montage(montage, on_missing='warn')
                applied_tsv = True
                debug_log.append(f"✅ จับคู่ชื่อสำเร็จ: {len(rename_map)} electrodes")
            else:
                # No names matched: pair coordinate rows to channels purely by
                # row order (row i -> channel i), skipping non-numeric rows.
                valid_rows = []
                for _, row in df_elec.iterrows():
                    try:
                        px, py, pz = float(row[x_col]), float(row[y_col]), float(row[z_col])
                        valid_rows.append((px, py, pz))
                    except (ValueError, TypeError):
                        valid_rows.append(None)

                order_ch_pos = {}
                order_rename = {}
                paired = 0
                for i, r_name in enumerate(raw_names_before):
                    if i >= len(valid_rows): break
                    if valid_rows[i] is None: continue
                    px, py, pz = valid_rows[i]
                    tsv_name_order = str(df_elec.iloc[i][name_col]).strip()
                    order_ch_pos[tsv_name_order] = np.array([px, py, pz]) / 100.0
                    order_rename[r_name] = tsv_name_order
                    paired += 1

                if paired >= 3:
                    raw.rename_channels(order_rename)
                    raw.set_channel_types({ch: 'eeg' for ch in order_rename.values()})
                    std_1020 = mne.channels.make_standard_montage('standard_1020')
                    fid_pos = std_1020.get_positions()
                    montage = mne.channels.make_dig_montage(
                        ch_pos=order_ch_pos,
                        nasion=fid_pos.get('nasion'), lpa=fid_pos.get('lpa'), rpa=fid_pos.get('rpa'),
                        coord_frame='head'
                    )
                    raw.set_montage(montage, on_missing='warn')
                    applied_tsv = True
                    debug_log.append(f"⚠️ จับคู่ด้วยลำดับ: {paired} electrodes")
                    warnings.append(f"⚠️ ระบบจับคู่ขั้วไฟฟ้าด้วยลำดับ เนื่องจากชื่อไม่ตรงกัน")
                else:
                    raise ValueError(f"ไม่สามารถจับคู่ขั้วไฟฟ้าได้ครับ")

        except Exception as e_tsv:
            # ValueError carries a user-facing message and must propagate;
            # any other parse failure just logs and falls through to the
            # standard_1020 fallback below.
            if isinstance(e_tsv, ValueError): raise e_tsv
            debug_log.append(f"❌ TSV error: {str(e_tsv)[:200]}")

    # --- Final fallback: standard 10-20 template montage.
    matched_std = []
    if not applied_tsv:
        std = mne.channels.make_standard_montage('standard_1020')
        std_names_upper = [ch.upper() for ch in std.ch_names]
        matched_std = [ch for ch in raw.ch_names if ch.upper() in std_names_upper]

        if len(matched_std) >= 3:
            raw.set_montage(std, on_missing='ignore')
            debug_log.append(f"✅ ใช้ standard_1020: match ได้ {len(matched_std)} channels")
        else:
            # Names don't match the template either: pair channel i with the
            # template's i-th electrode (order-based, clearly approximate).
            std_ch_names = std.ch_names
            ch_pos_std = {ch: std.get_positions()['ch_pos'][ch] for ch in std_ch_names}
            order_ch_pos = {}
            order_rename = {}
            for i, r_name in enumerate(raw.ch_names):
                if i >= len(std_ch_names): break
                std_name = std_ch_names[i]
                order_ch_pos[std_name] = ch_pos_std[std_name]
                order_rename[r_name] = std_name

            if len(order_rename) >= 3:
                raw.rename_channels(order_rename)
                raw.set_montage(std, on_missing='ignore')
                applied_tsv = True
                warnings.append(f"⚠️ ไม่มีไฟล์ TSV — ระบบจับคู่ด้วยลำดับกับ standard_1020")
                debug_log.append(f"⚠️ order-based กับ standard_1020: {len(order_rename)} channels")
            else:
                raise ValueError(f"ชื่อขั้วไฟฟ้าในไฟล์ไม่ตรงกับมาตรฐาน 10-20 และไม่มีไฟล์พิกัดที่ใช้งานได้ครับ")

    # --- Preprocessing: 1-45 Hz band-pass, EEG channels only.
    raw.filter(1, 45, verbose=False)
    raw.pick_types(eeg=True, exclude='bads')

    # Keep only channels whose final positions are valid (non-zero, non-NaN).
    valid_chs = [ch['ch_name'] for ch in raw.info['chs']
                 if not np.all(ch['loc'][:3] == 0) and not np.any(np.isnan(ch['loc'][:3]))]

    if len(valid_chs) < 3:
        raise ValueError(f"พบขั้วไฟฟ้าที่มีพิกัดน้อยเกินไป (พบแค่ {len(valid_chs)} จุด)")

    raw.pick_channels(valid_chs)
    raw.set_eeg_reference('average', projection=True).apply_proj()

    return raw, debug_log, warnings
|
cydms-0.1.0/cydms/mri.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""
|
|
2
|
+
cydms.mri
|
|
3
|
+
---------
|
|
4
|
+
MRI processing — skull stripping, mesh generation, cortical thickness
|
|
5
|
+
"""
|
|
6
|
+
import numpy as np
|
|
7
|
+
import nibabel as nib
|
|
8
|
+
import gc
|
|
9
|
+
from scipy import ndimage
|
|
10
|
+
from scipy.ndimage import label
|
|
11
|
+
from skimage import measure
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def process_mri(mri_path):
    """Skull-strip a structural MRI, build a surface mesh, and compute coarse
    per-lobe "thickness" proxies plus a left/right asymmetry score.

    Parameters
    ----------
    mri_path : str
        Path to a NIfTI volume readable by nibabel.

    Returns
    -------
    dict
        'verts'/'faces': marching-cubes surface mesh (centered, scaled x2),
        'thickness': percent of in-mask voxels per coarse lobe box,
        'asymmetry': left/right voxel-count asymmetry in percent,
        'voxel_size': smallest voxel dimension (mm),
        'threshold_pct': intensity percentile used for the brain mask.
    """
    img = nib.load(mri_path)
    m_data = img.get_fdata().astype(np.float32)

    voxel_size = np.abs(img.header.get_zooms()[:3])
    min_vox = float(np.min(voxel_size))
    # Higher-resolution scans (<~0.9 mm voxels) use a looser 50th-percentile
    # threshold; standard-resolution scans use a stricter 77th percentile.
    lo_pct = 77 if min_vox >= 0.9 else 50
    nonzero = m_data[m_data > 0]
    lo_thresh = np.percentile(nonzero, lo_pct)
    hi_thresh = np.percentile(nonzero, 98)

    # Rough skull strip: keep mid-intensity voxels, then clean up with
    # fill/erode/dilate to detach scalp and smooth the mask.
    brain_mask = (m_data > lo_thresh) & (m_data < hi_thresh)
    brain_mask = ndimage.binary_fill_holes(brain_mask)
    brain_mask = ndimage.binary_erosion(brain_mask, iterations=5)
    brain_mask = ndimage.binary_dilation(brain_mask, iterations=2)
    brain_mask = ndimage.binary_fill_holes(brain_mask)

    # Keep only the largest connected component (assumed to be the brain).
    labeled, n = label(brain_mask)
    if n > 0:
        # Single O(n) pass over the label image instead of one full-volume
        # scan per component; [1:] drops the background label 0.
        sizes = np.bincount(labeled.ravel())[1:]
        brain_mask = labeled == (np.argmax(sizes) + 1)

    # Thickness proxy works on a 4x-downsampled copy of the SAME volume.
    # (Previously the file was re-loaded from disk here; the data is
    # identical, so reuse the in-memory array and skip the second load.)
    mri_ds = m_data[::4, ::4, ::4]
    ds_nonzero = mri_ds[mri_ds > 0]
    lo_t = np.percentile(ds_nonzero, 20)
    hi_t = np.percentile(ds_nonzero, 98)
    mask_t = (mri_ds > lo_t) & (mri_ds < hi_t)
    mask_t = ndimage.binary_fill_holes(mask_t)
    mask_t = ndimage.binary_erosion(mask_t, iterations=1)

    m_clean = m_data * brain_mask.astype(np.float32)
    del m_data; gc.collect()

    # Surface mesh of the stripped brain at the 40th intensity percentile.
    verts, all_faces, _, _ = measure.marching_cubes(m_clean, level=np.percentile(m_clean[m_clean > 0], 40))
    v_center = (verts.min(axis=0) + verts.max(axis=0)) / 2
    verts_final = (verts - v_center) * 2.0
    del m_clean; gc.collect()

    def get_lobe_mask(arr, lobe):
        # Crude axis-aligned lobe boxes by fractional position along the
        # volume axes; assumes a roughly standard orientation — TODO confirm.
        h = arr.shape
        if lobe == 'frontal': return arr[:, int(h[1]*0.6):, :]
        if lobe == 'occipital': return arr[:, :int(h[1]*0.2), :]
        if lobe == 'temporal': return arr[:, int(h[1]*0.2):int(h[1]*0.5), :int(h[2]*0.4)]
        return arr[:, int(h[1]*0.2):int(h[1]*0.6), int(h[2]*0.3):]

    # "Thickness" here is the percentage of in-mask voxels inside each lobe
    # box — a tissue-density proxy, not true cortical thickness.
    thickness = {}
    for lobe in ['frontal', 'occipital', 'temporal', 'parietal']:
        lobe_mask = get_lobe_mask(mask_t, lobe)
        thickness[lobe] = float(np.sum(lobe_mask)) / max(lobe_mask.size, 1) * 100

    # Left/right asymmetry of the tissue mask along the first axis.
    left_sum = float(np.sum(mask_t[:mask_t.shape[0]//2, :, :]))
    right_sum = float(np.sum(mask_t[mask_t.shape[0]//2:, :, :]))
    asymmetry = abs(left_sum - right_sum) / max(left_sum + right_sum, 1)
    del mri_ds, mask_t; gc.collect()

    return {
        'verts': verts_final.tolist(),
        'faces': all_faces.tolist(),
        'thickness': thickness,
        'asymmetry': round(asymmetry * 100, 2),
        'voxel_size': min_vox,
        'threshold_pct': lo_pct,
    }
|
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
"""
|
|
2
|
+
cydms.source
|
|
3
|
+
------------
|
|
4
|
+
Forward solution, inverse solution (sLORETA), band power, clean segment selection
|
|
5
|
+
"""
|
|
6
|
+
import numpy as np
|
|
7
|
+
import mne
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# EEG frequency bands in Hz (lower, upper) used for filtering and reporting.
BANDS = {
    'Delta': (1, 4),
    'Theta': (4, 8),
    'Alpha': (8, 13),
    'Beta': (13, 30),
    'Gamma': (30, 45),
}

# Display color (hex RGB) per band for the visualization layer.
COLORS = {
    'Delta': '#ff3333',
    'Theta': '#ffcc00',
    'Alpha': '#33ff33',
    'Beta': '#3333ff',
    'Gamma': '#ff33ff',
}

# Rough normal ranges of relative band power in percent (lower, upper).
# NOTE(review): general estimates only — per the DISCLAIMER these vary with
# age, condition, and equipment; confirm against clinical reference values.
NORMAL_RANGES = {
    'Delta': (5, 25),
    'Theta': (5, 25),
    'Alpha': (25, 45),
    'Beta': (10, 30),
    'Gamma': (2, 15),
}

# Human-readable (Thai) interpretation text per band and status
# ('high'/'normal'/'low' relative to NORMAL_RANGES).
DIAG_MAP = {
    'Delta': {
        'high': '⚠️ สูงกว่าปกติ: เสี่ยง ADHD, บาดเจ็บสมอง, หรือง่วงมาก',
        'normal': '✅ ปกติ: สมองพักผ่อนหรือนอนหลับลึก',
        'low': '⚠️ ต่ำกว่าปกติ: อาจนอนหลับไม่พอ',
    },
    'Theta': {
        'high': '⚠️ สูงกว่าปกติ: เสี่ยง ADHD, วิตกกังวล, หรือซึมเศร้า',
        'normal': '✅ ปกติ: ผ่อนคลาย ใกล้หลับ',
        'low': '⚠️ ต่ำกว่าปกติ: ตื่นตัวสูงมากผิดปกติ',
    },
    'Alpha': {
        'high': '✅ สูงกว่าปกติ: ผ่อนคลายดีเยี่ยม สมาธิดี',
        'normal': '✅ ปกติ: สมองทำงานสมดุล',
        'low': '⚠️ ต่ำกว่าปกติ: เครียด วิตกกังวล หรือสมาธิลดลง',
    },
    'Beta': {
        'high': '⚠️ สูงกว่าปกติ: เครียดสูง วิตกกังวล หรือ OCD',
        'normal': '✅ ปกติ: ตื่นตัว ทำงานได้ดี',
        'low': '⚠️ ต่ำกว่าปกติ: ง่วงนอน หรือขาดสมาธิ',
    },
    'Gamma': {
        'high': '⚠️ สูงกว่าปกติ: Hyperactivity หรือเครียดสูงมาก',
        'normal': '✅ ปกติ: ประมวลผลข้อมูลดี',
        'low': '⚠️ ต่ำกว่าปกติ: การประมวลผลสมองช้าลง',
    },
}
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def get_lobe_name(pos):
    """Map a 3D position to a coarse lobe label using y/z thresholds.

    Only the anterior-posterior (pos[1]) and inferior-superior (pos[2])
    components are inspected; the x component is ignored.
    """
    anterior = pos[1]
    height = pos[2]
    if anterior > 25:
        return "Frontal Lobe"
    if anterior < -45:
        return "Occipital Lobe"
    if height < -15:
        return "Temporal Lobe"
    return "Parietal Lobe"
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def find_clean_segment(band_data, sfreq, win_sec=25.0, step_sec=5.0, debug_log=None):
    """Select an analysis window whose signal variance sits closest to the
    median variance over all candidate windows (i.e. a "typical", low-artifact
    stretch of the recording).

    Returns ``((start, stop), 'normal')`` in samples on success, or
    ``(None, 'low')`` when the recording is too short or too noisy to pick
    a representative window.
    """
    n_total = band_data.shape[1]
    win_len = int(win_sec * sfreq)
    hop = int(step_sec * sfreq)

    # Too short to carve out even one full window.
    if n_total <= win_len:
        return None, 'low'

    # Variance of every candidate window, advancing by `hop` samples.
    starts = list(range(0, n_total - win_len, hop))
    win_vars = np.array([float(np.var(band_data[:, s:s + win_len])) for s in starts])

    med = np.median(win_vars)
    dist = np.abs(win_vars - med)
    best = int(np.argmin(dist))

    # Even the most typical window deviates wildly from the median:
    # the file is too noisy — signal the caller to use everything.
    if dist[best] > med * 2:
        if debug_log is not None:
            debug_log.append("⚠️ ไฟล์ noisy มาก — ใช้ทั้งไฟล์แทน")
        return None, 'low'

    begin = starts[best]
    return (begin, begin + win_len), 'normal'
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def compute_source_localization(raw, debug_log=None):
    """Localize band-limited EEG activity on the fsaverage cortex via sLORETA.

    Parameters
    ----------
    raw : mne.io.Raw
        Preprocessed EEG with a valid montage and average reference.
    debug_log : list of str, optional
        Kept for interface compatibility; messages generated here are
        appended to the returned ``src_logs`` list.

    Returns
    -------
    (final_results, waveform, src_logs)
        final_results: per-band dict with relative power ('val'), peak source
        position, lobe label, status vs NORMAL_RANGES, and diagnosis text.
        waveform: downsampled, peak-normalized per-band mean traces (may be
        empty if the cosmetic waveform step fails).
        src_logs: log messages produced during computation.

    Raises
    ------
    RuntimeError
        When the forward solution cannot be computed.
    """
    import os  # local import: the module's top level only imports numpy/mne

    if debug_log is None:
        debug_log = []
    src_logs = []

    # fsaverage template anatomy — no per-subject FreeSurfer run required.
    fs_dir = mne.datasets.fetch_fsaverage(verbose=False)
    subjects_dir = os.path.dirname(fs_dir)
    # oct2 is an extremely coarse source space: fast, low spatial resolution.
    src = mne.setup_source_space('fsaverage', spacing='oct2',
                                 subjects_dir=subjects_dir, add_dist=False, verbose=False)
    bem = os.path.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')

    try:
        fwd = mne.make_forward_solution(raw.info, trans='fsaverage', src=src,
                                        bem=bem, eeg=True, mindist=2.0,
                                        ignore_ref=False, verbose=False)
    except Exception as e_fwd:
        err_msg = str(e_fwd)
        if "No EEG channels" in err_msg:
            err_msg += " (Make sure your SET file contains EEG channels)"
        raise RuntimeError(f"Forward solution failed: {err_msg}")

    # Ad-hoc noise covariance: no baseline / empty-room recording available.
    inv = mne.minimum_norm.make_inverse_operator(raw.info, fwd,
                                                 mne.make_ad_hoc_cov(raw.info),
                                                 verbose=False)
    sfreq = raw.info['sfreq']
    final_results = {}
    total_pwr = 0
    raw_results = []

    for name, (fmin, fmax) in BANDS.items():
        raw_filtered = raw.copy().filter(fmin, fmax, verbose=False)
        band_data = raw_filtered.get_data()

        # Pick a representative low-artifact window for the power estimate.
        # NOTE(review): the inverse below still runs on the full recording;
        # only the band-power metric uses the clean segment — confirm intent.
        seg_range, conf = find_clean_segment(band_data, sfreq, debug_log=src_logs)
        if seg_range:
            clean_data = band_data[:, seg_range[0]:seg_range[1]]
            src_logs.append(f"✅ {name}: ใช้ช่วง {seg_range[0]/sfreq:.1f}s-{seg_range[1]/sfreq:.1f}s")
        else:
            clean_data = band_data
            src_logs.append(f"{'⚠️' if conf=='low' else '✅'} {name}: ใช้ทั้งไฟล์ (confidence: {conf})")

        stc = mne.minimum_norm.apply_inverse_raw(raw_filtered, inv,
                                                 lambda2=1.0/9.0,
                                                 method='sLORETA', verbose=False)
        d_mean = np.mean(np.abs(stc.data), axis=1)
        pk = int(np.argmax(d_mean))
        # BUGFIX: stc.data rows correspond to the *used* vertices listed in
        # stc.vertices, not to consecutive rows of src[i]['rr']. The previous
        # code compared pk against len(src[0]['rr']) (the full surface vertex
        # count), so it indexed a wrong vertex and could never select the
        # right hemisphere. Map through stc.vertices instead.
        n_lh = len(stc.vertices[0])
        if pk < n_lh:
            p_std = src[0]['rr'][stc.vertices[0][pk]]
        else:
            p_std = src[1]['rr'][stc.vertices[1][pk - n_lh]]

        pwr = float(np.mean(np.abs(clean_data)))
        total_pwr += pwr
        raw_results.append({
            'name': name,
            'pos': (p_std * 100 * 2.5).tolist(),   # scaled for the display mesh
            'pos_raw': (p_std * 100).tolist(),     # unscaled coords for lobe lookup
            'pwr': pwr,
            'confidence': conf,
        })

    # Convert absolute band power to relative percentages and classify each
    # band against its normal range.
    for item in raw_results:
        pct = round((item['pwr'] / (total_pwr if total_pwr > 0 else 1)) * 100, 2)
        lobe = get_lobe_name(item['pos_raw'])
        name = item['name']
        lo, hi = NORMAL_RANGES.get(name, (10, 30))
        status = 'high' if pct > hi else ('low' if pct < lo else 'normal')
        diag = DIAG_MAP.get(name, {}).get(status, '')
        final_results[name] = {
            'val': pct,
            'pos': item['pos'],
            'color': COLORS[name],
            'lobe': lobe,
            'status': status,
            'diag': diag,
            'confidence': item.get('confidence', 'normal'),
        }

    # Waveform preview: per-band mean trace, downsampled to ~50 Hz and
    # peak-normalized. Purely cosmetic — never let it break the analysis.
    waveform = {}
    try:
        ds = max(1, int(sfreq // 50))
        waveform['times'] = raw.times[::ds].tolist()
        for band_name, (fmin, fmax) in BANDS.items():
            band_data = raw.copy().filter(fmin, fmax, verbose=False).get_data()
            avg = np.mean(band_data, axis=0)[::ds]
            mx = np.max(np.abs(avg)) or 1  # guard against an all-zero trace
            waveform[band_name] = (avg / mx).tolist()
    except Exception:
        pass

    return final_results, waveform, src_logs
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def compute_clinical_findings(results, thickness, asymmetry_pct):
    """Apply rule-based heuristics combining per-band EEG results with MRI
    thickness/asymmetry metrics, producing a list of candidate findings.

    Parameters
    ----------
    results : dict
        Per-band dicts as produced by compute_source_localization
        (keys 'val', 'status', 'lobe' are read).
    thickness : dict
        Per-lobe thickness proxy values from process_mri.
    asymmetry_pct : float
        Left/right asymmetry in percent from process_mri.

    Returns
    -------
    list of dict
        Each with 'condition', 'confidence', 'evidence', 'recommend'.
        Always non-empty (a "normal" entry is returned when no rule fires).
    """
    asymmetry = asymmetry_pct / 100.0

    def stat(band, key, default):
        # Tolerant lookup: missing bands/fields fall back to the default.
        return results.get(band, {}).get(key, default)

    delta_pct = stat('Delta', 'val', 0)
    theta_pct = stat('Theta', 'val', 0)
    alpha_pct = stat('Alpha', 'val', 0)
    beta_pct = stat('Beta', 'val', 0)
    gamma_pct = stat('Gamma', 'val', 0)

    delta_st = stat('Delta', 'status', 'normal')
    theta_st = stat('Theta', 'status', 'normal')
    alpha_st = stat('Alpha', 'status', 'normal')
    beta_st = stat('Beta', 'status', 'normal')
    gamma_st = stat('Gamma', 'status', 'normal')
    delta_lobe = stat('Delta', 'lobe', '')

    # MRI-derived flags: a lobe clearly thicker/thinner than the mean, and
    # marked left/right asymmetry (> 8%).
    avg_thickness = np.mean(list(thickness.values()))
    mri_focal_thick = any(v > avg_thickness * 1.3 for v in thickness.values())
    mri_focal_thin = any(v < avg_thickness * 0.7 for v in thickness.values())
    mri_asymmetry = asymmetry > 0.08

    findings = []

    # --- Tumor rules: high Delta, with/without MRI thickness support.
    if delta_st == 'high' and mri_focal_thick:
        findings.append({
            'condition': 'Suspected Brain Tumor',
            'confidence': 'HIGH' if delta_pct > 30 else 'MEDIUM',
            'evidence': [f'Delta abnormally high ({delta_pct}%) at {delta_lobe}', 'MRI shows abnormal cortical thickness'],
            'recommend': 'Recommend MRI with contrast for further evaluation',
        })
    elif delta_st == 'high':
        findings.append({
            'condition': 'Possible Brain Tumor',
            'confidence': 'LOW',
            'evidence': [f'Delta abnormally high ({delta_pct}%) at {delta_lobe}', 'No clear MRI thickness abnormality'],
            'recommend': 'Follow up and further examination recommended',
        })

    # --- Epilepsy rules: high Delta plus thinning/asymmetry (or neither).
    if delta_st == 'high' and (mri_focal_thin or mri_asymmetry):
        findings.append({
            'condition': 'Suspected Epilepsy',
            'confidence': 'HIGH' if (mri_focal_thin and mri_asymmetry) else 'MEDIUM',
            'evidence': [f'Delta abnormally high ({delta_pct}%)',
                         'MRI shows cortical thinning or asymmetry' if mri_focal_thin else 'MRI shows asymmetry'],
            'recommend': 'Recommend prolonged EEG monitoring and neurology consult',
        })
    elif delta_st == 'high' and not mri_focal_thick:
        findings.append({
            'condition': 'Possible Epilepsy',
            'confidence': 'LOW',
            'evidence': [f'Delta abnormally high ({delta_pct}%)', 'No clear MRI abnormality'],
            'recommend': 'Monitor symptoms and consult a physician',
        })

    # --- ADHD pattern: elevated Theta with suppressed Alpha.
    # NOTE(review): evidence text cites Beta but the rule does not actually
    # test beta_st — verify the intended condition with the author.
    if theta_st == 'high' and alpha_st == 'low':
        findings.append({
            'condition': 'Suspected ADHD',
            'confidence': 'MEDIUM',
            'evidence': [f'Theta elevated ({theta_pct}%)', f'Beta elevated ({beta_pct}%)', f'Alpha low ({alpha_pct}%)'],
            'recommend': 'Recommend psychological assessment and physician consult',
        })

    # --- Slowing pattern suggestive of cognitive decline.
    if delta_st == 'high' and theta_st == 'high' and alpha_st == 'low':
        findings.append({
            'condition': 'Suspected Cognitive Decline / Dementia',
            'confidence': 'MEDIUM',
            'evidence': [f'Delta elevated ({delta_pct}%)', f'Theta elevated ({theta_pct}%)', f'Alpha low ({alpha_pct}%)'],
            'recommend': 'Recommend cognitive and memory evaluation',
        })

    # --- Fast-activity pattern suggestive of anxiety/OCD.
    if beta_st == 'high' and gamma_st == 'high':
        findings.append({
            'condition': 'Suspected Anxiety / OCD',
            'confidence': 'LOW',
            'evidence': [f'Beta elevated ({beta_pct}%)', f'Gamma elevated ({gamma_pct}%)'],
            'recommend': 'Recommend mental health evaluation',
        })

    # Guarantee at least one entry so callers can always render a result.
    if not findings:
        findings.append({
            'condition': 'No significant abnormal pattern detected',
            'confidence': 'NORMAL',
            'evidence': ['All brainwave bands within normal range', 'No clear MRI abnormality'],
            'recommend': 'Results appear normal. Please confirm with a specialist.',
        })

    return findings
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cydms
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: EEG + MRI source localization library — no FreeSurfer required
|
|
5
|
+
Requires-Python: >=3.8
|
|
6
|
+
Requires-Dist: numpy
|
|
7
|
+
Requires-Dist: nibabel
|
|
8
|
+
Requires-Dist: scipy
|
|
9
|
+
Requires-Dist: scikit-image
|
|
10
|
+
Requires-Dist: mne
|
|
11
|
+
Requires-Dist: pandas
|
|
12
|
+
Dynamic: requires-dist
|
|
13
|
+
Dynamic: requires-python
|
|
14
|
+
Dynamic: summary
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
cydms
|
cydms-0.1.0/setup.cfg
ADDED
cydms-0.1.0/setup.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# Packaging script for the cydms distribution (EEG + MRI source localization).
# Dependency list mirrors the imports used by the package modules
# (numpy/scipy/scikit-image for MRI, mne/pandas for EEG handling).
from setuptools import setup, find_packages

setup(
    name='cydms',
    version='0.1.0',
    description='EEG + MRI source localization library — no FreeSurfer required',
    packages=find_packages(),
    install_requires=[
        'numpy',
        'nibabel',
        'scipy',
        'scikit-image',
        'mne',
        'pandas',
    ],
    python_requires='>=3.8',
)
|