nimare 0.4.2rc4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchmarks/__init__.py +0 -0
- benchmarks/bench_cbma.py +57 -0
- nimare/__init__.py +45 -0
- nimare/_version.py +21 -0
- nimare/annotate/__init__.py +21 -0
- nimare/annotate/cogat.py +213 -0
- nimare/annotate/gclda.py +924 -0
- nimare/annotate/lda.py +147 -0
- nimare/annotate/text.py +75 -0
- nimare/annotate/utils.py +87 -0
- nimare/base.py +217 -0
- nimare/cli.py +124 -0
- nimare/correct.py +462 -0
- nimare/dataset.py +685 -0
- nimare/decode/__init__.py +33 -0
- nimare/decode/base.py +115 -0
- nimare/decode/continuous.py +462 -0
- nimare/decode/discrete.py +753 -0
- nimare/decode/encode.py +110 -0
- nimare/decode/utils.py +44 -0
- nimare/diagnostics.py +510 -0
- nimare/estimator.py +139 -0
- nimare/extract/__init__.py +19 -0
- nimare/extract/extract.py +466 -0
- nimare/extract/utils.py +295 -0
- nimare/generate.py +331 -0
- nimare/io.py +635 -0
- nimare/meta/__init__.py +39 -0
- nimare/meta/cbma/__init__.py +6 -0
- nimare/meta/cbma/ale.py +951 -0
- nimare/meta/cbma/base.py +947 -0
- nimare/meta/cbma/mkda.py +1361 -0
- nimare/meta/cbmr.py +970 -0
- nimare/meta/ibma.py +1683 -0
- nimare/meta/kernel.py +501 -0
- nimare/meta/models.py +1199 -0
- nimare/meta/utils.py +494 -0
- nimare/nimads.py +492 -0
- nimare/reports/__init__.py +24 -0
- nimare/reports/base.py +664 -0
- nimare/reports/default.yml +123 -0
- nimare/reports/figures.py +651 -0
- nimare/reports/report.tpl +160 -0
- nimare/resources/__init__.py +1 -0
- nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
- nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
- nimare/resources/database_file_manifest.json +142 -0
- nimare/resources/english_spellings.csv +1738 -0
- nimare/resources/filenames.json +32 -0
- nimare/resources/neurosynth_laird_studies.json +58773 -0
- nimare/resources/neurosynth_stoplist.txt +396 -0
- nimare/resources/nidm_pain_dset.json +1349 -0
- nimare/resources/references.bib +541 -0
- nimare/resources/semantic_knowledge_children.txt +325 -0
- nimare/resources/semantic_relatedness_children.txt +249 -0
- nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
- nimare/results.py +225 -0
- nimare/stats.py +276 -0
- nimare/tests/__init__.py +1 -0
- nimare/tests/conftest.py +229 -0
- nimare/tests/data/amygdala_roi.nii.gz +0 -0
- nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
- nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
- nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
- nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
- nimare/tests/data/neurosynth_dset.json +2868 -0
- nimare/tests/data/neurosynth_laird_studies.json +58773 -0
- nimare/tests/data/nidm_pain_dset.json +1349 -0
- nimare/tests/data/nimads_annotation.json +1 -0
- nimare/tests/data/nimads_studyset.json +1 -0
- nimare/tests/data/test_baseline.txt +2 -0
- nimare/tests/data/test_pain_dataset.json +1278 -0
- nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
- nimare/tests/data/test_sleuth_file.txt +18 -0
- nimare/tests/data/test_sleuth_file2.txt +10 -0
- nimare/tests/data/test_sleuth_file3.txt +5 -0
- nimare/tests/data/test_sleuth_file4.txt +5 -0
- nimare/tests/data/test_sleuth_file5.txt +5 -0
- nimare/tests/test_annotate_cogat.py +32 -0
- nimare/tests/test_annotate_gclda.py +86 -0
- nimare/tests/test_annotate_lda.py +27 -0
- nimare/tests/test_dataset.py +99 -0
- nimare/tests/test_decode_continuous.py +132 -0
- nimare/tests/test_decode_discrete.py +92 -0
- nimare/tests/test_diagnostics.py +168 -0
- nimare/tests/test_estimator_performance.py +385 -0
- nimare/tests/test_extract.py +46 -0
- nimare/tests/test_generate.py +247 -0
- nimare/tests/test_io.py +240 -0
- nimare/tests/test_meta_ale.py +298 -0
- nimare/tests/test_meta_cbmr.py +295 -0
- nimare/tests/test_meta_ibma.py +240 -0
- nimare/tests/test_meta_kernel.py +209 -0
- nimare/tests/test_meta_mkda.py +234 -0
- nimare/tests/test_nimads.py +21 -0
- nimare/tests/test_reports.py +110 -0
- nimare/tests/test_stats.py +101 -0
- nimare/tests/test_transforms.py +272 -0
- nimare/tests/test_utils.py +200 -0
- nimare/tests/test_workflows.py +221 -0
- nimare/tests/utils.py +126 -0
- nimare/transforms.py +907 -0
- nimare/utils.py +1367 -0
- nimare/workflows/__init__.py +14 -0
- nimare/workflows/base.py +189 -0
- nimare/workflows/cbma.py +165 -0
- nimare/workflows/ibma.py +108 -0
- nimare/workflows/macm.py +77 -0
- nimare/workflows/misc.py +65 -0
- nimare-0.4.2rc4.dist-info/LICENSE +21 -0
- nimare-0.4.2rc4.dist-info/METADATA +124 -0
- nimare-0.4.2rc4.dist-info/RECORD +119 -0
- nimare-0.4.2rc4.dist-info/WHEEL +5 -0
- nimare-0.4.2rc4.dist-info/entry_points.txt +2 -0
- nimare-0.4.2rc4.dist-info/top_level.txt +2 -0
@@ -0,0 +1,247 @@
|
|
1
|
+
"""Tests for the nimare.generate module."""
|
2
|
+
|
3
|
+
from contextlib import ExitStack as does_not_raise
|
4
|
+
|
5
|
+
import pytest
|
6
|
+
from numpy.random import RandomState
|
7
|
+
|
8
|
+
from nimare.dataset import Dataset
|
9
|
+
from nimare.generate import (
|
10
|
+
_array_like,
|
11
|
+
_create_foci,
|
12
|
+
_create_source,
|
13
|
+
create_coordinate_dataset,
|
14
|
+
create_neurovault_dataset,
|
15
|
+
)
|
16
|
+
|
17
|
+
|
18
|
+
def _foci_case(foci, n_noise_foci):
    """Build a kwargs dict for _create_foci with per-case foci settings.

    A fresh RandomState is created for every case so tests do not share RNG state.
    """
    return {
        "foci": foci,
        "foci_percentage": "60%",
        "fwhm": 10.0,
        "n_studies": 5,
        "n_noise_foci": n_noise_foci,
        "rng": RandomState(seed=42),
        "space": "MNI",
    }


@pytest.mark.parametrize(
    "kwargs,expectation",
    [
        pytest.param(_foci_case([(0, 0, 0)], 2), does_not_raise(), id="specify_foci_coord"),
        pytest.param(_foci_case(1, 2), does_not_raise(), id="integer_foci"),
        pytest.param(_foci_case(0, 0), does_not_raise(), id="no_foci"),
    ],
)
def test_create_foci(kwargs, expectation):
    """Smoke test for _create_foci."""
    with expectation:
        ground_truth_foci, foci_dict = _create_foci(**kwargs)
    if isinstance(expectation, does_not_raise):
        # Study indices must be ints and ground-truth foci must be coordinate tuples.
        for study_idx in foci_dict:
            assert isinstance(study_idx, int)
        for coord in ground_truth_foci:
            assert isinstance(coord, tuple)
|
69
|
+
|
70
|
+
|
71
|
+
def test_create_source():
    """Smoke test for _create_source."""
    foci = {0: [(0, 0, 0)]}
    source = _create_source(foci=foci, sample_sizes=[25])
    # The generated source dict nests metadata under study-<id> / contrasts / "1".
    contrast_metadata = source["study-0"]["contrasts"]["1"]["metadata"]
    assert contrast_metadata["sample_sizes"] == [25]
|
75
|
+
|
76
|
+
|
77
|
+
@pytest.mark.parametrize(
    "kwargs,expectation",
    [
        pytest.param(
            dict(foci=2, foci_percentage=1.0, fwhm=10.0, sample_size=(10, 20),
                 n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            does_not_raise(),
            id="random_sample_size",
        ),
        pytest.param(
            dict(foci=[(0, 0, 0), (0, 10, 10)], foci_percentage="100%", fwhm=10.0,
                 sample_size=[30] * 5, n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            does_not_raise(),
            id="specified_sample_size",
        ),
        # sample_size list is one entry short of n_studies.
        pytest.param(
            dict(foci=2, fwhm=10.0, sample_size=[30] * 4,
                 n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            pytest.raises(ValueError),
            id="incorrect_sample_size_list",
        ),
        pytest.param(
            dict(foci=0, foci_percentage=1.0, fwhm=10.0, sample_size=(10, 20),
                 n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            does_not_raise(),
            id="no_foci",
        ),
        pytest.param(
            dict(foci=0, foci_percentage="50%", fwhm=10.0, sample_size=(10, 20),
                 n_studies=5, n_noise_foci=10, seed=42, space="MNI"),
            does_not_raise(),
            id="only_noise_foci",
        ),
        pytest.param(
            dict(foci=1, foci_percentage="50%", fwhm=10.0, sample_size=(10, 20),
                 n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            does_not_raise(),
            id="insufficient_foci",
        ),
        pytest.param(
            dict(foci="INVALID_FOCI", foci_percentage="50%", fwhm=10.0, sample_size=(10, 20),
                 n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            pytest.raises(ValueError),
            id="invalid_foci",
        ),
        pytest.param(
            dict(foci=1, foci_percentage="INVALID_PERCENT", fwhm=10.0, sample_size=(10, 20),
                 n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            pytest.raises(ValueError),
            id="invalid_percent",
        ),
        pytest.param(
            dict(foci=1, foci_percentage="60%", fwhm=10.0, sample_size="INVALID_SAMPLE_SIZE",
                 n_studies=5, n_noise_foci=0, seed=42, space="MNI"),
            pytest.raises(ValueError),
            id="invalid_sample_size",
        ),
        pytest.param(
            dict(foci=1, foci_percentage="60%", fwhm=10.0, sample_size=30,
                 n_studies=5, n_noise_foci=0, seed=42, space="INVALID_SPACE"),
            pytest.raises(NotImplementedError),
            id="invalid_space",
        ),
    ],
)
def test_create_coordinate_dataset(kwargs, expectation):
    """Create a coordinate Dataset according to parameters."""
    with expectation:
        ground_truth_foci, dataset = create_coordinate_dataset(**kwargs)
    if isinstance(expectation, does_not_raise):
        assert isinstance(dataset, Dataset)
        n_studies = kwargs["n_studies"]
        assert len(dataset.ids) == n_studies
        # Number of true foci per study: length of an explicit list, or the count itself.
        foci = kwargs["foci"]
        n_foci = len(foci) if _array_like(foci) else foci
        # Every study contributes its true and noise foci; studies with zero foci
        # still yield one coordinate row each, hence the max with n_studies.
        expected_n_coordinates = max(
            n_studies,
            n_studies * (n_foci + kwargs["n_noise_foci"]),
        )
        assert len(dataset.coordinates) == expected_n_coordinates
|
238
|
+
|
239
|
+
|
240
|
+
def test_create_neurovault_dataset():
    """Test creating a neurovault dataset."""
    # Downloads collection 8836 and keeps the "as-Animal" contrast images.
    dset = create_neurovault_dataset(collection_ids=(8836,), contrasts={"animal": "as-Animal"})
    # All standard statistical map columns should be present in the images table.
    assert {"beta", "t", "varcope", "z"}.issubset(dset.images.columns)
|
nimare/tests/test_io.py
ADDED
@@ -0,0 +1,240 @@
|
|
1
|
+
"""Test nimare.io (Dataset IO/transformations)."""
|
2
|
+
|
3
|
+
import os
|
4
|
+
|
5
|
+
import pytest
|
6
|
+
|
7
|
+
import nimare
|
8
|
+
from nimare import io
|
9
|
+
from nimare.nimads import Studyset
|
10
|
+
from nimare.tests.utils import get_test_data_path
|
11
|
+
from nimare.utils import get_template
|
12
|
+
|
13
|
+
|
14
|
+
def test_convert_nimads_to_dataset(example_nimads_studyset, example_nimads_annotation):
    """Conversion of nimads JSON to nimare dataset."""
    studyset = Studyset(example_nimads_studyset)

    # Conversion should work without annotations...
    without_annotations = io.convert_nimads_to_dataset(studyset)
    assert isinstance(without_annotations, nimare.dataset.Dataset)

    # ...and again after annotations are attached to the studyset.
    studyset.annotations = example_nimads_annotation
    with_annotations = io.convert_nimads_to_dataset(studyset)
    assert isinstance(with_annotations, nimare.dataset.Dataset)
|
23
|
+
|
24
|
+
|
25
|
+
def test_convert_sleuth_to_dataset_smoke():
    """Smoke test for Sleuth text file conversion."""
    data_path = get_test_data_path()
    # Files "", "2" are valid; "3", "4", "5" each exercise a distinct failure mode.
    sleuth_files = [
        os.path.join(data_path, f"test_sleuth_file{suffix}.txt")
        for suffix in ("", "2", "3", "4", "5")
    ]

    # One input file.
    dset = io.convert_sleuth_to_dataset(sleuth_files[0])
    assert isinstance(dset, nimare.dataset.Dataset)
    assert dset.coordinates.shape[0] == 7
    assert len(dset.ids) == 3

    # Two input files merged into one Dataset.
    dset2 = io.convert_sleuth_to_dataset(sleuth_files[:2])
    assert isinstance(dset2, nimare.dataset.Dataset)
    assert dset2.coordinates.shape[0] == 11
    assert len(dset2.ids) == 5

    # Invalid input: not a filename at all.
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(5)
    # Invalid input: one coordinate is a str instead of a number.
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(sleuth_files[2])
    # Invalid input: one focus has x & y, but not z.
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(sleuth_files[3])
    # Invalid input: bad space.
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(sleuth_files[4])
|
54
|
+
|
55
|
+
|
56
|
+
def test_convert_sleuth_to_json_smoke():
    """Smoke test for conversion of Sleuth text files to a Dataset JSON file.

    The output file is always cleaned up in a ``finally`` block; the original
    version left ``temp.json`` behind after the second conversion and whenever
    an assertion failed partway through.
    """
    out_file = os.path.abspath("temp.json")
    data_path = get_test_data_path()
    # Files "", "2" are valid; "3", "4", "5" each exercise a distinct failure mode.
    sleuth_files = [
        os.path.join(data_path, f"test_sleuth_file{suffix}.txt")
        for suffix in ("", "2", "3", "4", "5")
    ]
    try:
        # One input file.
        io.convert_sleuth_to_json(sleuth_files[0], out_file)
        assert os.path.isfile(out_file)
        dset = nimare.dataset.Dataset(out_file)
        assert isinstance(dset, nimare.dataset.Dataset)
        assert dset.coordinates.shape[0] == 7
        assert len(dset.ids) == 3

        # Two input files; the second conversion overwrites out_file.
        io.convert_sleuth_to_json(sleuth_files[:2], out_file)
        dset2 = nimare.dataset.Dataset(out_file)
        assert isinstance(dset2, nimare.dataset.Dataset)
        assert dset2.coordinates.shape[0] == 11
        assert len(dset2.ids) == 5

        # Invalid input: a number instead of a filename.
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(5, out_file)
        # Invalid input: one coordinate is a str instead of a number.
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(sleuth_files[2], out_file)
        # Invalid input: one focus has x & y, but not z.
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(sleuth_files[3], out_file)
        # Invalid input: bad space.
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(sleuth_files[4], out_file)
    finally:
        # Guarantee no temp.json is left in the working directory.
        if os.path.exists(out_file):
            os.remove(out_file)
|
90
|
+
|
91
|
+
|
92
|
+
def test_convert_neurosynth_to_dataset_smoke():
    """Smoke test for Neurosynth file conversion."""
    data_path = get_test_data_path()
    coordinates_file = os.path.join(data_path, "data-neurosynth_version-7_coordinates.tsv.gz")
    metadata_file = os.path.join(data_path, "data-neurosynth_version-7_metadata.tsv.gz")
    features = {
        "features": os.path.join(
            data_path,
            "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz",
        ),
        "vocabulary": os.path.join(
            data_path, "data-neurosynth_version-7_vocab-terms_vocabulary.txt"
        ),
    }
    dset = io.convert_neurosynth_to_dataset(
        coordinates_file,
        metadata_file,
        annotations_files=features,
    )
    assert isinstance(dset, nimare.dataset.Dataset)
    # Annotation columns are prefixed with the vocabulary/source/type triple.
    assert "terms_abstract_tfidf__abilities" in dset.annotations.columns
|
118
|
+
|
119
|
+
|
120
|
+
def test_convert_neurosynth_to_json_smoke():
    """Smoke test for conversion of Neurosynth files to a Dataset JSON file.

    Cleanup happens in a ``finally`` block so the output file is removed even
    when an assertion fails (the original only removed it on the success path).
    """
    out_file = os.path.abspath("temp.json")
    data_path = get_test_data_path()
    coordinates_file = os.path.join(data_path, "data-neurosynth_version-7_coordinates.tsv.gz")
    metadata_file = os.path.join(data_path, "data-neurosynth_version-7_metadata.tsv.gz")
    features = {
        "features": os.path.join(
            data_path,
            "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz",
        ),
        "vocabulary": os.path.join(
            data_path, "data-neurosynth_version-7_vocab-terms_vocabulary.txt"
        ),
    }
    try:
        io.convert_neurosynth_to_json(
            coordinates_file,
            metadata_file,
            out_file,
            annotations_files=features,
        )
        assert os.path.isfile(out_file)
        dset = nimare.dataset.Dataset(out_file)
        assert isinstance(dset, nimare.dataset.Dataset)
    finally:
        # Guarantee no temp.json is left in the working directory.
        if os.path.exists(out_file):
            os.remove(out_file)
|
150
|
+
|
151
|
+
|
152
|
+
@pytest.mark.parametrize(
    "kwargs",
    [
        {
            "collection_ids": (8836,),
            "contrasts": {"animal": "as-Animal"},
        },
        {
            "collection_ids": {"informative_name": 8836},
            "contrasts": {"animal": "as-Animal"},
            "map_type_conversion": {"T map": "t"},
            "target": "mni152_2mm",
            "mask": get_template("mni152_2mm", mask="brain"),
        },
        {
            "collection_ids": (6348, 6419),
            "contrasts": {"action": "action"},
            "map_type_conversion": {"univariate-beta map": "beta"},
        },
        {
            "collection_ids": (778,),  # collection not found
            "contrasts": {"action": "action"},
            "map_type_conversion": {"univariate-beta map": "beta"},
        },
        {
            "collection_ids": (11303,),
            "contrasts": {"rms": "rms"},
            "map_type_conversion": {"univariate-beta map": "beta"},
        },
        {
            "collection_ids": (8836,),
            "contrasts": {"crab_people": "cannot hurt you because they do not exist"},
        },
    ],
)
def test_convert_neurovault_to_dataset(kwargs):
    """Test conversion of neurovault collection to a dataset."""
    # Error cases: a missing collection, or a contrast that matches no images.
    # (No assignment inside pytest.raises: the return value is never produced.)
    if 778 in kwargs["collection_ids"]:
        with pytest.raises(ValueError) as excinfo:
            io.convert_neurovault_to_dataset(**kwargs)
        assert "Collection 778 not found." in str(excinfo.value)
        return

    if "crab_people" in kwargs["contrasts"]:
        with pytest.raises(ValueError) as excinfo:
            io.convert_neurovault_to_dataset(**kwargs)
        assert "No images were found for contrast crab_people" in str(excinfo.value)
        return

    dset = io.convert_neurovault_to_dataset(**kwargs)

    # Check if names are propagated into the Dataset: dict keys when a mapping
    # is provided, stringified collection ids otherwise.
    if isinstance(kwargs.get("collection_ids"), dict):
        study_ids = set(kwargs["collection_ids"].keys())
    else:
        study_ids = set(map(str, kwargs["collection_ids"]))
    dset_ids = {id_.split("-")[1] for id_ in dset.ids}

    assert study_ids == dset_ids

    # Check if images were downloaded and are unique.
    if kwargs.get("map_type_conversion"):
        for img_type in kwargs["map_type_conversion"].values():
            assert not dset.images[img_type].empty
            assert len(set(dset.images[img_type])) == len(dset.images[img_type])
|
228
|
+
|
229
|
+
|
230
|
+
@pytest.mark.parametrize(
    "sample_sizes,expected_sample_size",
    [
        pytest.param([1, 2, 1], 1, id="clear_mode"),
        pytest.param([None, None, 1], 1, id="mostly_missing"),
        pytest.param([1, 1, 2, 2], 1, id="tied_mode"),
    ],
)
def test_resolve_sample_sizes(sample_sizes, expected_sample_size):
    """Test modal sample size heuristic."""
    assert io._resolve_sample_size(sample_sizes) == expected_sample_size
|