nimare 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +667 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +294 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2.dist-info/LICENSE +21 -0
  115. nimare-0.4.2.dist-info/METADATA +124 -0
  116. nimare-0.4.2.dist-info/RECORD +119 -0
  117. nimare-0.4.2.dist-info/WHEEL +5 -0
  118. nimare-0.4.2.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2.dist-info/top_level.txt +2 -0
@@ -0,0 +1,247 @@
1
+ """Tests for the nimare.generate module."""
2
+
3
+ from contextlib import ExitStack as does_not_raise
4
+
5
+ import pytest
6
+ from numpy.random import RandomState
7
+
8
+ from nimare.dataset import Dataset
9
+ from nimare.generate import (
10
+ _array_like,
11
+ _create_foci,
12
+ _create_source,
13
+ create_coordinate_dataset,
14
+ create_neurovault_dataset,
15
+ )
16
+
17
+
18
def _foci_case(case_id, expectation, **overrides):
    """Build one ``_create_foci`` parametrize case from shared defaults."""
    params = {
        "foci": [(0, 0, 0)],
        "foci_percentage": "60%",
        "fwhm": 10.0,
        "n_studies": 5,
        "n_noise_foci": 2,
        "rng": RandomState(seed=42),
        "space": "MNI",
    }
    params.update(overrides)
    return pytest.param(params, expectation, id=case_id)


@pytest.mark.parametrize(
    "kwargs,expectation",
    [
        _foci_case("specify_foci_coord", does_not_raise()),
        _foci_case("integer_foci", does_not_raise(), foci=1),
        _foci_case("no_foci", does_not_raise(), foci=0, n_noise_foci=0),
    ],
)
def test_create_foci(kwargs, expectation):
    """Smoke test for _create_foci."""
    with expectation:
        ground_truth_foci, foci_dict = _create_foci(**kwargs)
    if isinstance(expectation, does_not_raise):
        # Study indices are ints; each ground-truth focus is an (x, y, z) tuple.
        assert all(isinstance(study_idx, int) for study_idx in foci_dict)
        assert all(isinstance(focus, tuple) for focus in ground_truth_foci)
69
+
70
+
71
def test_create_source():
    """Smoke test for _create_source."""
    source = _create_source(foci={0: [(0, 0, 0)]}, sample_sizes=[25])
    metadata = source["study-0"]["contrasts"]["1"]["metadata"]
    assert metadata["sample_sizes"] == [25]
75
+
76
+
77
# Sentinel used by _coord_case to drop a key from the default kwargs.
_OMIT = object()


def _coord_case(case_id, expectation, **overrides):
    """Build one ``create_coordinate_dataset`` parametrize case from shared defaults."""
    params = {
        "foci": 1,
        "foci_percentage": "60%",
        "fwhm": 10.0,
        "sample_size": (10, 20),
        "n_studies": 5,
        "n_noise_foci": 0,
        "seed": 42,
        "space": "MNI",
    }
    for key, value in overrides.items():
        if value is _OMIT:
            del params[key]
        else:
            params[key] = value
    return pytest.param(params, expectation, id=case_id)


@pytest.mark.parametrize(
    "kwargs,expectation",
    [
        _coord_case("random_sample_size", does_not_raise(), foci=2, foci_percentage=1.0),
        _coord_case(
            "specified_sample_size",
            does_not_raise(),
            foci=[(0, 0, 0), (0, 10, 10)],
            foci_percentage="100%",
            sample_size=[30] * 5,
        ),
        _coord_case(
            "incorrect_sample_size_list",
            pytest.raises(ValueError),
            foci=2,
            foci_percentage=_OMIT,
            sample_size=[30] * 4,
        ),
        _coord_case("no_foci", does_not_raise(), foci=0, foci_percentage=1.0),
        _coord_case(
            "only_noise_foci",
            does_not_raise(),
            foci=0,
            foci_percentage="50%",
            n_noise_foci=10,
        ),
        _coord_case("insufficient_foci", does_not_raise(), foci=1, foci_percentage="50%"),
        _coord_case(
            "invalid_foci",
            pytest.raises(ValueError),
            foci="INVALID_FOCI",
            foci_percentage="50%",
        ),
        _coord_case(
            "invalid_percent",
            pytest.raises(ValueError),
            foci_percentage="INVALID_PERCENT",
        ),
        _coord_case(
            "invalid_sample_size",
            pytest.raises(ValueError),
            sample_size="INVALID_SAMPLE_SIZE",
        ),
        _coord_case(
            "invalid_space",
            pytest.raises(NotImplementedError),
            sample_size=30,
            space="INVALID_SPACE",
        ),
    ],
)
def test_create_coordinate_dataset(kwargs, expectation):
    """Create a coordinate Dataset according to parameters."""
    with expectation:
        ground_truth_foci, dataset = create_coordinate_dataset(**kwargs)
    if isinstance(expectation, does_not_raise):
        assert isinstance(dataset, Dataset)
        assert len(dataset.ids) == kwargs["n_studies"]
        # Check that the number of observed coordinates in the dataset is correct.
        if _array_like(kwargs["foci"]):
            n_foci = len(kwargs["foci"])
        else:
            n_foci = kwargs["foci"]
        foci_per_study = n_foci + kwargs["n_noise_foci"]
        expected_coordinate_number = max(
            kwargs["n_studies"],
            kwargs["n_studies"] * foci_per_study,
        )
        assert len(dataset.coordinates) == expected_coordinate_number
238
+
239
+
240
def test_create_neurovault_dataset():
    """Test creating a neurovault dataset."""
    dset = create_neurovault_dataset(
        collection_ids=(8836,),
        contrasts={"animal": "as-Animal"},
    )
    # The converted dataset should expose all four statistical map columns.
    for column in ("beta", "t", "varcope", "z"):
        assert column in dset.images.columns
@@ -0,0 +1,294 @@
1
+ """Test nimare.io (Dataset IO/transformations)."""
2
+
3
+ import os
4
+
5
+ import pytest
6
+
7
+ import nimare
8
+ from nimare import io
9
+ from nimare.nimads import Studyset
10
+ from nimare.tests.utils import get_test_data_path
11
+ from nimare.utils import get_template
12
+
13
+
14
def test_convert_nimads_to_dataset(example_nimads_studyset, example_nimads_annotation):
    """Conversion of nimads JSON to nimare dataset."""
    studyset = Studyset(example_nimads_studyset)
    # Convert once without annotations and once with them attached.
    dset_plain = io.convert_nimads_to_dataset(studyset)
    studyset.annotations = example_nimads_annotation
    dset_annotated = io.convert_nimads_to_dataset(studyset)

    for dset in (dset_plain, dset_annotated):
        assert isinstance(dset, nimare.dataset.Dataset)
23
+
24
+
25
def test_convert_nimads_to_dataset_sample_sizes(
    example_nimads_studyset, example_nimads_annotation
):
    """Conversion of nimads JSON to nimare dataset."""
    studyset = Studyset(example_nimads_studyset)
    # Attach a sample_sizes list to every analysis in every study.
    all_analyses = (
        analysis for study in studyset.studies for analysis in study.analyses
    )
    for analysis in all_analyses:
        analysis.metadata["sample_sizes"] = [2, 20]

    dset = io.convert_nimads_to_dataset(studyset)

    assert isinstance(dset, nimare.dataset.Dataset)
    assert "sample_sizes" in dset.metadata.columns
38
+
39
+
40
def test_convert_nimads_to_dataset_single_sample_size(
    example_nimads_studyset, example_nimads_annotation
):
    """Test conversion of nimads JSON to nimare dataset with a single sample size value."""
    studyset = Studyset(example_nimads_studyset)
    # Attach a scalar sample_size (not a list) to every analysis.
    all_analyses = (
        analysis for study in studyset.studies for analysis in study.analyses
    )
    for analysis in all_analyses:
        analysis.metadata["sample_size"] = 20

    dset = io.convert_nimads_to_dataset(studyset)

    assert isinstance(dset, nimare.dataset.Dataset)
    assert "sample_sizes" in dset.metadata.columns
53
+
54
+
55
def test_analysis_to_dict_invalid_sample_sizes_type(example_nimads_studyset):
    """Test _analysis_to_dict raises TypeError when sample_sizes is not a list/tuple.

    The original docstring claimed ValueError, but the test (and the converter)
    use TypeError for a scalar ``sample_sizes`` value.
    """
    studyset = Studyset(example_nimads_studyset)
    # Set sample_sizes to an int rather than list/tuple
    for study in studyset.studies:
        for analysis in study.analyses:
            analysis.metadata["sample_sizes"] = 5
    with pytest.raises(TypeError):
        # Trigger conversion which internally calls _analysis_to_dict
        io.convert_nimads_to_dataset(studyset)
65
+
66
+
67
def test_analysis_to_dict_invalid_annotations_format(example_nimads_studyset):
    """Test _analysis_to_dict raises TypeError when annotations are in an invalid format.

    The original docstring claimed ValueError, but the test asserts TypeError.
    """
    studyset = Studyset(example_nimads_studyset)
    # Annotations are expected to be a dict; set them to an invalid format
    # (a plain string) to trigger the error.
    for study in studyset.studies:
        for analysis in study.analyses:
            analysis.metadata["annotations"] = "invalid_format"
    with pytest.raises(TypeError):
        io.convert_nimads_to_dataset(studyset)
77
+
78
+
79
def test_convert_sleuth_to_dataset_smoke():
    """Smoke test for Sleuth text file conversion."""
    data_dir = get_test_data_path()
    one_file, two_file, bad_coord, missing_z, bad_space = [
        os.path.join(data_dir, "test_sleuth_file%s.txt" % suffix)
        for suffix in ("", "2", "3", "4", "5")
    ]
    # Single input file.
    dset = io.convert_sleuth_to_dataset(one_file)
    assert isinstance(dset, nimare.dataset.Dataset)
    assert dset.coordinates.shape[0] == 7
    assert len(dset.ids) == 3
    # Two input files at once.
    dset2 = io.convert_sleuth_to_dataset([one_file, two_file])
    assert isinstance(dset2, nimare.dataset.Dataset)
    assert dset2.coordinates.shape[0] == 11
    assert len(dset2.ids) == 5
    # Invalid inputs: a non-path, a str coordinate, a missing z value,
    # and a bad space — all must raise ValueError.
    for bad_input in (5, bad_coord, missing_z, bad_space):
        with pytest.raises(ValueError):
            io.convert_sleuth_to_dataset(bad_input)
108
+
109
+
110
def test_convert_sleuth_to_json_smoke():
    """Smoke test for Sleuth text file conversion.

    Writes a temporary JSON file; the try/finally guarantees the file is
    removed even when an assertion fails (the original leaked ``temp.json``
    on failure and after the two-file conversion).
    """
    out_file = os.path.abspath("temp.json")
    data_dir = get_test_data_path()
    sleuth_file = os.path.join(data_dir, "test_sleuth_file.txt")
    sleuth_file2 = os.path.join(data_dir, "test_sleuth_file2.txt")
    sleuth_file3 = os.path.join(data_dir, "test_sleuth_file3.txt")
    sleuth_file4 = os.path.join(data_dir, "test_sleuth_file4.txt")
    sleuth_file5 = os.path.join(data_dir, "test_sleuth_file5.txt")
    try:
        # Use one input file
        io.convert_sleuth_to_json(sleuth_file, out_file)
        dset = nimare.dataset.Dataset(out_file)
        assert os.path.isfile(out_file)
        assert isinstance(dset, nimare.dataset.Dataset)
        assert dset.coordinates.shape[0] == 7
        assert len(dset.ids) == 3
        os.remove(out_file)
        # Use two input files
        io.convert_sleuth_to_json([sleuth_file, sleuth_file2], out_file)
        dset2 = nimare.dataset.Dataset(out_file)
        assert isinstance(dset2, nimare.dataset.Dataset)
        assert dset2.coordinates.shape[0] == 11
        assert len(dset2.ids) == 5
        # Use invalid input (number instead of file)
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(5, out_file)
        # Use invalid input (one coordinate is a str instead of a number)
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(sleuth_file3, out_file)
        # Use invalid input (one has x & y, but not z)
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(sleuth_file4, out_file)
        # Use invalid input (bad space)
        with pytest.raises(ValueError):
            io.convert_sleuth_to_json(sleuth_file5, out_file)
    finally:
        # Always clean up the temporary output file.
        if os.path.isfile(out_file):
            os.remove(out_file)
144
+
145
+
146
def test_convert_neurosynth_to_dataset_smoke():
    """Smoke test for Neurosynth file conversion."""
    data_dir = get_test_data_path()
    coordinates_file = os.path.join(
        data_dir, "data-neurosynth_version-7_coordinates.tsv.gz"
    )
    metadata_file = os.path.join(
        data_dir, "data-neurosynth_version-7_metadata.tsv.gz"
    )
    features = {
        "features": os.path.join(
            data_dir,
            "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz",
        ),
        "vocabulary": os.path.join(
            data_dir, "data-neurosynth_version-7_vocab-terms_vocabulary.txt"
        ),
    }
    dset = io.convert_neurosynth_to_dataset(
        coordinates_file,
        metadata_file,
        annotations_files=features,
    )
    assert isinstance(dset, nimare.dataset.Dataset)
    # Vocabulary terms should appear as prefixed annotation columns.
    assert "terms_abstract_tfidf__abilities" in dset.annotations.columns
172
+
173
+
174
def test_convert_neurosynth_to_json_smoke():
    """Smoke test for Neurosynth file conversion.

    Writes a temporary JSON file; the try/finally guarantees cleanup even
    when an assertion fails (the original left ``temp.json`` behind on
    failure).
    """
    out_file = os.path.abspath("temp.json")
    data_dir = get_test_data_path()
    coordinates_file = os.path.join(
        data_dir, "data-neurosynth_version-7_coordinates.tsv.gz"
    )
    metadata_file = os.path.join(
        data_dir, "data-neurosynth_version-7_metadata.tsv.gz"
    )
    features = {
        "features": os.path.join(
            data_dir,
            "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz",
        ),
        "vocabulary": os.path.join(
            data_dir, "data-neurosynth_version-7_vocab-terms_vocabulary.txt"
        ),
    }
    try:
        io.convert_neurosynth_to_json(
            coordinates_file,
            metadata_file,
            out_file,
            annotations_files=features,
        )
        dset = nimare.dataset.Dataset(out_file)
        assert os.path.isfile(out_file)
        assert isinstance(dset, nimare.dataset.Dataset)
    finally:
        # Always clean up the temporary output file.
        if os.path.isfile(out_file):
            os.remove(out_file)
204
+
205
+
206
@pytest.mark.parametrize(
    "kwargs",
    [
        {
            "collection_ids": (8836,),
            "contrasts": {"animal": "as-Animal"},
        },
        {
            "collection_ids": {"informative_name": 8836},
            "contrasts": {"animal": "as-Animal"},
            "map_type_conversion": {"T map": "t"},
            "target": "mni152_2mm",
            "mask": get_template("mni152_2mm", mask="brain"),
        },
        {
            "collection_ids": (6348, 6419),
            "contrasts": {"action": "action"},
            "map_type_conversion": {"univariate-beta map": "beta"},
        },
        {
            "collection_ids": (778,),  # collection not found
            "contrasts": {"action": "action"},
            "map_type_conversion": {"univariate-beta map": "beta"},
        },
        {
            "collection_ids": (11303,),
            "contrasts": {"rms": "rms"},
            "map_type_conversion": {"univariate-beta map": "beta"},
        },
        {
            "collection_ids": (8836,),
            "contrasts": {"crab_people": "cannot hurt you because they do not exist"},
        },
    ],
)
def test_convert_neurovault_to_dataset(kwargs):
    """Test conversion of neurovault collection to a dataset."""
    # Guard clause: the nonexistent collection must fail loudly.
    if 778 in kwargs["collection_ids"]:
        with pytest.raises(ValueError) as excinfo:
            io.convert_neurovault_to_dataset(**kwargs)
        assert "Collection 778 not found." in str(excinfo.value)
        return
    # Guard clause: a contrast matching no images must fail loudly.
    if "crab_people" in kwargs["contrasts"]:
        with pytest.raises(ValueError) as excinfo:
            io.convert_neurovault_to_dataset(**kwargs)
        assert "No images were found for contrast crab_people" in str(excinfo.value)
        return

    dset = io.convert_neurovault_to_dataset(**kwargs)

    # Check if names are propagated into the Dataset.
    collection_ids = kwargs.get("collection_ids")
    if isinstance(collection_ids, dict):
        study_ids = set(collection_ids)
    else:
        study_ids = {str(cid) for cid in collection_ids}
    dset_ids = {full_id.split("-")[1] for full_id in dset.ids}

    assert study_ids == dset_ids

    # Check that images were downloaded and are unique.
    map_types = kwargs.get("map_type_conversion")
    if map_types:
        for img_type in map_types.values():
            column = dset.images[img_type]
            assert not column.empty
            assert len(set(column)) == len(column)
282
+
283
+
284
@pytest.mark.parametrize(
    "sample_sizes,expected_sample_size",
    [
        ([1, 2, 1], 1),
        ([None, None, 1], 1),
        ([1, 1, 2, 2], 1),
    ],
)
def test_resolve_sample_sizes(sample_sizes, expected_sample_size):
    """Test modal sample size heuristic."""
    # The resolver picks the modal value (smallest mode on ties, per cases above).
    resolved = io._resolve_sample_size(sample_sizes)
    assert resolved == expected_sample_size