nimare-0.4.2-py3-none-any.whl

This diff represents the contents of a publicly released package version, as published to one of the supported registries. It is provided for informational purposes only and reflects the package as it appears in its public registry.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +667 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +294 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2.dist-info/LICENSE +21 -0
  115. nimare-0.4.2.dist-info/METADATA +124 -0
  116. nimare-0.4.2.dist-info/RECORD +119 -0
  117. nimare-0.4.2.dist-info/WHEEL +5 -0
  118. nimare-0.4.2.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2.dist-info/top_level.txt +2 -0
nimare/tests/test_diagnostics.py +168 -0
@@ -0,0 +1,168 @@
+"""Tests for the nimare.diagnostics module."""
+
+import os.path as op
+
+import pytest
+from nilearn.input_data import NiftiLabelsMasker
+
+from nimare import diagnostics
+from nimare.meta import cbma, ibma
+from nimare.tests.utils import get_test_data_path
+
+
+@pytest.mark.parametrize(
+    "estimator,meta_type,n_samples,target_image,voxel_thresh",
+    [
+        (cbma.ALE, "cbma", "onesample", "z", 1.65),
+        (cbma.MKDADensity, "cbma", "onesample", "z", 1.65),
+        (cbma.KDA, "cbma", "onesample", "z", 1.65),
+        (cbma.MKDAChi2, "cbma", "twosample", "z_desc-uniformity", 1.65),
+        (ibma.Fishers, "ibma", "onesample", "z", 0.1),
+        (ibma.Stouffers, "ibma", "onesample", "z", 0.1),
+        (ibma.WeightedLeastSquares, "ibma", "onesample", "z", 0.1),
+        (ibma.DerSimonianLaird, "ibma", "onesample", "z", 0.1),
+        (ibma.Hedges, "ibma", "onesample", "z", 0.1),
+        # (ibma.SampleSizeBasedLikelihood, "ibma", "onesample", "z"),
+        # (ibma.VarianceBasedLikelihood, "ibma", "onesample", "z"),
+        # (ibma.PermutedOLS, "ibma", "onesample", "z"),
+    ],
+)
+def test_jackknife_smoke(
+    testdata_ibma,
+    testdata_cbma_full,
+    estimator,
+    meta_type,
+    n_samples,
+    target_image,
+    voxel_thresh,
+):
+    """Smoke test the Jackknife method."""
+    dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
+    dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
+
+    meta = estimator()
+    testdata = testdata_ibma if meta_type == "ibma" else testdata_cbma_full
+    res = meta.fit(dset1, dset2) if n_samples == "twosample" else meta.fit(testdata)
+
+    jackknife = diagnostics.Jackknife(target_image=target_image, voxel_thresh=voxel_thresh)
+    results = jackknife.transform(res)
+
+    image_name = "_".join(target_image.split("_")[1:])
+    image_name = f"_{image_name}" if image_name else image_name
+
+    # For ibma.WeightedLeastSquares we have both positive and negative tail combined.
+    contribution_table = (
+        results.tables[f"{target_image}_diag-Jackknife_tab-counts"]
+        if estimator == ibma.WeightedLeastSquares
+        else results.tables[f"{target_image}_diag-Jackknife_tab-counts_tail-positive"]
+    )
+
+    clusters_table = results.tables[f"{target_image}_tab-clust"]
+    label_maps = results.maps[f"label{image_name}_tail-positive"]
+    ids_ = meta.inputs_["id"] if n_samples == "onesample" else meta.inputs_["id1"]
+
+    assert contribution_table.shape[0] == len(ids_)
+    assert clusters_table.shape[0] >= contribution_table.shape[1] - 1
+    assert len(label_maps) > 0
+
+
+def test_jackknife_with_zero_clusters(testdata_cbma_full):
+    """Ensure that Jackknife will work with zero clusters."""
+    meta = cbma.ALE()
+    res = meta.fit(testdata_cbma_full)
+
+    jackknife = diagnostics.Jackknife(target_image="z", voxel_thresh=10)
+    results = jackknife.transform(res)
+
+    contribution_table = results.tables["z_diag-Jackknife_tab-counts"]
+    clusters_table = results.tables["z_tab-clust"]
+    label_maps = results.maps["label_tail-positive"]
+    assert contribution_table is None
+    assert clusters_table.empty
+    assert not label_maps
+
+
+def test_jackknife_with_custom_masker_smoke(testdata_ibma):
+    """Ensure that Jackknife will work with NiftiLabelsMaskers.
+
+    CBMAs don't work with NiftiLabelsMaskers and VarianceBasedLikelihood takes ~1 minute,
+    which is too long for a single test, so I'm just using SampleSizeBasedLikelihood.
+    """
+    atlas = op.join(get_test_data_path(), "test_pain_dataset", "atlas.nii.gz")
+    masker = NiftiLabelsMasker(atlas)
+
+    meta = ibma.SampleSizeBasedLikelihood(mask=masker)
+    res = meta.fit(testdata_ibma)
+
+    jackknife = diagnostics.Jackknife(target_image="z", voxel_thresh=0.5)
+    results = jackknife.transform(res)
+    contribution_table = results.tables["z_diag-Jackknife_tab-counts_tail-positive"]
+    assert contribution_table.shape[0] == len(meta.inputs_["id"])
+
+    # A Jackknife with a target_image that isn't present in the MetaResult raises a ValueError.
+    with pytest.raises(ValueError):
+        jackknife = diagnostics.Jackknife(target_image="doggy", voxel_thresh=0.5)
+        jackknife.transform(res)
+
+
+@pytest.mark.parametrize(
+    "estimator,meta_type,n_samples,target_image",
+    [
+        (cbma.ALE, "cbma", "onesample", "z"),
+        (cbma.MKDADensity, "cbma", "onesample", "z"),
+        (cbma.KDA, "cbma", "onesample", "z"),
+        (cbma.MKDAChi2, "cbma", "twosample", "z_desc-uniformity"),
+        (ibma.Stouffers, "ibma", "onesample", "z"),
+    ],
+)
+def test_focuscounter_smoke(
+    testdata_ibma,
+    testdata_cbma_full,
+    estimator,
+    meta_type,
+    n_samples,
+    target_image,
+):
+    """Smoke test the FocusCounter method."""
+    dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
+    dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
+
+    meta = estimator()
+    testdata = testdata_ibma if meta_type == "ibma" else testdata_cbma_full
+    res = meta.fit(dset1, dset2) if n_samples == "twosample" else meta.fit(testdata)
+
+    counter = diagnostics.FocusCounter(target_image=target_image, voxel_thresh=1.65)
+    if meta_type == "ibma":
+        with pytest.raises(ValueError):
+            counter.transform(res)
+    else:
+        results = counter.transform(res)
+
+        image_name = "_".join(target_image.split("_")[1:])
+        image_name = f"_{image_name}" if image_name else image_name
+
+        contribution_table = results.tables[
+            f"{target_image}_diag-FocusCounter_tab-counts_tail-positive"
+        ]
+        clusters_table = results.tables[f"{target_image}_tab-clust"]
+        label_maps = results.maps[f"label{image_name}_tail-positive"]
+        ids_ = meta.inputs_["id"] if n_samples == "onesample" else meta.inputs_["id1"]
+
+        assert contribution_table.shape[0] == len(ids_)
+        assert clusters_table.shape[0] >= contribution_table.shape[1] - 1
+        assert len(label_maps) > 0
+
+
+def test_focusfilter(testdata_laird):
+    """Ensure that the FocusFilter removes out-of-mask coordinates.
+
+    The Laird dataset contains 16 foci outside of the MNI brain mask, which the filter should
+    remove.
+    """
+    n_coordinates_all = testdata_laird.coordinates.shape[0]
+    ffilter = diagnostics.FocusFilter()
+    filtered_dset = ffilter.transform(testdata_laird)
+    n_coordinates_filtered = filtered_dset.coordinates.shape[0]
+    assert n_coordinates_all == 1117
+    assert n_coordinates_filtered == 1101
+    assert n_coordinates_filtered <= n_coordinates_all
nimare/tests/test_estimator_performance.py +385 -0
@@ -0,0 +1,385 @@
+"""Test estimator, kerneltransformer, and multiple comparisons corrector performance."""
+
+import os
+from contextlib import ExitStack as does_not_raise
+
+import numpy as np
+import pytest
+
+from nimare.correct import FDRCorrector, FWECorrector
+from nimare.generate import create_coordinate_dataset
+from nimare.meta import ale, kernel, mkda
+from nimare.results import MetaResult
+from nimare.tests.utils import _check_p_values, _create_signal_mask, _transform_res
+from nimare.utils import mm2vox
+
+# set significance levels used for testing.
+ALPHA = 0.05
+
+if os.environ.get("CIRCLECI"):
+    N_CORES = 1
+else:
+    N_CORES = -1
+
+
+# PRECOMPUTED FIXTURES
+# --------------------
+
+
+##########################################
+# random state
+##########################################
+@pytest.fixture(scope="session")
+def random():
+    """Set random state for the tests."""
+    np.random.seed(1939)
+
+
+##########################################
+# simulated dataset(s)
+##########################################
+@pytest.fixture(
+    scope="session",
+    params=[
+        pytest.param(
+            {
+                "foci": 5,
+                "fwhm": 10.0,
+                "n_studies": 40,
+                "sample_size": 30,
+                "n_noise_foci": 20,
+                "seed": 1939,
+            },
+            id="normal_data",
+        )
+    ],
+)
+def simulatedata_cbma(request):
+    """Set the simulated CBMA data according to parameters."""
+    return request.param["fwhm"], create_coordinate_dataset(**request.param)
+
+
+##########################################
+# signal and non-signal masks
+##########################################
+@pytest.fixture(scope="session")
+def signal_masks(simulatedata_cbma):
+    """Define masks of signal and non-signal for performance evaluation."""
+    _, (ground_truth_foci, dataset) = simulatedata_cbma
+    ground_truth_foci_ijks = [
+        tuple(mm2vox(focus, dataset.masker.mask_img.affine)) for focus in ground_truth_foci
+    ]
+    return _create_signal_mask(np.array(ground_truth_foci_ijks), dataset.masker.mask_img)
+
+
+##########################################
+# meta-analysis estimators
+##########################################
+@pytest.fixture(
+    scope="session",
+    params=[
+        pytest.param(ale.ALE, id="ale"),
+        pytest.param(mkda.MKDADensity, id="mkda"),
+        pytest.param(mkda.KDA, id="kda"),
+    ],
+)
+def meta_est(request):
+    """Define meta-analysis estimators for tests."""
+    return request.param
+
+
+##########################################
+# meta-analysis estimator parameters
+##########################################
+@pytest.fixture(
+    scope="session",
+    params=[
+        pytest.param({"null_method": "montecarlo", "n_iters": 100}, id="montecarlo"),
+        pytest.param({"null_method": "approximate"}, id="approximate"),
+        pytest.param(
+            {"null_method": "reduced_montecarlo", "n_iters": 100}, id="reduced_montecarlo"
+        ),
+    ],
+)
+def meta_params(request):
+    """Define meta-analysis Estimator parameters for tests."""
+    return request.param
+
+
+##########################################
+# meta-analysis kernels
+##########################################
+@pytest.fixture(
+    scope="session",
+    params=[
+        pytest.param(kernel.ALEKernel, id="ale_kernel"),
+        pytest.param(kernel.MKDAKernel, id="mkda_kernel"),
+        pytest.param(kernel.KDAKernel, id="kda_kernel"),
+    ],
+)
+def kern(request):
+    """Define kernel transformers for tests."""
+    return request.param
+
+
+##########################################
+# multiple comparison correctors (testing)
+##########################################
+@pytest.fixture(
+    scope="session",
+    params=[
+        pytest.param(FWECorrector(method="bonferroni"), id="fwe_bonferroni"),
+        pytest.param(
+            FWECorrector(method="montecarlo", voxel_thresh=ALPHA, n_iters=100, n_cores=N_CORES),
+            id="fwe_montecarlo",
+        ),
+        pytest.param(FDRCorrector(method="indep", alpha=ALPHA), id="fdr_indep"),
+        pytest.param(FDRCorrector(method="negcorr", alpha=ALPHA), id="fdr_negcorr"),
+    ],
+)
+def corr(request):
+    """Define multiple comparisons correctors for tests."""
+    return request.param
+
+
+##########################################
+# multiple comparison correctors (smoke)
+##########################################
+@pytest.fixture(
+    scope="session",
+    params=[
+        pytest.param(FWECorrector(method="bonferroni"), id="fwe_bonferroni"),
+        pytest.param(
+            FWECorrector(method="montecarlo", voxel_thresh=ALPHA, n_iters=2, n_cores=1),
+            id="fwe_montecarlo",
+        ),
+        pytest.param(FDRCorrector(method="indep", alpha=ALPHA), id="fdr_indep"),
+        pytest.param(FDRCorrector(method="negcorr", alpha=ALPHA), id="fdr_negcorr"),
+    ],
+)
+def corr_small(request):
+    """Define multiple comparisons correctors for tests."""
+    return request.param
+
+
+###########################################
+# all meta-analysis estimator/kernel combos
+###########################################
+@pytest.fixture(scope="session")
+def meta(simulatedata_cbma, meta_est, kern, meta_params):
+    """Define estimator/kernel combinations for tests."""
+    fwhm, (_, _) = simulatedata_cbma
+    if kern == kernel.KDAKernel or kern == kernel.MKDAKernel:
+        kern = kern(r=fwhm / 2)
+    else:
+        kern = kern()
+
+    # instantiate meta-analysis estimator
+    return meta_est(kern, **meta_params)
+
+
+###########################################
+# meta-analysis estimator results
+###########################################
+@pytest.fixture(scope="session")
+def meta_res(simulatedata_cbma, meta, random):
+    """Define estimators for tests."""
+    _, (_, dataset) = simulatedata_cbma
+    # CHECK IF META/KERNEL WORK TOGETHER
+    ####################################
+    meta_expectation = does_not_raise()
+
+    with meta_expectation:
+        res = meta.fit(dataset)
+    # if creating the result failed (expected), do not continue
+    if isinstance(meta_expectation, type(pytest.raises(ValueError))):
+        pytest.xfail("this meta-analysis & kernel combo fails")
+    # instantiate meta-analysis estimator
+    return res
+
+
+###########################################
+# corrected results (testing)
+###########################################
+@pytest.fixture(scope="session")
+def meta_cres(meta, meta_res, corr, random):
+    """Define corrected results for tests."""
+    return _transform_res(meta, meta_res, corr)
+
+
+###########################################
+# corrected results (smoke)
+###########################################
+@pytest.fixture(scope="session")
+def meta_cres_small(meta, meta_res, corr_small, random):
+    """Define corrected results for tests."""
+    return _transform_res(meta, meta_res, corr_small)
+
+
+# --------------
+# TEST FUNCTIONS
+# --------------
+
+
+@pytest.mark.performance_smoke
+def test_meta_fit_smoke(meta_res):
+    """Smoke test for meta-analytic estimator fit."""
+    assert isinstance(meta_res, MetaResult)
+
+
+@pytest.mark.performance_estimators
+def test_meta_fit_performance(meta_res, signal_masks, simulatedata_cbma):
+    """Test meta-analytic estimator fit performance."""
+    _, (ground_truth_foci, _) = simulatedata_cbma
+    mask = meta_res.masker.mask_img
+    ground_truth_foci_ijks = [tuple(mm2vox(focus, mask.affine)) for focus in ground_truth_foci]
+    sig_idx, nonsig_idx = [
+        meta_res.masker.transform(img).astype(bool).squeeze() for img in signal_masks
+    ]
+
+    # all estimators generate p-values
+    p_array = meta_res.get_map("p", return_type="array")
+
+    # poor performer(s)
+    if (
+        isinstance(meta_res.estimator, ale.ALE)
+        and isinstance(meta_res.estimator.kernel_transformer, kernel.MKDAKernel)
+        and meta_res.estimator.get_params().get("null_method") == "approximate"
+    ):
+        good_sensitivity = True
+        good_specificity = False
+    elif (
+        isinstance(meta_res.estimator, ale.ALE)
+        and isinstance(meta_res.estimator.kernel_transformer, kernel.MKDAKernel)
+        and "montecarlo" in meta_res.estimator.get_params().get("null_method")
+    ):
+        good_sensitivity = False
+        good_specificity = True
+    elif (
+        isinstance(meta_res.estimator, ale.ALE)
+        and isinstance(meta_res.estimator.kernel_transformer, kernel.KDAKernel)
+        and "montecarlo" in meta_res.estimator.get_params().get("null_method")
+    ):
+        good_sensitivity = False
+        good_specificity = True
+    elif (
+        isinstance(meta_res.estimator, ale.ALE)
+        and isinstance(meta_res.estimator.kernel_transformer, kernel.KDAKernel)
+        and meta_res.estimator.get_params().get("null_method") == "approximate"
+    ):
+        good_sensitivity = True
+        good_specificity = False
+    elif (
+        isinstance(meta_res.estimator, mkda.MKDADensity)
+        and isinstance(meta_res.estimator.kernel_transformer, kernel.ALEKernel)
+        and meta_res.estimator.get_params().get("null_method") != "reduced_montecarlo"
+    ):
+        good_sensitivity = False
+        good_specificity = True
+    else:
+        good_sensitivity = True
+        good_specificity = True
+
+    _check_p_values(
+        p_array,
+        meta_res.masker,
+        sig_idx,
+        nonsig_idx,
+        ALPHA,
+        ground_truth_foci_ijks,
+        n_iters=None,
+        good_sensitivity=good_sensitivity,
+        good_specificity=good_specificity,
+    )
+
+
+@pytest.mark.performance_smoke
+def test_corr_transform_smoke(meta_cres_small):
+    """Smoke test for corrector transform."""
+    assert isinstance(meta_cres_small, MetaResult)
+
+
+@pytest.mark.performance_correctors
+def test_corr_transform_performance(meta_cres, corr, signal_masks, simulatedata_cbma):
+    """Test corrector transform performance."""
+    _, (ground_truth_foci, _) = simulatedata_cbma
+    mask = meta_cres.masker.mask_img
+    ground_truth_foci_ijks = [tuple(mm2vox(focus, mask.affine)) for focus in ground_truth_foci]
+    sig_idx, nonsig_idx = [
+        meta_cres.masker.transform(img).astype(bool).squeeze() for img in signal_masks
+    ]
+
+    p_array = meta_cres.maps.get("p")
+    if p_array is None or corr.method == "montecarlo":
+        p_array = 10 ** -meta_cres.maps.get("logp_level-voxel_corr-FWE_method-montecarlo")
+
+    n_iters = corr.parameters.get("n_iters")
+
+    # ALE with MKDA kernel with montecarlo correction
+    # combination gives poor performance
+    if (
+        isinstance(meta_cres.estimator, ale.ALE)
+        and isinstance(meta_cres.estimator.kernel_transformer, kernel.MKDAKernel)
+        and meta_cres.estimator.get_params().get("null_method") == "approximate"
+        and corr.method != "montecarlo"
+    ):
+        good_sensitivity = True
+        good_specificity = False
+    elif (
+        isinstance(meta_cres.estimator, ale.ALE)
+        and isinstance(meta_cres.estimator.kernel_transformer, kernel.MKDAKernel)
+        and "montecarlo" in meta_cres.estimator.get_params().get("null_method")
+    ):
+        good_sensitivity = False
+        good_specificity = True
+    elif (
+        isinstance(meta_cres.estimator, ale.ALE)
+        and isinstance(meta_cres.estimator.kernel_transformer, kernel.MKDAKernel)
+        and meta_cres.estimator.get_params().get("null_method") == "approximate"
+        and corr.method == "montecarlo"
+    ):
+        good_sensitivity = False
+        good_specificity = True
+    elif (
+        isinstance(meta_cres.estimator, ale.ALE)
+        and isinstance(meta_cres.estimator.kernel_transformer, kernel.KDAKernel)
+        and (
+            "montecarlo" in meta_cres.estimator.get_params().get("null_method")
+            or (
+                meta_cres.estimator.get_params().get("null_method") == "approximate"
+                and corr.method == "montecarlo"
+            )
+        )
+    ):
+        good_sensitivity = False
+        good_specificity = True
+    elif (
+        isinstance(meta_cres.estimator, ale.ALE)
+        and isinstance(meta_cres.estimator.kernel_transformer, kernel.KDAKernel)
+        and meta_cres.estimator.get_params().get("null_method") == "approximate"
+    ):
+        good_sensitivity = True
+        good_specificity = False
+    elif (
+        isinstance(meta_cres.estimator, mkda.MKDADensity)
+        and isinstance(meta_cres.estimator.kernel_transformer, kernel.ALEKernel)
+        and meta_cres.estimator.get_params().get("null_method") != "reduced_montecarlo"
+        and corr.method != "montecarlo"
+    ):
+        good_sensitivity = False
+        good_specificity = True
+    else:
+        good_sensitivity = True
+        good_specificity = True
+
+    _check_p_values(
+        p_array,
+        meta_cres.masker,
+        sig_idx,
+        nonsig_idx,
+        ALPHA,
+        ground_truth_foci_ijks,
+        n_iters=n_iters,
+        good_sensitivity=good_sensitivity,
+        good_specificity=good_specificity,
+    )
nimare/tests/test_extract.py +46 -0
@@ -0,0 +1,46 @@
+"""Test nimare.extract."""
+
+import os
+from glob import glob
+
+import nimare
+
+
+def test_fetch_neurosynth(tmp_path_factory):
+    """Smoke test for extract.fetch_neurosynth.
+
+    Taken from the Neurosynth Python package.
+    """
+    tmpdir = tmp_path_factory.mktemp("test_fetch_neurosynth")
+    data_files = nimare.extract.fetch_neurosynth(
+        data_dir=tmpdir,
+        version="7",
+        overwrite=False,
+        source="abstract",
+        vocab="terms",
+    )
+    files = glob(os.path.join(tmpdir, "neurosynth", "*"))
+    assert len(files) == 4
+
+    # One set of files found
+    assert isinstance(data_files, list)
+    assert len(data_files) == 1
+
+
+def test_fetch_neuroquery(tmp_path_factory):
+    """Smoke test for extract.fetch_neuroquery."""
+    tmpdir = tmp_path_factory.mktemp("test_fetch_neuroquery")
+    data_files = nimare.extract.fetch_neuroquery(
+        data_dir=tmpdir,
+        version="1",
+        overwrite=False,
+        source="abstract",
+        vocab="neuroquery7547",
+        type="count",
+    )
+    files = glob(os.path.join(tmpdir, "neuroquery", "*"))
+    assert len(files) == 4
+
+    # One set of files found
+    assert isinstance(data_files, list)
+    assert len(data_files) == 1