nimare-0.4.2rc4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +635 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +240 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2rc4.dist-info/LICENSE +21 -0
  115. nimare-0.4.2rc4.dist-info/METADATA +124 -0
  116. nimare-0.4.2rc4.dist-info/RECORD +119 -0
  117. nimare-0.4.2rc4.dist-info/WHEEL +5 -0
  118. nimare-0.4.2rc4.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2rc4.dist-info/top_level.txt +2 -0
nimare/tests/test_meta_ibma.py (new file)
@@ -0,0 +1,240 @@
+"""Test nimare.meta.ibma (image-based meta-analytic estimators)."""
+
+import logging
+import os.path as op
+
+import numpy as np
+import pytest
+from nilearn.input_data import NiftiLabelsMasker
+
+import nimare
+from nimare.correct import FDRCorrector, FWECorrector
+from nimare.meta import ibma
+from nimare.tests.utils import get_test_data_path
+
+
+@pytest.mark.parametrize(
+    "meta,meta_kwargs,corrector,corrector_kwargs,maps",
+    [
+        pytest.param(
+            ibma.Fishers,
+            {},
+            FDRCorrector,
+            {"method": "indep", "alpha": 0.001},
+            ("z", "p", "dof"),
+            id="Fishers",
+        ),
+        pytest.param(
+            ibma.Stouffers,
+            {"use_sample_size": False, "normalize_contrast_weights": False},
+            None,
+            {},
+            ("z", "p", "dof"),
+            id="Stouffers",
+        ),
+        pytest.param(
+            ibma.Stouffers,
+            {"use_sample_size": True, "normalize_contrast_weights": False},
+            None,
+            {},
+            ("z", "p", "dof"),
+            id="Stouffers_sample_weighted",
+        ),
+        pytest.param(
+            ibma.Stouffers,
+            {"use_sample_size": False, "normalize_contrast_weights": True},
+            None,
+            {},
+            ("z", "p", "dof"),
+            id="Stouffers_contrast_weighted",
+        ),
+        pytest.param(
+            ibma.Stouffers,
+            {"use_sample_size": True, "normalize_contrast_weights": True},
+            None,
+            {},
+            ("z", "p", "dof"),
+            id="Stouffers_sample_contrast_weighted",
+        ),
+        pytest.param(
+            ibma.WeightedLeastSquares,
+            {"tau2": 0},
+            None,
+            {},
+            ("z", "p", "est", "se", "dof"),
+            id="WeightedLeastSquares",
+        ),
+        pytest.param(
+            ibma.DerSimonianLaird,
+            {},
+            None,
+            {},
+            ("z", "p", "est", "se", "tau2", "dof"),
+            id="DerSimonianLaird",
+        ),
+        pytest.param(
+            ibma.Hedges,
+            {},
+            None,
+            {},
+            ("z", "p", "est", "se", "tau2", "dof"),
+            id="Hedges",
+        ),
+        pytest.param(
+            ibma.SampleSizeBasedLikelihood,
+            {"method": "ml"},
+            None,
+            {},
+            ("z", "p", "est", "se", "tau2", "sigma2", "dof"),
+            id="SampleSizeBasedLikelihood_ml",
+        ),
+        pytest.param(
+            ibma.SampleSizeBasedLikelihood,
+            {"method": "reml"},
+            None,
+            {},
+            ("z", "p", "est", "se", "tau2", "sigma2", "dof"),
+            id="SampleSizeBasedLikelihood_reml",
+        ),
+        pytest.param(
+            ibma.VarianceBasedLikelihood,
+            {"method": "ml"},
+            None,
+            {},
+            ("z", "p", "est", "se", "tau2", "dof"),
+            id="VarianceBasedLikelihood_ml",
+        ),
+        pytest.param(
+            ibma.VarianceBasedLikelihood,
+            {"method": "reml"},
+            None,
+            {},
+            ("z", "p", "est", "se", "tau2", "dof"),
+            id="VarianceBasedLikelihood_reml",
+        ),
+        pytest.param(
+            ibma.PermutedOLS,
+            {"two_sided": True},
+            FWECorrector,
+            {"method": "montecarlo", "n_iters": 100, "n_cores": 1},
+            ("t", "z", "dof"),
+            id="PermutedOLS",
+        ),
+        pytest.param(
+            ibma.FixedEffectsHedges,
+            {"tau2": 0},
+            None,
+            {},
+            ("z", "p", "est", "se", "dof"),
+            id="FixedEffectsHedges",
+        ),
+    ],
+)
+@pytest.mark.parametrize("aggressive_mask", [True, False], ids=["aggressive", "liberal"])
+def test_ibma_smoke(
+    testdata_ibma,
+    meta,
+    aggressive_mask,
+    meta_kwargs,
+    corrector,
+    corrector_kwargs,
+    maps,
+):
+    """Smoke test for IBMA estimators."""
+    meta = meta(aggressive_mask=aggressive_mask, **meta_kwargs)
+    results = meta.fit(testdata_ibma)
+    for expected_map in maps:
+        assert expected_map in results.maps.keys()
+
+    assert isinstance(results, nimare.results.MetaResult)
+    assert isinstance(results.description_, str)
+    assert results.get_map("z", return_type="array").ndim == 1
+    z_img = results.get_map("z")
+    assert z_img.ndim == 3
+    assert z_img.shape == (10, 10, 10)
+    if corrector:
+        corr = corrector(**corrector_kwargs)
+        corr_results = corr.transform(results)
+        assert isinstance(corr_results, nimare.results.MetaResult)
+        assert isinstance(corr_results.description_, str)
+        assert corr_results.get_map("z", return_type="array").ndim == 1
+        assert corr_results.get_map("z").ndim == 3
+
+
+@pytest.mark.parametrize(
+    "estimator,expectation,masker_source",
+    [
+        (ibma.Fishers, "error", "estimator"),
+        (ibma.Stouffers, "error", "estimator"),
+        (ibma.WeightedLeastSquares, "warning", "estimator"),
+        (ibma.DerSimonianLaird, "warning", "estimator"),
+        (ibma.Hedges, "warning", "estimator"),
+        (ibma.SampleSizeBasedLikelihood, "no warning", "estimator"),
+        (ibma.VarianceBasedLikelihood, "warning", "estimator"),
+        (ibma.PermutedOLS, "no warning", "estimator"),
+    ],
+)
+def test_ibma_with_custom_masker(testdata_ibma, caplog, estimator, expectation, masker_source):
+    """Ensure voxel-to-ROI reduction works, but only for Estimators that allow it.
+
+    Notes
+    -----
+    Currently masker_source is not used, but ultimately we will want to test cases where the
+    Dataset uses a NiftiLabelsMasker.
+    """
+    atlas = op.join(get_test_data_path(), "test_pain_dataset", "atlas.nii.gz")
+    masker = NiftiLabelsMasker(atlas)
+
+    dset = testdata_ibma
+    meta = estimator(mask=masker)
+
+    if expectation == "error":
+        with pytest.raises(ValueError):
+            meta.fit(dset)
+    elif expectation == "warning":
+        with caplog.at_level(logging.WARNING, logger="nimare.meta.ibma"):
+            results = meta.fit(dset)
+        assert "will likely produce biased results" in caplog.text
+        caplog.clear()
+    else:
+        with caplog.at_level(logging.WARNING, logger="nimare.meta.ibma"):
+            results = meta.fit(dset)
+        assert "will likely produce biased results" not in caplog.text
+        caplog.clear()
+
+    # Only check the results if the estimator didn't raise a ValueError
+    if expectation != "error":
+        assert isinstance(results, nimare.results.MetaResult)
+        # There are five "labels", but one of them has no good data,
+        # so its value in the output arrays should be NaN.
+        assert results.maps["z"].shape == (5,)
+        assert np.isnan(results.maps["z"][0])
+        assert results.get_map("z").shape == (10, 10, 10)
+
+
+@pytest.mark.parametrize(
+    "resample_kwargs",
+    [
+        {},
+        {"resample__clip": False, "resample__interpolation": "continuous"},
+    ],
+)
+def test_ibma_resampling(testdata_ibma_resample, resample_kwargs):
+    """Test image-based resampling performance."""
+    meta = ibma.Fishers(**resample_kwargs)
+    results = meta.fit(testdata_ibma_resample)
+
+    assert isinstance(results, nimare.results.MetaResult)
+
+
+@pytest.mark.parametrize("aggressive_mask", [True, False], ids=["aggressive", "liberal"])
+def test_stouffers_multiple_contrasts(testdata_ibma_multiple_contrasts, aggressive_mask):
+    """Test the Stouffers estimator with multiple contrasts."""
+    meta = ibma.Stouffers(aggressive_mask=aggressive_mask)
+    results = meta.fit(testdata_ibma_multiple_contrasts)
+
+    assert isinstance(results, nimare.results.MetaResult)
+    assert results.get_map("z", return_type="array").ndim == 1
+    z_img = results.get_map("z")
+    assert z_img.ndim == 3
+    assert z_img.shape == (10, 10, 10)
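
The estimators exercised in test_meta_ibma.py all follow the same fit-then-correct pattern. As a minimal sketch of that pattern outside the test fixtures (hypothetical: `dset` stands in for a nimare.dataset.Dataset that already contains the image types the chosen estimator needs, such as z maps):

    # Hypothetical usage mirroring test_ibma_smoke; `dset` is an assumed
    # nimare.dataset.Dataset with the required images (e.g., z maps).
    from nimare.correct import FDRCorrector
    from nimare.meta import ibma

    meta = ibma.Fishers(aggressive_mask=True)
    results = meta.fit(dset)  # returns a nimare.results.MetaResult
    z_img = results.get_map("z")  # 3D NIfTI image
    z_arr = results.get_map("z", return_type="array")  # 1D array over in-mask voxels

    corr = FDRCorrector(method="indep", alpha=0.001)
    corr_results = corr.transform(results)  # corrected MetaResult with the same accessors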
nimare/tests/test_meta_kernel.py (new file)
@@ -0,0 +1,209 @@
+"""Test nimare.meta.kernel (CBMA kernel estimators)."""
+
+import nibabel as nib
+import numpy as np
+import pytest
+from scipy.ndimage import center_of_mass
+
+from nimare.meta import kernel
+from nimare.utils import get_masker, get_template, mm2vox
+
+
+@pytest.mark.parametrize(
+    "kern, res, param, return_type, kwargs",
+    [
+        (kernel.ALEKernel, 1, "dataset", "image", {"sample_size": 20}),
+        (kernel.ALEKernel, 2, "dataset", "image", {"sample_size": 20}),
+        (kernel.ALEKernel, 1, "dataframe", "image", {"sample_size": 20}),
+        (kernel.ALEKernel, 2, "dataframe", "image", {"sample_size": 20}),
+        (kernel.ALEKernel, 1, "dataset", "array", {"sample_size": 20}),
+        (kernel.ALEKernel, 2, "dataset", "array", {"sample_size": 20}),
+        (kernel.ALEKernel, 1, "dataframe", "array", {"sample_size": 20}),
+        (kernel.ALEKernel, 2, "dataframe", "array", {"sample_size": 20}),
+        (kernel.MKDAKernel, 1, "dataset", "image", {"r": 4, "value": 1}),
+        (kernel.MKDAKernel, 2, "dataset", "image", {"r": 4, "value": 1}),
+        (kernel.MKDAKernel, 1, "dataframe", "image", {"r": 4, "value": 1}),
+        (kernel.MKDAKernel, 2, "dataframe", "image", {"r": 4, "value": 1}),
+        (kernel.KDAKernel, 1, "dataset", "image", {"r": 4, "value": 1}),
+        (kernel.KDAKernel, 2, "dataset", "image", {"r": 4, "value": 1}),
+        (kernel.KDAKernel, 1, "dataframe", "image", {"r": 4, "value": 1}),
+        (kernel.KDAKernel, 2, "dataframe", "image", {"r": 4, "value": 1}),
+    ],
+)
+def test_kernel_peaks(testdata_cbma, tmp_path_factory, kern, res, param, return_type, kwargs):
+    """Peak/COMs of kernel maps should match the foci fed in (assuming focus isn't masked out).
+
+    Notes
+    -----
+    Remember that dataframe --> dataset won't work.
+    Test on multiple template resolutions.
+    """
+    tmpdir = tmp_path_factory.mktemp("test_kernel_peaks")
+    testdata_cbma.update_path(tmpdir)
+
+    id_ = "pain_03.nidm-1"
+
+    template = get_template(space=f"mni152_{res}mm", mask="brain")
+    masker = get_masker(template)
+
+    xyz = testdata_cbma.coordinates.loc[testdata_cbma.coordinates["id"] == id_, ["x", "y", "z"]]
+    ijk = mm2vox(xyz, masker.mask_img.affine)
+    ijk = np.squeeze(ijk.astype(int))
+
+    if param == "dataframe":
+        input_ = testdata_cbma.coordinates.copy()
+    elif param == "dataset":
+        input_ = testdata_cbma.copy()
+
+    kern_instance = kern(**kwargs)
+    output = kern_instance.transform(input_, masker, return_type=return_type)
+
+    if return_type == "image":
+        kern_data = output[0].get_fdata()
+    elif return_type == "array":
+        kern_data = np.squeeze(masker.inverse_transform(output[:1, :]).get_fdata())
+    else:
+        f = output.images.loc[output.images["id"] == id_, kern_instance.image_type].values[0]
+        kern_data = nib.load(f).get_fdata()
+
+    if isinstance(kern_instance, kernel.ALEKernel):
+        loc_idx = np.array(np.where(kern_data == np.max(kern_data))).T
+    elif isinstance(kern_instance, (kernel.MKDAKernel, kernel.KDAKernel)):
+        loc_idx = np.array(center_of_mass(kern_data)).astype(int).T
+    else:
+        raise Exception(f"A {type(kern_instance)}? Why?")
+
+    loc_ijk = np.squeeze(loc_idx)
+
+    assert np.array_equal(ijk, loc_ijk)
+
+
+@pytest.mark.parametrize(
+    "kern, kwargs",
+    [
+        (kernel.ALEKernel, {"sample_size": 20}),
+        (kernel.MKDAKernel, {"r": 4, "value": 1}),
+        (kernel.KDAKernel, {"r": 4, "value": 1}),
+    ],
+)
+def test_kernel_transform_attributes(kern, kwargs):
+    """Check that attributes are added at transform."""
+    kern_instance = kern(**kwargs)
+    assert not hasattr(kern_instance, "filename_pattern")
+    assert not hasattr(kern_instance, "image_type")
+    kern_instance._infer_names()
+    assert hasattr(kern_instance, "filename_pattern")
+    assert hasattr(kern_instance, "image_type")
+
+
+@pytest.mark.parametrize(
+    "kern, kwargs, set_kwargs",
+    [
+        (kernel.ALEKernel, {"sample_size": 20}, {"sample_size": None, "fwhm": 10}),
+        (kernel.MKDAKernel, {"r": 4, "value": 1}, {"r": 10, "value": 3}),
+        (kernel.KDAKernel, {"r": 4, "value": 1}, {"r": 10, "value": 3}),
+    ],
+)
+def test_kernel_smoke(testdata_cbma, kern, kwargs, set_kwargs):
+    """Smoke test for different kernel transformers and check that you can reset params."""
+    coordinates = testdata_cbma.coordinates.copy()
+
+    kern_instance = kern(**kwargs)
+    ma_maps = kern_instance.transform(coordinates, testdata_cbma.masker, return_type="image")
+    assert len(ma_maps) == len(testdata_cbma.ids) - 2
+    ma_maps = kern_instance.transform(coordinates, testdata_cbma.masker, return_type="array")
+    assert ma_maps.shape[0] == len(testdata_cbma.ids) - 2
+
+    # Test set_params
+    kern_instance.set_params(**set_kwargs)
+    kern_instance2 = kern(**set_kwargs)
+    ma_maps1 = kern_instance.transform(coordinates, testdata_cbma.masker, return_type="array")
+    ma_maps2 = kern_instance2.transform(coordinates, testdata_cbma.masker, return_type="array")
+    assert ma_maps1.shape[0] == ma_maps2.shape[0] == len(testdata_cbma.ids) - 2
+    assert np.array_equal(ma_maps1, ma_maps2)
+
+
+def test_ALEKernel_fwhm(testdata_cbma):
+    """Peaks of ALE kernel maps should match the foci fed in (assuming focus isn't masked out).
+
+    Test with explicit FWHM.
+    """
+    coordinates = testdata_cbma.coordinates.copy()
+
+    id_ = "pain_03.nidm-1"
+    kern = kernel.ALEKernel(fwhm=10)
+    ma_maps = kern.transform(coordinates, masker=testdata_cbma.masker, return_type="image")
+
+    xyz = coordinates.loc[coordinates["id"] == id_, ["x", "y", "z"]]
+    ijk = mm2vox(xyz, testdata_cbma.masker.mask_img.affine)
+    ijk = np.squeeze(ijk.astype(int))
+
+    kern_data = ma_maps[0].get_fdata()
+    max_idx = np.array(np.where(kern_data == np.max(kern_data))).T
+    max_ijk = np.squeeze(max_idx)
+    assert np.array_equal(ijk, max_ijk)
+
+
+def test_ALEKernel_sample_size(testdata_cbma):
+    """Peaks of ALE kernel maps should match the foci fed in (assuming focus isn't masked out).
+
+    Test with explicit sample size.
+    """
+    coordinates = testdata_cbma.coordinates.copy()
+
+    id_ = "pain_03.nidm-1"
+    kern = kernel.ALEKernel(sample_size=20)
+    ma_maps = kern.transform(coordinates, masker=testdata_cbma.masker, return_type="image")
+
+    xyz = coordinates.loc[coordinates["id"] == id_, ["x", "y", "z"]]
+    ijk = mm2vox(xyz, testdata_cbma.masker.mask_img.affine)
+    ijk = np.squeeze(ijk.astype(int))
+
+    kern_data = ma_maps[0].get_fdata()
+    max_idx = np.array(np.where(kern_data == np.max(kern_data))).T
+    max_ijk = np.squeeze(max_idx)
+    assert np.array_equal(ijk, max_ijk)
+
+
+def test_ALEKernel_memory(testdata_cbma, tmp_path_factory):
+    """Test ALEKernel with memory caching enabled."""
+    cachedir = tmp_path_factory.mktemp("test_ALE_memory")
+
+    coord = testdata_cbma.coordinates.copy()
+
+    kern_cached = kernel.ALEKernel(sample_size=20, memory=str(cachedir), memory_level=2)
+    ma_maps_cached = kern_cached.transform(coord, masker=testdata_cbma.masker, return_type="array")
+
+    kern = kernel.ALEKernel(sample_size=20, memory=None)
+    ma_maps = kern.transform(coord, masker=testdata_cbma.masker, return_type="array")
+
+    assert np.array_equal(ma_maps_cached, ma_maps)
+
+    # Test that memory is actually used
+    kern_cached_fast = kernel.ALEKernel(sample_size=20, memory=str(cachedir), memory_level=2)
+    ma_maps_cached_fast = kern_cached_fast.transform(
+        coord, masker=testdata_cbma.masker, return_type="array"
+    )
+
+    assert np.array_equal(ma_maps_cached_fast, ma_maps)
+
+
+def test_MKDA_kernel_sum_across(testdata_cbma):
+    """Test if creating a summary array is equivalent to summing across the sparse array."""
+    kern = kernel.MKDAKernel(r=10, value=1)
+    coordinates = testdata_cbma.coordinates.copy()
+    sparse_ma_maps = kern.transform(coordinates, masker=testdata_cbma.masker, return_type="sparse")
+    summary_map = kern.transform(
+        coordinates, masker=testdata_cbma.masker, return_type="summary_array"
+    )
+
+    summary_sparse_ma_map = sparse_ma_maps.sum(axis=0)
+    mask_data = testdata_cbma.masker.mask_img.get_fdata().astype(bool)
+
+    # Indexing the sparse array is slow, perform masking in the dense array
+    summary_sparse_ma_map = summary_sparse_ma_map.todense().reshape(-1)
+    summary_sparse_ma_map = summary_sparse_ma_map[mask_data.reshape(-1)]
+
+    assert (
+        np.testing.assert_array_equal(summary_map, summary_sparse_ma_map.astype(np.int32)) is None
+    )
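
Everything in test_meta_kernel.py goes through the kernel transformers' transform method, which turns per-study peak coordinates into modeled-activation maps. A minimal sketch of that call, assuming `dset` is a coordinate-based nimare.dataset.Dataset:

    # Hypothetical usage mirroring test_kernel_smoke; `dset` is an assumed
    # coordinate-based nimare.dataset.Dataset.
    from nimare.meta import kernel

    kern = kernel.MKDAKernel(r=10, value=1)

    # One modeled-activation image per experiment in the Dataset
    ma_images = kern.transform(dset.coordinates, dset.masker, return_type="image")

    # The same maps as a 2D (experiments x voxels) array
    ma_array = kern.transform(dset.coordinates, dset.masker, return_type="array")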
nimare/tests/test_meta_mkda.py (new file)
@@ -0,0 +1,234 @@
+"""Test nimare.meta.mkda (KDA-based meta-analytic algorithms)."""
+
+import logging
+
+import numpy as np
+
+import nimare
+from nimare.correct import FDRCorrector, FWECorrector
+from nimare.meta import KDA, MKDAChi2, MKDADensity, MKDAKernel
+
+
+def test_MKDADensity_kernel_instance_with_kwargs(testdata_cbma):
+    """Smoke test for MKDADensity with a kernel transformer object.
+
+    Kernel arguments are also provided, which should result in a warning, but the original
+    object's parameters should remain untouched.
+    """
+    kern = MKDAKernel(r=2)
+    meta = MKDADensity(kern, kernel__r=6, null_method="montecarlo", n_iters=10)
+
+    assert meta.kernel_transformer.get_params().get("r") == 2
+
+
+def test_MKDADensity_kernel_class(testdata_cbma):
+    """Smoke test for MKDADensity with a kernel transformer class."""
+    meta = MKDADensity(MKDAKernel, kernel__r=5, null_method="montecarlo", n_iters=10)
+    results = meta.fit(testdata_cbma)
+    assert isinstance(results, nimare.results.MetaResult)
+
+
+def test_MKDADensity_kernel_instance(testdata_cbma):
+    """Smoke test for MKDADensity with a kernel transformer object."""
+    kern = MKDAKernel(r=5)
+    meta = MKDADensity(kern, null_method="montecarlo", n_iters=10)
+    results = meta.fit(testdata_cbma)
+    assert isinstance(results, nimare.results.MetaResult)
+
+
+def test_MKDADensity_approximate_null(testdata_cbma_full, caplog):
+    """Smoke test for MKDADensity with the "approximate" null_method."""
+    meta = MKDADensity(null_method="approximate")
+    results = meta.fit(testdata_cbma_full)
+    corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=1)
+    corr_results = corr.transform(results)
+    assert isinstance(results, nimare.results.MetaResult)
+    assert isinstance(results.description_, str)
+    assert isinstance(corr_results, nimare.results.MetaResult)
+    assert isinstance(corr_results.description_, str)
+
+    # Check that the vfwe_only option does not work
+    corr2 = FWECorrector(
+        method="montecarlo",
+        voxel_thresh=0.001,
+        n_iters=5,
+        n_cores=1,
+        vfwe_only=True,
+    )
+    with caplog.at_level(logging.WARNING):
+        corr_results2 = corr2.transform(results)
+
+    assert "Running permutations from scratch." in caplog.text
+
+    assert isinstance(corr_results2, nimare.results.MetaResult)
+    assert "logp_level-voxel_corr-FWE_method-montecarlo" in corr_results2.maps
+    assert "logp_desc-size_level-cluster_corr-FWE_method-montecarlo" not in corr_results2.maps
+
+
+def test_MKDADensity_montecarlo_null(testdata_cbma):
+    """Smoke test for MKDADensity with the "montecarlo" null_method."""
+    meta = MKDADensity(null_method="montecarlo", n_iters=10)
+    results = meta.fit(testdata_cbma)
+    corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=1)
+    corr_results = corr.transform(results)
+    assert isinstance(results, nimare.results.MetaResult)
+    assert isinstance(results.description_, str)
+    assert isinstance(corr_results, nimare.results.MetaResult)
+    assert isinstance(corr_results.description_, str)
+
+    # Check that the vfwe_only option works
+    corr2 = FWECorrector(
+        method="montecarlo",
+        voxel_thresh=0.001,
+        n_iters=5,
+        n_cores=1,
+        vfwe_only=True,
+    )
+    corr_results2 = corr2.transform(results)
+    assert isinstance(corr_results2, nimare.results.MetaResult)
+    assert "logp_level-voxel_corr-FWE_method-montecarlo" in corr_results2.maps
+    assert "logp_desc-size_level-cluster_corr-FWE_method-montecarlo" not in corr_results2.maps
+
+
+def test_MKDAChi2_fdr(testdata_cbma):
+    """Smoke test for MKDAChi2."""
+    meta = MKDAChi2()
+    results = meta.fit(testdata_cbma, testdata_cbma)
+    corr = FDRCorrector(method="indep", alpha=0.001)
+    corr_results = corr.transform(results)
+    assert isinstance(results, nimare.results.MetaResult)
+    assert isinstance(results.description_, str)
+    assert isinstance(corr_results, nimare.results.MetaResult)
+    assert isinstance(corr_results.description_, str)
+
+    methods = FDRCorrector.inspect(results)
+    assert methods == ["indep", "negcorr"]
+
+
+def test_MKDAChi2_fwe_1core(testdata_cbma):
+    """Smoke test for MKDAChi2."""
+    meta = MKDAChi2()
+    results = meta.fit(testdata_cbma, testdata_cbma)
+    valid_methods = FWECorrector.inspect(results)
+    assert "montecarlo" in valid_methods
+
+    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
+    corr_results = corr.transform(results)
+    assert isinstance(results, nimare.results.MetaResult)
+    assert isinstance(results.description_, str)
+    assert isinstance(corr_results, nimare.results.MetaResult)
+    assert isinstance(corr_results.description_, str)
+    assert (
+        "values_desc-pFgA_level-voxel_corr-fwe_method-montecarlo"
+        in corr_results.estimator.null_distributions_.keys()
+    )
+    assert (
+        "values_desc-pAgF_level-voxel_corr-fwe_method-montecarlo"
+        in corr_results.estimator.null_distributions_.keys()
+    )
+
+
+def test_MKDAChi2_fwe_2core(testdata_cbma):
+    """Smoke test for MKDAChi2."""
+    meta = MKDAChi2()
+    results = meta.fit(testdata_cbma, testdata_cbma)
+    assert isinstance(results, nimare.results.MetaResult)
+    corr_2core = FWECorrector(method="montecarlo", n_iters=5, n_cores=2)
+    cres_2core = corr_2core.transform(results)
+    assert isinstance(cres_2core, nimare.results.MetaResult)
+
+
+def test_KDA_approximate_null(testdata_cbma):
+    """Smoke test for KDA with approximate null and FWE correction."""
+    meta = KDA(null_method="approximate")
+    results = meta.fit(testdata_cbma)
+    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
+    corr_results = corr.transform(results)
+    assert isinstance(results, nimare.results.MetaResult)
+    assert isinstance(results.description_, str)
+    assert results.get_map("p", return_type="array").dtype == np.float64
+    assert isinstance(corr_results, nimare.results.MetaResult)
+    assert isinstance(corr_results.description_, str)
+    assert (
+        corr_results.get_map(
+            "logp_level-voxel_corr-FWE_method-montecarlo", return_type="array"
+        ).dtype
+        == np.float64
+    )
+    assert (
+        corr_results.get_map(
+            "logp_desc-size_level-cluster_corr-FWE_method-montecarlo", return_type="array"
+        ).dtype
+        == np.float64
+    )
+    assert (
+        corr_results.get_map(
+            "logp_desc-mass_level-cluster_corr-FWE_method-montecarlo", return_type="array"
+        ).dtype
+        == np.float64
+    )
+
+
+def test_KDA_fwe_1core(testdata_cbma):
+    """Smoke test for KDA with montecarlo null and FWE correction."""
+    meta = KDA(null_method="montecarlo", n_iters=10)
+    results = meta.fit(testdata_cbma)
+    corr = FWECorrector(method="montecarlo", n_iters=5, n_cores=1)
+    corr_results = corr.transform(results)
+    assert isinstance(results, nimare.results.MetaResult)
+    assert results.get_map("p", return_type="array").dtype == np.float64
+    assert isinstance(corr_results, nimare.results.MetaResult)
+    assert (
+        corr_results.get_map(
+            "logp_level-voxel_corr-FWE_method-montecarlo", return_type="array"
+        ).dtype
+        == np.float64
+    )
+    assert (
+        corr_results.get_map(
+            "logp_desc-mass_level-cluster_corr-FWE_method-montecarlo", return_type="array"
+        ).dtype
+        == np.float64
+    )
+    assert (
+        corr_results.get_map(
+            "logp_desc-size_level-cluster_corr-FWE_method-montecarlo", return_type="array"
+        ).dtype
+        == np.float64
+    )
+
+
+def test_MKDADensity_approximate_montecarlo_convergence(testdata_cbma_full):
+    """Evaluate convergence between approximate and montecarlo null methods in MKDA."""
+    est_a = MKDADensity(null_method="approximate")
+    n_iters = 10
+    est_e = MKDADensity(null_method="montecarlo", n_iters=n_iters)
+    res_a = est_a.fit(testdata_cbma_full)
+    res_e = est_e.fit(testdata_cbma_full)
+    # Get smallest p-value above 0 from the montecarlo estimator; above this,
+    # the two should converge reasonably closely.
+    min_p = 1 / n_iters
+    p_idx = res_e.maps["p"] > min_p
+    p_approximate = res_a.maps["p"][p_idx]
+    p_montecarlo = res_e.maps["p"][p_idx]
+    # Correlation must be near unity and mean difference should be tiny
+    assert np.corrcoef(p_approximate, p_montecarlo)[0, 1] > 0.98
+    assert (p_approximate - p_montecarlo).mean() < 1e-3
+
+
+def test_KDA_approximate_montecarlo_convergence(testdata_cbma_full):
+    """Evaluate convergence between approximate and montecarlo null methods in KDA."""
+    est_a = KDA(null_method="approximate")
+    n_iters = 10
+    est_e = KDA(null_method="montecarlo", n_iters=n_iters)
+    res_a = est_a.fit(testdata_cbma_full)
+    res_e = est_e.fit(testdata_cbma_full)
+    # Get smallest p-value above 0 from the montecarlo estimator; above this,
+    # the two should converge reasonably closely.
+    min_p = 1 / n_iters
+    p_idx = res_e.maps["p"] > min_p
+    p_approximate = res_a.maps["p"][p_idx]
+    p_montecarlo = res_e.maps["p"][p_idx]
+    # Correlation must be near unity and mean difference should be tiny
+    assert np.corrcoef(p_approximate, p_montecarlo)[0, 1] > 0.98
+    assert (p_approximate - p_montecarlo).mean() < 1e-3
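
The MKDA and KDA tests above repeat the estimator-plus-corrector pattern used throughout the package. A minimal sketch, assuming `dset` is a coordinate-based nimare.dataset.Dataset; the small n_iters values mirror the smoke tests and would be much larger in a real analysis:

    # Hypothetical usage mirroring the MKDADensity tests; `dset` is an assumed
    # coordinate-based nimare.dataset.Dataset, and n_iters is kept test-sized.
    from nimare.correct import FWECorrector
    from nimare.meta import MKDADensity, MKDAKernel

    meta = MKDADensity(MKDAKernel(r=5), null_method="montecarlo", n_iters=10)
    results = meta.fit(dset)

    corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=1)
    corr_results = corr.transform(results)

    # Corrected maps are keyed by BIDS-like names, e.g.:
    logp_vfwe = corr_results.get_map(
        "logp_level-voxel_corr-FWE_method-montecarlo", return_type="array"
    )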