nimare-0.4.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +667 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +294 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2.dist-info/LICENSE +21 -0
  115. nimare-0.4.2.dist-info/METADATA +124 -0
  116. nimare-0.4.2.dist-info/RECORD +119 -0
  117. nimare-0.4.2.dist-info/WHEEL +5 -0
  118. nimare-0.4.2.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2.dist-info/top_level.txt +2 -0
nimare/tests/test_nimads.py
@@ -0,0 +1,21 @@
+ """Test NiMADS functionality."""
+
+ from nimare import nimads
+ from nimare.dataset import Dataset
+
+
+ def test_load_nimads(example_nimads_studyset, example_nimads_annotation):
+     """Test loading a NiMADS studyset."""
+     studyset = nimads.Studyset(example_nimads_studyset)
+     studyset.annotations = example_nimads_annotation
+     # filter the studyset to only include analyses with include=True
+     annotation = studyset.annotations[0]
+     analysis_ids = [n.analysis.id for n in annotation.notes if n.note["include"]]
+     analysis_ids = analysis_ids[:5]
+     filtered_studyset = studyset.slice(analyses=analysis_ids)
+     # Combine analyses after filtering
+     filtered_studyset = filtered_studyset.combine_analyses()
+
+     assert isinstance(filtered_studyset, nimads.Studyset)
+     dataset = filtered_studyset.to_dataset()
+     assert isinstance(dataset, Dataset)
nimare/tests/test_reports.py
@@ -0,0 +1,110 @@
+ """Test nimare.reports."""
+
+ import os.path as op
+
+ import pytest
+
+ from nimare.correct import FWECorrector
+ from nimare.diagnostics import FocusCounter, Jackknife
+ from nimare.meta.cbma import ALESubtraction
+ from nimare.meta.ibma import FixedEffectsHedges, Stouffers
+ from nimare.reports.base import run_reports
+ from nimare.workflows import CBMAWorkflow, IBMAWorkflow, PairwiseCBMAWorkflow
+
+
+ @pytest.mark.parametrize(
+     "estimator,corrector,diagnostics,meta_type",
+     [
+         ("ale", FWECorrector(method="montecarlo", n_iters=10), "jackknife", "cbma"),
+         ("kda", "fdr", "focuscounter", "cbma"),
+         (
+             "mkdachi2",
+             FWECorrector(method="montecarlo", n_iters=10),
+             Jackknife(voxel_thresh=0.1),
+             "pairwise_cbma",
+         ),
+         (
+             ALESubtraction(n_iters=10),
+             "fdr",
+             FocusCounter(voxel_thresh=0.01, display_second_group=True),
+             "pairwise_cbma",
+         ),
+     ],
+ )
+ def test_reports_function_smoke(
+     tmp_path_factory,
+     testdata_cbma_full,
+     estimator,
+     corrector,
+     diagnostics,
+     meta_type,
+ ):
+     """Run smoke test for CBMA workflow."""
+     tmpdir = tmp_path_factory.mktemp("test_reports_function_smoke")
+
+     if meta_type == "cbma":
+         workflow = CBMAWorkflow(
+             estimator=estimator,
+             corrector=corrector,
+             diagnostics=diagnostics,
+             output_dir=tmpdir,
+         )
+         results = workflow.fit(testdata_cbma_full)
+
+     elif meta_type == "pairwise_cbma":
+         dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
+         dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
+
+         workflow = PairwiseCBMAWorkflow(
+             estimator=estimator,
+             corrector=corrector,
+             diagnostics=diagnostics,
+             output_dir=tmpdir,
+         )
+         results = workflow.fit(dset1, dset2)
+
+     run_reports(results, tmpdir)
+
+     filename = "report.html"
+     outpath = op.join(tmpdir, filename)
+     assert op.isfile(outpath)
+
+
+ @pytest.mark.parametrize("aggressive_mask", [True, False], ids=["aggressive", "liberal"])
+ def test_reports_ibma_smoke(tmp_path_factory, testdata_ibma, aggressive_mask):
+     """Smoke test for IBMA reports."""
+     tmpdir = tmp_path_factory.mktemp("test_reports_ibma_smoke")
+
+     # Generate a report with z maps as inputs
+     stouffers_dir = op.join(tmpdir, "stouffers")
+     workflow = IBMAWorkflow(
+         estimator=Stouffers(aggressive_mask=aggressive_mask),
+         corrector="fdr",
+         diagnostics="jackknife",
+         voxel_thresh=3.2,
+         output_dir=stouffers_dir,
+     )
+     results = workflow.fit(testdata_ibma)
+
+     run_reports(results, stouffers_dir)
+
+     filename = "report.html"
+     outpath = op.join(stouffers_dir, filename)
+     assert op.isfile(outpath)
+
+     # Generate a report with t maps as inputs
+     hedges_dir = op.join(tmpdir, "hedges")
+     workflow = IBMAWorkflow(
+         estimator=FixedEffectsHedges(aggressive_mask=aggressive_mask),
+         corrector="fdr",
+         diagnostics="jackknife",
+         voxel_thresh=3.2,
+         output_dir=hedges_dir,
+     )
+     results = workflow.fit(testdata_ibma)
+
+     run_reports(results, hedges_dir)
+
+     filename = "report.html"
+     outpath = op.join(hedges_dir, filename)
+     assert op.isfile(outpath)
nimare/tests/test_stats.py
@@ -0,0 +1,101 @@
+ """Test nimare.stats."""
+
+ import math
+
+ import numpy as np
+
+ from nimare.stats import null_to_p, nullhist_to_p
+
+
+ def test_null_to_p_float():
+     """Test null_to_p with single float input, assuming asymmetric null dist."""
+     null = [-10, -9, -9, -3, -2, -1, -1, 0, 1, 1, 1, 2, 3, 3, 4, 4, 7, 8, 8, 9]
+
+     # Two-tailed
+     assert math.isclose(null_to_p(0, null, "two"), 0.8)
+     assert math.isclose(null_to_p(9, null, "two"), 0.1)
+     assert math.isclose(null_to_p(10, null, "two"), 0.05)
+     assert math.isclose(null_to_p(-9, null, "two"), 0.3)
+     assert math.isclose(null_to_p(-10, null, "two"), 0.1)
+     # Still 0.05 because minimum valid p-value is 1 / len(null)
+     result = null_to_p(20, null, "two")
+     assert result == null_to_p(-20, null, "two")
+     assert math.isclose(result, 0.05)
+
+     # Left/lower-tailed
+     assert math.isclose(null_to_p(9, null, "lower"), 0.95)
+     assert math.isclose(null_to_p(-9, null, "lower"), 0.15)
+     assert math.isclose(null_to_p(0, null, "lower"), 0.4)
+
+     # Right/upper-tailed
+     assert math.isclose(null_to_p(9, null, "upper"), 0.05)
+     assert math.isclose(null_to_p(-9, null, "upper"), 0.95)
+     assert math.isclose(null_to_p(0, null, "upper"), 0.65)
+
+     # Test that 1/n(null) is preserved with extreme values
+     nulldist = np.random.normal(size=10000)
+     assert math.isclose(null_to_p(20, nulldist, "two"), 1 / 10000)
+     assert math.isclose(null_to_p(20, nulldist, "lower"), 1 - 1 / 10000)
+
+
+ def test_null_to_p_float_symmetric():
+     """Test null_to_p with single float input, assuming symmetric null dist."""
+     null = [-10, -9, -9, -3, -2, -1, -1, 0, 1, 1, 1, 2, 3, 3, 4, 4, 7, 8, 8, 9]
+
+     # Only need to test two-tailed; symmetry is irrelevant for one-tailed
+     assert math.isclose(null_to_p(0, null, "two", symmetric=True), 0.95)
+     result = null_to_p(9, null, "two", symmetric=True)
+     assert result == null_to_p(-9, null, "two", symmetric=True)
+     assert math.isclose(result, 0.2)
+     result = null_to_p(10, null, "two", symmetric=True)
+     assert result == null_to_p(-10, null, "two", symmetric=True)
+     assert math.isclose(result, 0.05)
+     # Still 0.05 because minimum valid p-value is 1 / len(null)
+     result = null_to_p(20, null, "two", symmetric=True)
+     assert result == null_to_p(-20, null, "two", symmetric=True)
+     assert math.isclose(result, 0.05)
+
+
+ def test_null_to_p_array():
+     """Test nimare.stats.null_to_p with 1d array input."""
+     N = 10000
+     nulldist = np.random.normal(size=N)
+     t = np.sort(np.random.normal(size=N))
+     p = np.sort(null_to_p(t, nulldist))
+     assert p.shape == (N,)
+     assert (p < 1).all()
+     assert (p > 0).all()
+     # Resulting distribution should be roughly uniform
+     assert np.abs(p.mean() - 0.5) < 0.02
+     assert np.abs(p.var() - 1 / 12) < 0.02
+
+
+ def test_nullhist_to_p():
+     """Test nimare.stats.nullhist_to_p."""
+     n_voxels = 5
+
+     # Test cross-voxel null distribution
+     histogram_bins = np.arange(0, 101, 1)  # 101 bins
+     histogram_weights = np.ones(histogram_bins.shape)
+     histogram_weights[-1] = 0  # last bin is outside range, so there are 100 bins with values
+
+     # When input is a single value
+     assert math.isclose(nullhist_to_p(0, histogram_weights, histogram_bins), 1.0)
+     assert math.isclose(nullhist_to_p(1, histogram_weights, histogram_bins), 0.99)
+     assert math.isclose(nullhist_to_p(99, histogram_weights, histogram_bins), 0.01)
+     assert math.isclose(nullhist_to_p(100, histogram_weights, histogram_bins), 0.01)
+
+     # When input is an array
+     assert np.allclose(
+         nullhist_to_p([0, 1, 99, 100, 101], histogram_weights, histogram_bins),
+         np.array([1.0, 0.99, 0.01, 0.01, 0.01]),
+     )
+
+     # Test voxel-wise null distributions
+     histogram_weights = np.ones((histogram_bins.shape[0], n_voxels))
+     histogram_weights[-1, :] = 0  # last bin is outside range, so there are 100 bins with values
+
+     assert np.allclose(
+         nullhist_to_p([0, 1, 99, 100, 101], histogram_weights, histogram_bins),
+         np.array([1.0, 0.99, 0.01, 0.01, 0.01]),
+     )
nimare/tests/test_transforms.py
@@ -0,0 +1,272 @@
+ """Test nimare.transforms."""
+
+ import re
+
+ import nibabel as nib
+ import numpy as np
+ import pytest
+
+ from nimare import transforms
+
+
+ def test_ImageTransformer(testdata_ibma):
+     """Smoke test on transforms.ImageTransformer."""
+     dset = testdata_ibma
+     z_files = dset.images["z"].tolist()
+     z_transformer = transforms.ImageTransformer(target="z")
+     new_dset = z_transformer.transform(dset)
+     new_z_files = new_dset.images["z"].tolist()
+     assert z_files[:-1] == new_z_files[:-1]
+     # new z statistic map should have 3 dimensions
+     assert len(nib.load(new_z_files[-1]).shape) == 3
+     assert all([nzf is not None for nzf in new_z_files])
+
+     varcope_files = dset.images["varcope"].tolist()
+     varcope_p_transformer = transforms.ImageTransformer(target=["varcope", "p"])
+     new_dset = varcope_p_transformer.transform(dset)
+     new_varcope_files = new_dset.images["varcope"].tolist()
+     assert not all([isinstance(vf, str) for vf in varcope_files])
+     assert all([isinstance(vf, str) for vf in new_varcope_files])
+     new_p_files = new_dset.images["p"].tolist()
+     assert all([isinstance(pf, str) for pf in new_p_files])
+
+     t_files = dset.images["t"].tolist()
+     t_transformer = transforms.ImageTransformer(target="t")
+     new_dset = t_transformer.transform(dset)
+     new_t_files = new_dset.images["t"].tolist()
+     assert t_files[:-1] == new_t_files[:-1]
+
+
+ def test_transform_images(testdata_ibma):
+     """Smoke test on transforms.transform_images."""
+     dset = testdata_ibma
+     z_files = dset.images["z"].tolist()
+     new_images = transforms.transform_images(
+         dset.images, target="z", masker=dset.masker, metadata_df=dset.metadata
+     )
+     new_z_files = new_images["z"].tolist()
+     assert z_files[:-1] == new_z_files[:-1]
+     # new z statistic map should have 3 dimensions
+     assert len(nib.load(new_z_files[-1]).shape) == 3
+     assert all([nzf is not None for nzf in new_z_files])
+
+     varcope_files = dset.images["varcope"].tolist()
+     new_images = transforms.transform_images(
+         dset.images, target="varcope", masker=dset.masker, metadata_df=dset.metadata
+     )
+     new_varcope_files = new_images["varcope"].tolist()
+     assert not all([isinstance(vf, str) for vf in varcope_files])
+     assert all([isinstance(vf, str) for vf in new_varcope_files])
+
+
+ def test_sample_sizes_to_dof():
+     """Unit tests for transforms.sample_sizes_to_dof."""
+     sample_sizes = [20, 20, 20]
+     dof = 57
+     assert transforms.sample_sizes_to_dof(sample_sizes) == dof
+     sample_sizes = [20]
+     dof = 19
+     assert transforms.sample_sizes_to_dof(sample_sizes) == dof
+
+
+ def test_sample_sizes_to_sample_size():
+     """Unit tests for transforms.sample_sizes_to_sample_size."""
+     sample_sizes = [20, 20, 20]
+     sample_size = 60
+     assert transforms.sample_sizes_to_sample_size(sample_sizes) == sample_size
+     sample_sizes = [20]
+     sample_size = 20
+     assert transforms.sample_sizes_to_sample_size(sample_sizes) == sample_size
+
+
+ def test_t_to_z():
+     """Smoke test for transforms.t_to_z."""
+     t_arr = np.random.random(100)
+     z_arr = transforms.t_to_z(t_arr, dof=20)
+     assert z_arr.shape == t_arr.shape
+     t_arr2 = transforms.z_to_t(z_arr, dof=20)
+     assert np.allclose(t_arr, t_arr2)
+
+
+ NO_OUTPUT_PATTERN = re.compile(
+     (
+         r"^No clusters were found for ([\w\.0-9+-]+) at a threshold of [0-9]+\.[0-9]+$|"
+         r"No Z or p map for ([\w-]+), skipping..."
+     )
+ )
+
+
+ @pytest.mark.parametrize(
+     "kwargs,drop_data,add_data",
+     [
+         ({"merge_strategy": "fill"}, "z", "p"),
+         ({"merge_strategy": "replace"}, None, None),
+         ({"merge_strategy": "demolish", "remove_subpeaks": True}, None, None),
+         ({"merge_strategy": "fill", "two_sided": True}, "z", "p"),
+         (
+             {
+                 "merge_strategy": "demolish",
+                 "two_sided": True,
+                 "z_threshold": 1.9,
+             },
+             None,
+             None,
+         ),
+         ({"merge_strategy": "demolish", "z_threshold": 10.0}, None, None),
+     ],
+ )
+ def test_images_to_coordinates(tmp_path, caplog, testdata_ibma, kwargs, drop_data, add_data):
+     """Test conversion of statistical images to coordinates."""
+     # only catch warnings from the transforms logger
+     caplog.set_level("WARNING", logger=transforms.LGR.name)
+
+     img2coord = transforms.ImagesToCoordinates(**kwargs)
+     tst_dset = testdata_ibma.copy()
+
+     if add_data:
+         tst_dset.images = transforms.transform_images(
+             tst_dset.images,
+             add_data,
+             tst_dset.masker,
+             tst_dset.metadata,
+             tmp_path,
+         )
+
+     if drop_data:
+         tst_dset.images = tst_dset.images.drop(columns=drop_data)
+
+     new_dset = img2coord.transform(tst_dset)
+
+     # metadata column "coordinate_source" should exist
+     assert "coordinate_source" in new_dset.metadata.columns
+
+     # get the studies that did not generate coordinates
+     # either because the threshold was too high or
+     # because there were no images to generate coordinates
+     studies_without_coordinates = []
+     for msg in caplog.messages:
+         match = NO_OUTPUT_PATTERN.match(msg)
+         if match:
+             studies_without_coordinates.append(
+                 match.group(1) if match.group(1) else match.group(2)
+             )
+
+     # if there is not a z map for a study contrast, raise a warning
+     # unless the strategy is fill since all studies already have coordinates
+     if drop_data == "z" and add_data == "p" and img2coord.merge_strategy != "fill":
+         assert "No Z map for" in caplog.messages[0]
+
+     # if someone is trying to use two-sided on a study contrast with a p map, raise a warning
+     if img2coord.two_sided:
+         assert "Cannot use two_sided threshold using a p map for" in caplog.messages[0]
+
+     # if two_sided was specified and z maps were used, there
+     # should be peaks with negative values.
+     if img2coord.two_sided and not drop_data and not add_data:
+         assert np.any(new_dset.coordinates["z_stat"] < 0.0)
+
+     # since testdata_ibma already has coordinate data for every study
+     # this transformation should retain the same number of unique ids
+     # unless the merge_strategy was demolish
+     if img2coord.merge_strategy == "demolish":
+         expected_studies_with_coordinates = set(
+             tst_dset.images.loc[~tst_dset.images["z"].isnull(), "id"]
+         ) - set(studies_without_coordinates)
+     else:
+         expected_studies_with_coordinates = set(tst_dset.coordinates["id"]).union(
+             ["pain_01.nidm-1"]
+         )
+
+     assert set(new_dset.coordinates["id"]) == expected_studies_with_coordinates, set(
+         new_dset.coordinates["id"]
+     )
+
+
+ def test_images_to_coordinates_merge_strategy(testdata_ibma):
+     """Test different merging strategies."""
+     img2coord = transforms.ImagesToCoordinates(z_threshold=1.9)
+
+     # keep pain_01.nidm-1, pain_02.nidm-1, pain_03.nidm-1, pain_04.nidm-1
+     tst_dset = testdata_ibma.slice(
+         ["pain_01.nidm-1", "pain_02.nidm-1", "pain_03.nidm-1", "pain_04.nidm-1"]
+     )
+
+     # remove image data for pain_01.nidm-1 and pain_03.nidm-1
+     # coordinate data for pain_01.nidm-1 and pain_02.nidm-1 are already removed
+     tst_dset.images = tst_dset.images.query("id != 'pain_01.nidm-1'")
+     tst_dset.images = tst_dset.images.query("id != 'pain_03.nidm-1'")
+
+     # | study        | image | coordinate |
+     # |--------------|-------|------------|
+     # | pain_01.nidm | no    | no         |
+     # | pain_02.nidm | yes   | no         |
+     # | pain_03.nidm | no    | yes        |
+     # | pain_04.nidm | yes   | yes        |
+
+     # test 'fill' strategy
+     # only pain_02.nidm should have new data.
+     # pain_01.nidm, pain_03.nidm, and pain_04.nidm should remain the same
+     img2coord.merge_strategy = "fill"
+     fill_dset = img2coord.transform(tst_dset)
+     # pain_01.nidm and pain_03.nidm should be unchanged
+     assert set(fill_dset.coordinates.query("id != 'pain_02.nidm-1'")["x"]) == set(
+         tst_dset.coordinates["x"]
+     )
+     # pain_02.nidm should be in the coordinates now
+     assert "pain_02.nidm-1" in fill_dset.coordinates["id"].unique()
+
+     # test 'replace' strategy
+     # pain_02.nidm and pain_04.nidm should have new data,
+     # but pain_01.nidm and pain_03.nidm should remain the same
+     img2coord.merge_strategy = "replace"
+     replace_dset = img2coord.transform(tst_dset)
+
+     # pain_01.nidm should remain the same
+     assert set(replace_dset.coordinates.query("id == 'pain_01.nidm-1'")["x"]) == set(
+         tst_dset.coordinates.query("id == 'pain_01.nidm-1'")["x"]
+     )
+     # pain_02.nidm should be new
+     assert "pain_02.nidm-1" in replace_dset.coordinates["id"].unique()
+     # pain_03.nidm should remain the same
+     assert set(replace_dset.coordinates.query("id == 'pain_03.nidm-1'")["x"]) == set(
+         tst_dset.coordinates.query("id == 'pain_03.nidm-1'")["x"]
+     )
+     # pain_04.nidm should be new (and have different coordinates from the old version)
+     assert set(replace_dset.coordinates.query("id == 'pain_04.nidm-1'")["x"]) != set(
+         tst_dset.coordinates.query("id == 'pain_04.nidm-1'")["x"]
+     )
+
+     # test 'demolish' strategy
+     # pain_03.nidm will be removed, and pain_02.nidm and pain_04.nidm will be new
+     img2coord.merge_strategy = "demolish"
+     demolish_dset = img2coord.transform(tst_dset)
+
+     # pain_01.nidm should not be in the dset
+     assert "pain_01.nidm-1" not in demolish_dset.coordinates["id"].unique()
+     # pain_02.nidm should be new
+     assert "pain_02.nidm-1" in demolish_dset.coordinates["id"].unique()
+     # pain_03.nidm should not be in the dset
+     assert "pain_03.nidm-1" not in demolish_dset.coordinates["id"].unique()
+     # pain_04.nidm should be new (and have different coordinates from the old version)
+     assert set(demolish_dset.coordinates.query("id == 'pain_04.nidm-1'")["x"]) != set(
+         tst_dset.coordinates.query("id == 'pain_04.nidm-1'")["x"]
+     )
+
+
+ @pytest.mark.parametrize(
+     "z,tail,expected_p",
+     [
+         (0.0, "two", 1.0),
+         (0.0, "one", 0.5),
+         (1.959963, "two", 0.05),
+         (1.959963, "one", 0.025),
+         (-1.959963, "one", 0.975),
+         (-1.959963, "two", 0.05),
+         ([0.0, 1.959963, -1.959963], "two", [1.0, 0.05, 0.05]),
+     ],
+ )
+ def test_z_to_p(z, tail, expected_p):
+     """Test z to p conversion."""
+     p = transforms.z_to_p(z, tail)
+
+     assert np.all(np.isclose(p, expected_p))
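
Aside on the final parametrization above: the expected p-values follow directly from the standard normal survival function. The sketch below is only an illustration of that convention, calling scipy.stats.norm directly rather than NiMARE's own transforms.z_to_p; it reproduces the tabled values (e.g., z = 1.959963 gives p of about 0.05 two-tailed and 0.025 one-tailed).

import numpy as np
from scipy import stats

def z_to_p_sketch(z, tail="two"):
    """Illustrative z-to-p conversion under a standard normal null."""
    z = np.asarray(z, dtype=float)
    if tail == "two":
        # Two-tailed: probability of a |Z| at least as extreme in either direction
        return 2 * stats.norm.sf(np.abs(z))
    # One-tailed (upper): large positive z gives small p, negative z gives p > 0.5
    return stats.norm.sf(z)

# Matches the expected values in the parametrization above
assert np.allclose(z_to_p_sketch(0.0, "two"), 1.0)
assert np.allclose(z_to_p_sketch(1.959963, "two"), 0.05, atol=1e-4)
assert np.allclose(z_to_p_sketch(-1.959963, "one"), 0.975, atol=1e-4)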