nimare-0.4.2rc4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +635 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +240 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2rc4.dist-info/LICENSE +21 -0
  115. nimare-0.4.2rc4.dist-info/METADATA +124 -0
  116. nimare-0.4.2rc4.dist-info/RECORD +119 -0
  117. nimare-0.4.2rc4.dist-info/WHEEL +5 -0
  118. nimare-0.4.2rc4.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2rc4.dist-info/top_level.txt +2 -0
nimare/decode/encode.py ADDED
@@ -0,0 +1,110 @@
+ """Methods for encoding text into brain maps."""
+
+ import numpy as np
+ from nilearn.masking import unmask
+ from sklearn.feature_extraction.text import CountVectorizer
+
+ from nimare.decode.utils import weight_priors
+
+
+ def gclda_encode(model, text, out_file=None, topic_priors=None, prior_weight=1.0):
+     r"""Perform text-to-image encoding according to the method described in Rubin et al. (2017).
+
+     This method was originally described in :footcite:t:`rubin2017decoding`.
+
+     Parameters
+     ----------
+     model : :obj:`~nimare.annotate.gclda.GCLDAModel`
+         Model object needed for decoding.
+     text : :obj:`str` or :obj:`list`
+         Text to encode into an image.
+     out_file : :obj:`str`, optional
+         If not None, writes the encoded image to a file.
+     topic_priors : :obj:`numpy.ndarray` of :obj:`float`, optional
+         A 1d array of size (n_topics) with values for topic weighting.
+         If None, no weighting is done. Default is None.
+     prior_weight : :obj:`float`, optional
+         The weight by which the prior will affect the encoding.
+         Default is 1.
+
+     Returns
+     -------
+     img : :obj:`nibabel.nifti1.Nifti1Image`
+         The encoded image.
+     topic_weights : :obj:`numpy.ndarray` of :obj:`float`
+         The weights of the topics used in encoding.
+
+     Notes
+     -----
+     ====================== ==============================================================
+     Notation               Meaning
+     ====================== ==============================================================
+     :math:`v`              Voxel
+     :math:`t`              Topic
+     :math:`w`              Word type
+     :math:`h`              Input text
+     :math:`p(v|t)`         Probability of voxel given topic (``p_voxel_g_topic_``)
+     :math:`\tau_{t}`       Topic weight vector (``topic_weights``)
+     :math:`p(w|t)`         Probability of word type given topic (``p_word_g_topic``)
+     :math:`\omega`         1d array from input image (``input_values``)
+     ====================== ==============================================================
+
+     1. Compute :math:`p(v|t)` (``p_voxel_g_topic``).
+
+        - From :func:`gclda.model.Model.get_spatial_probs()`
+
+     2. Compute :math:`p(t|w)` (``p_topic_g_word``).
+     3. Vectorize input text according to model vocabulary.
+     4. Reduce :math:`p(t|w)` to only include word types in input text.
+     5. Compute :math:`p(t|h)` (``p_topic_g_text``) by multiplying :math:`p(t|w)` by word
+        counts for input text.
+     6. Sum topic weights (:math:`\tau_{t}`) across words.
+
+        - :math:`\tau_{t} = \sum_{i}{p(t|h_{i})}`
+
+     7. Compute voxel weights.
+
+        - :math:`p(v|h) \propto p(v|t) \cdot \tau_{t}`
+
+     8. The resulting array (``voxel_weights``) reflects arbitrarily scaled voxel weights
+        for the input text.
+     9. Unmask and reshape ``voxel_weights`` into a brain image.
+
+     See Also
+     --------
+     :class:`~nimare.annotate.gclda.GCLDAModel`
+     :func:`~nimare.decode.continuous.gclda_decode_map`
+     :func:`~nimare.decode.discrete.gclda_decode_roi`
+
+     References
+     ----------
+     .. footbibliography::
+     """
+     if isinstance(text, list):
+         text = " ".join(text)
+
+     # Assume that words in vocabulary are underscore-separated.
+     # Convert to space-separation for vectorization of input string.
+     vocabulary = [term.replace("_", " ") for term in model.vocabulary]
+     max_len = max(len(term.split(" ")) for term in vocabulary)
+     vectorizer = CountVectorizer(vocabulary=model.vocabulary, ngram_range=(1, max_len))
+     word_counts = np.squeeze(vectorizer.fit_transform([text]).toarray())
+     keep_idx = np.where(word_counts > 0)[0]
+     text_counts = word_counts[keep_idx]
+
+     # n_topics_per_word_token = np.sum(model.n_word_tokens_word_by_topic, axis=1)
+     # p_topic_g_word = model.n_word_tokens_word_by_topic / n_topics_per_word_token[:, None]
+     # p_topic_g_word = np.nan_to_num(p_topic_g_word, 0)
+     p_topic_g_text = model.p_topic_g_word_[keep_idx]  # p(T|W) for words in text only
+     prod = p_topic_g_text * text_counts[:, None]  # Multiply p(T|W) by word counts in text
+     topic_weights = np.sum(prod, axis=0)  # Sum across words
+     if topic_priors is not None:
+         weighted_priors = weight_priors(topic_priors, prior_weight)
+         topic_weights *= weighted_priors
+
+     voxel_weights = np.dot(model.p_voxel_g_topic_, topic_weights)
+     img = unmask(voxel_weights, model.mask)
+
+     if out_file is not None:
+         img.to_filename(out_file)
+     return img, topic_weights
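
As a quick orientation to the function above, a minimal usage sketch (not part of the diff). The model pickle path, query text, and output filename are hypothetical; it assumes a GCLDAModel previously trained and saved with NiMARE's pickle-based save/load helpers:

    from nimare.annotate.gclda import GCLDAModel
    from nimare.decode.encode import gclda_encode

    # Hypothetical path to a previously trained and saved GCLDA model
    model = GCLDAModel.load("gclda_model.pkl.gz")

    # Encode a free-text query into a brain map; out_file is a hypothetical output path
    img, topic_weights = gclda_encode(
        model,
        "motor learning and memory",
        out_file="encoded_map.nii.gz",
    )
    print(topic_weights.shape)  # one weight per topic in the model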
nimare/decode/utils.py ADDED
@@ -0,0 +1,44 @@
+ """Utility functions for decoding/encoding."""
+
+ import numpy as np
+
+
+ def weight_priors(topic_priors, prior_weight):
+     """Combine topic priors with prior weight.
+
+     Parameters
+     ----------
+     topic_priors : array-like
+         The prior weights for topics (n_topics-long array). Scale may be
+         arbitrary, as the array will be normalized.
+     prior_weight : :obj:`float`
+         Scalar by which to weight priors.
+
+     Returns
+     -------
+     weighted_priors : :obj:`numpy.ndarray`
+         Updated prior weights for topics.
+     """
+     if not isinstance(prior_weight, (float, int)):
+         raise IOError("Input prior_weight must be a float in range [0, 1]")
+     elif not 0.0 <= prior_weight <= 1:
+         raise ValueError("Input prior_weight must be in range [0, 1]")
+
+     # Enforce compatible types
+     topic_priors = topic_priors.astype(float)
+     prior_weight = float(prior_weight)
+
+     # Normalize priors
+     topic_priors /= np.sum(topic_priors)
+
+     # Weight priors
+     topic_priors *= prior_weight
+
+     # Create uniform distribution to combine with priors
+     uniform = np.ones(topic_priors.shape)
+     uniform /= np.sum(uniform)
+     uniform *= 1 - prior_weight
+
+     # Weight priors with uniform base
+     weighted_priors = topic_priors + uniform
+     return weighted_priors
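
A short worked example of the mixing rule implemented above (not part of the diff): with prior_weight=0.75, the output is 0.75 times the normalized priors plus 0.25 times a uniform distribution, so the result still sums to 1. The input values are illustrative only:

    import numpy as np

    from nimare.decode.utils import weight_priors

    # Arbitrary-scale priors; normalized internally to [0.5, 0.25, 0.25]
    topic_priors = np.array([2.0, 1.0, 1.0])
    weighted = weight_priors(topic_priors, 0.75)
    # 0.75 * [0.5, 0.25, 0.25] + 0.25 * [1/3, 1/3, 1/3]
    # = [0.4583..., 0.2708..., 0.2708...]
    print(weighted, weighted.sum())  # weighted.sum() == 1.0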
nimare/diagnostics.py ADDED
@@ -0,0 +1,510 @@
+ """Methods for diagnosing problems in meta-analytic datasets or analyses."""
+
+ import copy
+ import logging
+ from abc import abstractmethod
+
+ import numpy as np
+ import pandas as pd
+ from joblib import Parallel, delayed
+ from nilearn import input_data
+ from nilearn.reporting import get_clusters_table
+ from scipy.spatial.distance import cdist
+ from tqdm.auto import tqdm
+
+ from nimare.base import NiMAREBase
+ from nimare.meta.cbma.base import PairwiseCBMAEstimator
+ from nimare.meta.ibma import IBMAEstimator
+ from nimare.utils import _check_ncores, get_masker, mm2vox
+
+ LGR = logging.getLogger(__name__)
+
+ POSTAIL_LBL = "PositiveTail"  # Label assigned to positive tail clusters
+ NEGTAIL_LBL = "NegativeTail"  # Label assigned to negative tail clusters
+
+
+ class Diagnostics(NiMAREBase):
+     """Base class for diagnostic methods.
+
+     .. versionchanged:: 0.1.2
+
+         * New parameter display_second_group, which controls whether the second group is displayed.
+
+     .. versionchanged:: 0.1.0
+
+         * Transform now returns a MetaResult object.
+
+     .. versionadded:: 0.0.14
+
+     Parameters
+     ----------
+     target_image : :obj:`str`, optional
+         The meta-analytic map for which clusters will be characterized.
+         The default is z because log-p will not always have a value of zero for
+         non-cluster voxels.
+     voxel_thresh : :obj:`float` or None, optional
+         An optional voxel-level threshold that may be applied to the ``target_image`` to
+         define clusters. This can be None if the ``target_image`` is already thresholded
+         (e.g., a cluster-level corrected map).
+         Default is None.
+     cluster_threshold : :obj:`int` or None, optional
+         Cluster size threshold, in :term:`voxels<voxel>`.
+         If None, then no cluster size threshold will be applied. Default is None.
+     display_second_group : :obj:`bool`, optional
+         Whether to also run the diagnostics on the second group of a pairwise
+         meta-analysis. Default is False.
+     n_cores : :obj:`int`, optional
+         Number of cores to use for parallelization.
+         If <=0, defaults to using all available cores.
+         Default is 1.
+     """
+
+     def __init__(
+         self,
+         target_image="z_desc-size_level-cluster_corr-FWE_method-montecarlo",
+         voxel_thresh=None,
+         cluster_threshold=None,
+         display_second_group=False,
+         n_cores=1,
+     ):
+         self.target_image = target_image
+         self.voxel_thresh = voxel_thresh
+         self.cluster_threshold = cluster_threshold
+         self.display_second_group = display_second_group
+         self.n_cores = _check_ncores(n_cores)
+
+     @abstractmethod
+     def _transform(self, expid, label_map, sign, result):
+         """Apply transform to study ID and label map.
+
+         Must return a 1D array with the contribution of `expid` in each cluster of
+         `label_map`.
+         """
+
+     def transform(self, result):
+         """Apply the analysis to a MetaResult.
+
+         Parameters
+         ----------
+         result : :obj:`~nimare.results.MetaResult`
+             A MetaResult produced by a coordinate- or image-based meta-analysis.
+
+         Returns
+         -------
+         :obj:`~nimare.results.MetaResult`
+             Results of Diagnostics fitting.
+
+         Notes
+         -----
+         This method adds new keys to the ``maps`` and ``tables`` attributes of the
+         MetaResult object.
+
+         - ``<target_image>_diag-<Jackknife|FocusCounter>_tab-counts`` :
+           :obj:`pandas.DataFrame` or None.
+           A DataFrame with information about relative contributions of each experiment
+           to each cluster in the thresholded map.
+           There is one row for each experiment.
+           There is one column for each cluster, with column names being
+           ``PositiveTail``/``NegativeTail`` indicating the sign (+/-) of the cluster's
+           statistical values, plus an integer indicating the cluster's associated value
+           in the ``label_maps[0]``/``label_maps[1]`` output.
+           If no clusters are found or a pairwise Estimator was used, ``None`` is returned.
+         - ``<target_image>_tab-clust`` : :obj:`pandas.DataFrame`
+           A DataFrame with information about each cluster.
+           There is one row for each cluster.
+           The columns in this table include: ``Cluster ID`` (the cluster id, plus a letter
+           for subpeaks only), ``X``/``Y``/``Z`` (coordinate for the center of mass),
+           ``Max Stat`` (statistical value of the peak), and ``Cluster Size (mm3)``
+           (the size of the cluster, in cubic millimeters).
+           If no clusters are found, this table will be empty.
+         - ``label_<target_image>_tail-<positive|negative>`` : :obj:`numpy.ndarray`
+           Label maps.
+           Each cluster in the map has a single value, which corresponds to the cluster
+           number of the column name in ``contribution_table``.
+           If ``target_image`` has negative values after thresholding, the first and second
+           maps correspond to the positive and negative tails.
+           If no clusters are found, this list will be empty.
+         """
+         self._is_pairwaise_estimator = issubclass(type(result.estimator), PairwiseCBMAEstimator)
+         masker = result.estimator.masker
+         diag_name = self.__class__.__name__
+
+         # Collect the thresholded cluster map
+         if self.target_image in result.maps:
+             target_img = result.get_map(self.target_image, return_type="image")
+         else:
+             available_maps = [f"'{m}'" for m in result.maps.keys()]
+             raise ValueError(
+                 f"Target image ('{self.target_image}') not present in result. "
+                 f"Available maps in result are: {', '.join(available_maps)}."
+             )
+
+         # Get clusters table and label maps
+         stat_threshold = self.voxel_thresh or 0
+
+         if hasattr(result.estimator, "two_sided"):
+             # Only present in Fisher's and Stouffer's estimators
+             two_sided = getattr(result.estimator, "two_sided")
+         else:
+             two_sided = (target_img.get_fdata() < 0).any()
+
+         clusters_table, label_maps = get_clusters_table(
+             target_img,
+             stat_threshold,
+             self.cluster_threshold,
+             two_sided=two_sided,
+             return_label_maps=True,
+         )
+
+         n_clusters = clusters_table.shape[0]
+         if n_clusters == 0:
+             LGR.warning("No clusters found")
+         else:
+             LGR.info(f"{n_clusters} clusters found")
+             # Make sure cluster IDs are strings
+             clusters_table = clusters_table.astype({"Cluster ID": "str"})
+             # Rename the clusters_table cluster IDs to match the contribution table columns
+             clusters_table["Cluster ID"] = [
+                 (
+                     f"{POSTAIL_LBL} {row['Cluster ID']}"
+                     if row["Peak Stat"] > 0
+                     else f"{NEGTAIL_LBL} {row['Cluster ID']}"
+                 )
+                 for _, row in clusters_table.iterrows()
+             ]
+
+         # Define BIDS-like names for tables and maps
+         image_name = "_".join(self.target_image.split("_")[1:])
+         image_name = f"_{image_name}" if image_name else image_name
+         clusters_table_name = f"{self.target_image}_tab-clust"
+         contribution_table_name = f"{self.target_image}_diag-{diag_name}_tab-counts"
+         label_map_names = (
+             [f"label{image_name}_tail-positive", f"label{image_name}_tail-negative"]
+             if len(label_maps) == 2
+             else [f"label{image_name}_tail-positive"]
+         )
+
+         # Check number of clusters
+         if n_clusters == 0:
+             result.tables[clusters_table_name] = clusters_table
+             result.tables[contribution_table_name] = None
+             result.maps[label_map_names[0]] = None
+
+             result.diagnostics.append(self)
+             return result
+
+         tables_dict = {clusters_table_name: clusters_table}
+         maps_dict = {
+             label_map_name: np.squeeze(masker.transform(label_map))
+             for label_map_name, label_map in zip(label_map_names, label_maps)
+         }
+
+         # Use study IDs in inputs_ instead of the dataset, because we don't want to try
+         # fitting the estimator to a study that might have been filtered out by the
+         # estimator's criteria. For pairwise estimators, id1 is used for the positive tail,
+         # and id2 is used for the negative tail only when display_second_group=True.
+         if self._is_pairwaise_estimator:
+             if self.display_second_group and len(label_maps) == 2:
+                 meta_ids_lst = [result.estimator.inputs_["id1"], result.estimator.inputs_["id2"]]
+                 signs = [POSTAIL_LBL, NEGTAIL_LBL]
+             else:
+                 meta_ids_lst = [result.estimator.inputs_["id1"]]
+                 signs = [POSTAIL_LBL]
+         elif len(label_maps) == 2:
+             # Non-pairwise estimator with two tails (IBMA estimators)
+             meta_ids_lst = [result.estimator.inputs_["id"], result.estimator.inputs_["id"]]
+             signs = [POSTAIL_LBL, NEGTAIL_LBL]
+         else:
+             # Non-pairwise estimator with one tail (CBMA estimators)
+             meta_ids_lst = [result.estimator.inputs_["id"]]
+             signs = [POSTAIL_LBL]
+
+         contribution_tables = []
+         for sign, label_map, meta_ids in zip(signs, label_maps, meta_ids_lst):
+             cluster_ids = sorted(list(np.unique(label_map.get_fdata())[1:]))
+             rows = list(meta_ids)
+
+             # Create contribution table
+             cols = [f"{sign} {int(c_id)}" for c_id in cluster_ids]
+             contribution_table = pd.DataFrame(index=rows, columns=cols)
+             contribution_table.index.name = "id"
+
+             contributions = [
+                 r
+                 for r in tqdm(
+                     Parallel(return_as="generator", n_jobs=self.n_cores)(
+                         delayed(self._transform)(expid, label_map, sign, result)
+                         for expid in meta_ids
+                     ),
+                     total=len(meta_ids),
+                 )
+             ]
+
+             # Add results to table
+             for expid, stat_prop_values in zip(meta_ids, contributions):
+                 contribution_table.loc[expid] = stat_prop_values
+
+             contribution_tables.append(contribution_table.reset_index())
+
+         tails = ["positive", "negative"] if len(contribution_tables) == 2 else ["positive"]
+         if not self._is_pairwaise_estimator and len(contribution_tables) == 2:
+             # Merge POSTAIL_LBL and NEGTAIL_LBL tables for IBMA
+             contribution_table = (
+                 contribution_tables[0].merge(contribution_tables[1], how="outer").fillna(0)
+             )
+             tables_dict[contribution_table_name] = contribution_table
+         else:
+             # Keep separate tables for CBMA
+             for tail, contribution_table in zip(tails, contribution_tables):
+                 tables_dict[f"{contribution_table_name}_tail-{tail}"] = contribution_table
+
+         # Save tables and maps to result
+         result.tables.update(tables_dict)
+         result.maps.update(maps_dict)
+
+         # Add the diagnostics class to the result, since more than one can be run
+         result.diagnostics.append(self)
+         return result
+
+
+ class Jackknife(Diagnostics):
+     """Run a jackknife analysis on a meta-analysis result.
+
+     .. versionchanged:: 0.1.2
+
+         * Support for pairwise meta-analyses.
+
+     .. versionchanged:: 0.0.14
+
+         * New parameter: `cluster_threshold`.
+         * Return clusters table.
+
+     .. versionchanged:: 0.0.13
+
+         * Change cluster neighborhood from faces+edges to faces, to match Nilearn.
+
+     .. versionadded:: 0.0.11
+
+     Notes
+     -----
+     This analysis characterizes the relative contribution of each experiment in a
+     meta-analysis to the resulting clusters by looping through experiments, calculating
+     the Estimator's summary statistic for all experiments *except* the target experiment,
+     dividing the resulting test summary statistics by the summary statistics from the
+     original meta-analysis, and finally averaging the resulting proportion values across
+     all voxels in each cluster.
+     """
+
+     def _transform(self, expid, label_map, sign, result):
+         """Apply transform to study ID and label map.
+
+         Parameters
+         ----------
+         expid : :obj:`str`
+             Study ID.
+         label_map : :class:`nibabel.Nifti1Image`
+             The cluster label map image.
+         sign : :obj:`str`
+             The sign of the label map.
+         result : :obj:`~nimare.results.MetaResult`
+             A MetaResult produced by a coordinate- or image-based meta-analysis.
+
+         Returns
+         -------
+         stat_prop_values : 1D :obj:`numpy.ndarray`
+             1D array with the contribution of `expid` in each cluster of `label_map`.
+         """
+         # We need to copy the estimator because it will otherwise overwrite the original
+         # version with one missing a study in its inputs.
+         estimator = copy.deepcopy(result.estimator)
+
+         if self._is_pairwaise_estimator:
+             all_ids = estimator.inputs_["id1"] if sign == POSTAIL_LBL else estimator.inputs_["id2"]
+         else:
+             all_ids = estimator.inputs_["id"]
+
+         original_masker = estimator.masker
+
+         # Mask using a labels masker, so that we can easily get the mean value for each cluster
+         cluster_masker = input_data.NiftiLabelsMasker(label_map)
+         cluster_masker.fit(label_map)
+
+         # CBMAs have "stat" maps, while most IBMAs have "est" maps. ALESubtraction has
+         # "stat_desc-group1MinusGroup2" maps, while MKDAChi2 has "z_desc-association" maps.
+         # Fisher's and Stouffer's only have "z" maps though.
+         target_value_keys = {"stat", "est", "stat_desc-group1MinusGroup2", "z_desc-association"}
+         avail_value_keys = set(result.maps.keys())
+         union_value_keys = list(target_value_keys & avail_value_keys)
+         target_value_map = union_value_keys[0] if union_value_keys else "z"
+
+         stat_values = result.get_map(target_value_map, return_type="array")
+
+         # Fit the Estimator to all studies except the target study
+         other_ids = [id_ for id_ in all_ids if id_ != expid]
+         if self._is_pairwaise_estimator:
+             if sign == POSTAIL_LBL:
+                 temp_dset = estimator.dataset1.slice(other_ids)
+                 temp_result = estimator.fit(temp_dset, estimator.dataset2)
+             else:
+                 temp_dset = estimator.dataset2.slice(other_ids)
+                 temp_result = estimator.fit(estimator.dataset1, temp_dset)
+         else:
+             temp_dset = estimator.dataset.slice(other_ids)
+             temp_result = estimator.fit(temp_dset)
+
+         # Collect the target values (e.g., ALE values) from the N-1 meta-analysis
+         temp_stat_img = temp_result.get_map(target_value_map, return_type="image")
+         temp_stat_vals = np.squeeze(original_masker.transform(temp_stat_img))
+
+         # Voxelwise proportional reduction of each statistic after removal of the experiment
+         with np.errstate(divide="ignore", invalid="ignore"):
+             prop_values = np.true_divide(temp_stat_vals, stat_values)
+             prop_values = np.nan_to_num(prop_values)
+
+         voxelwise_stat_prop_values = 1 - prop_values
+         stat_prop_img = original_masker.inverse_transform(voxelwise_stat_prop_values)
+         stat_prop_values = cluster_masker.transform(stat_prop_img)
+
+         return stat_prop_values.flatten()
+
+
+ class FocusCounter(Diagnostics):
+     """Run a focus-count analysis on a coordinate-based meta-analysis result.
+
+     .. versionchanged:: 0.1.2
+
+         * Support for pairwise meta-analyses.
+
+     .. versionchanged:: 0.0.14
+
+         * New parameter: `cluster_threshold`.
+         * Return clusters table.
+
+     .. versionchanged:: 0.0.13
+
+         * Change cluster neighborhood from faces+edges to faces, to match Nilearn.
+
+     .. versionadded:: 0.0.12
+
+     Notes
+     -----
+     This analysis characterizes the relative contribution of each experiment in a
+     meta-analysis to the resulting clusters by counting the number of peaks from each
+     experiment that fall within each significant cluster.
+
+     Warnings
+     --------
+     This method only works for coordinate-based meta-analyses.
+     """
+
+     def _transform(self, expid, label_map, sign, result):
+         """Apply transform to study ID and label map.
+
+         Parameters
+         ----------
+         expid : :obj:`str`
+             Study ID.
+         label_map : :class:`nibabel.Nifti1Image`
+             The cluster label map image.
+         sign : :obj:`str`
+             The sign of the label map.
+         result : :obj:`~nimare.results.MetaResult`
+             A MetaResult produced by a coordinate- or image-based meta-analysis.
+
+         Returns
+         -------
+         focus_counts : 1D :obj:`numpy.ndarray`
+             1D array with the contribution of `expid` in each cluster of `label_map`.
+         """
+         if issubclass(type(result.estimator), IBMAEstimator):
+             raise ValueError("This method only works for coordinate-based meta-analyses.")
+
+         affine = label_map.affine
+         label_arr = label_map.get_fdata()
+         clust_ids = sorted(list(np.unique(label_arr)[1:]))
+
+         if self._is_pairwaise_estimator:
+             coordinates_df = (
+                 result.estimator.inputs_["coordinates1"]
+                 if sign == POSTAIL_LBL
+                 else result.estimator.inputs_["coordinates2"]
+             )
+         else:
+             coordinates_df = result.estimator.inputs_["coordinates"]
+
+         coords = coordinates_df.loc[coordinates_df["id"] == expid]
+         ijk = mm2vox(coords[["x", "y", "z"]], affine)
+
+         focus_counts = []
+         for c_val in clust_ids:
+             cluster_mask = label_arr == c_val
+             cluster_idx = np.vstack(np.where(cluster_mask))
+             distances = cdist(cluster_idx.T, ijk)
+             distances = distances < 1
+             distances = np.any(distances, axis=0)
+             n_included_voxels = np.sum(distances)
+             focus_counts.append(n_included_voxels)
+
+         return np.array(focus_counts)
+
+
+ class FocusFilter(NiMAREBase):
+     """Remove coordinates outside of the Dataset's mask from the Dataset.
+
+     .. versionadded:: 0.0.13
+
+     Parameters
+     ----------
+     mask : :obj:`str`, :class:`~nibabel.nifti1.Nifti1Image`, \
+            :class:`~nilearn.maskers.NiftiMasker` or similar, or None, optional
+         Mask(er) to use. If None, uses the masker of the Dataset provided in ``transform``.
+
+     Notes
+     -----
+     This filter removes any coordinates outside of the brain mask.
+     It does not remove studies without coordinates in the brain mask, since a Dataset does
+     not need to have coordinates for all studies (e.g., some may only have images).
+     """
+
+     def __init__(self, mask=None):
+         if mask is not None:
+             mask = get_masker(mask)
+
+         self.masker = mask
+
+     def transform(self, dataset):
+         """Apply the filter to a Dataset.
+
+         Parameters
+         ----------
+         dataset : :obj:`~nimare.dataset.Dataset`
+             The Dataset to filter.
+
+         Returns
+         -------
+         dataset : :obj:`~nimare.dataset.Dataset`
+             The filtered Dataset.
+         """
+         masker = self.masker or dataset.masker
+         # Values of 0 or 1 indicate whether each voxel is within the mask
+         masker_array = masker.mask_img_.dataobj
+
+         # Get matrix indices for Dataset coordinates
+         dset_xyz = dataset.coordinates[["x", "y", "z"]].values
+
+         # mm2vox automatically rounds the coordinates
+         dset_ijk = mm2vox(dset_xyz, masker.mask_img.affine)
+
+         # Check whether each coordinate falls inside the brain mask
+         def check_coord(coord):
+             try:
+                 return masker_array[coord[0], coord[1], coord[2]] == 1
+             except IndexError:
+                 return False
+
+         keep_idx = [i for i, coord in enumerate(dset_ijk) if check_coord(coord)]
+
+         LGR.info(
+             f"{dset_ijk.shape[0] - len(keep_idx)}/{dset_ijk.shape[0]} coordinates fall outside of "
+             "the mask. Removing them."
+         )
+
+         # Only retain coordinates inside the brain mask
+         dataset.coordinates = dataset.coordinates.iloc[keep_idx]
+
+         return dataset
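
To tie the diagnostics classes together, a minimal end-to-end sketch (not part of the diff). The dataset filename and the threshold values are hypothetical; it assumes a coordinate-based Dataset JSON compatible with nimare.dataset.Dataset:

    from nimare.dataset import Dataset
    from nimare.diagnostics import FocusCounter, FocusFilter, Jackknife
    from nimare.meta.cbma.ale import ALE

    # Hypothetical dataset file; FocusFilter drops coordinates outside the mask
    dset = FocusFilter().transform(Dataset("dataset.json"))
    result = ALE().fit(dset)

    # Characterize each study's contribution to clusters in the z map,
    # using a hypothetical voxel-level threshold
    result = Jackknife(target_image="z", voxel_thresh=1.65).transform(result)
    result = FocusCounter(target_image="z", voxel_thresh=1.65).transform(result)

    # One contribution table per diagnostic, plus a shared clusters table
    print([key for key in result.tables if "tab-" in key])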