nimare 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +667 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +294 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2.dist-info/LICENSE +21 -0
  115. nimare-0.4.2.dist-info/METADATA +124 -0
  116. nimare-0.4.2.dist-info/RECORD +119 -0
  117. nimare-0.4.2.dist-info/WHEEL +5 -0
  118. nimare-0.4.2.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2.dist-info/top_level.txt +2 -0
@@ -0,0 +1,951 @@
+ """CBMA methods from the activation likelihood estimation (ALE) family."""
+
+ import logging
+
+ import numpy as np
+ import pandas as pd
+ import sparse
+ from joblib import Memory, Parallel, delayed
+ from tqdm.auto import tqdm
+
+ from nimare import _version
+ from nimare.meta.cbma.base import CBMAEstimator, PairwiseCBMAEstimator
+ from nimare.meta.kernel import ALEKernel
+ from nimare.stats import null_to_p, nullhist_to_p
+ from nimare.transforms import p_to_z
+ from nimare.utils import _check_ncores, use_memmap
+
+ LGR = logging.getLogger(__name__)
+ __version__ = _version.get_versions()["version"]
+
+
+ class ALE(CBMAEstimator):
+     """Activation likelihood estimation.
+
+     .. versionchanged:: 0.2.1
+
+         - New parameters: ``memory`` and ``memory_level`` for memory caching.
+
+     .. versionchanged:: 0.0.12
+
+         - Use a 4D sparse array for modeled activation maps.
+
+     Parameters
+     ----------
+     kernel_transformer : :obj:`~nimare.meta.kernel.KernelTransformer`, optional
+         Kernel with which to convolve coordinates from dataset.
+         Default is ALEKernel.
+     null_method : {"approximate", "montecarlo"}, optional
+         Method by which to determine uncorrected p-values. The available options are
+
+         ======================= =================================================================
+         "approximate" (default) Build a histogram of summary-statistic values and their
+                                 expected frequencies under the assumption of random spatial
+                                 association between studies, via a weighted convolution, as
+                                 described in :footcite:t:`eickhoff2012activation`.
+
+                                 This method is much faster, but slightly less accurate, than the
+                                 "montecarlo" option.
+         "montecarlo"            Perform a large number of permutations, in which the coordinates
+                                 in the studies are randomly drawn from the Estimator's brain mask
+                                 and the full set of resulting summary-statistic values are
+                                 incorporated into a null distribution (stored as a histogram for
+                                 memory reasons).
+
+                                 This method is much slower, and is only slightly more accurate.
+         ======================= =================================================================
+
+     n_iters : :obj:`int`, default=5000
+         Number of iterations to use to define the null distribution.
+         This is only used if ``null_method=="montecarlo"``.
+         Default is 5000.
+     memory : instance of :class:`joblib.Memory`, :obj:`str`, or :class:`pathlib.Path`
+         Used to cache the output of a function. By default, no caching is done.
+         If a :obj:`str` is given, it is the path to the caching directory.
+     memory_level : :obj:`int`, default=0
+         Rough estimator of the amount of memory used by caching.
+         Higher value means more memory for caching. Zero means no caching.
+     n_cores : :obj:`int`, default=1
+         Number of cores to use for parallelization.
+         This is only used if ``null_method=="montecarlo"``.
+         If <=0, defaults to using all available cores.
+         Default is 1.
+     **kwargs
+         Keyword arguments. Arguments for the kernel_transformer can be assigned here,
+         with the prefix ``kernel__`` in the variable name.
+         Another optional argument is ``mask``.
+
+     Attributes
+     ----------
+     masker : :class:`~nilearn.input_data.NiftiMasker` or similar
+         Masker object.
+     inputs_ : :obj:`dict`
+         Inputs to the Estimator. For CBMA estimators, there is only one key: coordinates.
+         This is an edited version of the dataset's coordinates DataFrame.
+     null_distributions_ : :obj:`dict` of :class:`numpy.ndarray`
+         Null distributions for the uncorrected summary-statistic-to-p-value conversion and any
+         multiple-comparisons correction methods.
+         Entries are added to this attribute if and when the corresponding method is applied.
+
+         If ``null_method == "approximate"``:
+
+         - ``histogram_bins``: Array of bin centers for the null distribution histogram,
+           ranging from zero to the maximum possible summary statistic value for the Dataset.
+         - ``histweights_corr-none_method-approximate``: Array of weights for the null
+           distribution histogram, with one value for each bin in ``histogram_bins``.
+
+         If ``null_method == "montecarlo"``:
+
+         - ``histogram_bins``: Array of bin centers for the null distribution histogram,
+           ranging from zero to the maximum possible summary statistic value for the Dataset.
+         - ``histweights_corr-none_method-montecarlo``: Array of weights for the null
+           distribution histogram, with one value for each bin in ``histogram_bins``.
+           These values are derived from the full set of summary statistics from each
+           iteration of the Monte Carlo procedure.
+         - ``histweights_level-voxel_corr-fwe_method-montecarlo``: Array of weights for the
+           voxel-level FWE-correction null distribution, with one value for each bin in
+           ``histogram_bins``. These values are derived from the maximum summary statistic
+           from each iteration of the Monte Carlo procedure.
+
+         If :meth:`correct_fwe_montecarlo` is applied:
+
+         - ``values_level-voxel_corr-fwe_method-montecarlo``: The maximum summary statistic
+           value from each Monte Carlo iteration. An array of shape (n_iters,).
+         - ``values_desc-size_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
+           size from each Monte Carlo iteration. An array of shape (n_iters,).
+         - ``values_desc-mass_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
+           mass from each Monte Carlo iteration. An array of shape (n_iters,).
+
+     Notes
+     -----
+     The ALE algorithm was originally developed in :footcite:t:`turkeltaub2002meta`,
+     then updated in :footcite:t:`turkeltaub2012minimizing` and
+     :footcite:t:`eickhoff2012activation`.
+
+     The ALE algorithm is also implemented as part of the GingerALE app provided by the BrainMap
+     organization (https://www.brainmap.org/ale/).
+
+     Available correction methods: :meth:`~nimare.meta.cbma.ale.ALE.correct_fwe_montecarlo`.
+
+     References
+     ----------
+     .. footbibliography::
+     """
+
+     def __init__(
+         self,
+         kernel_transformer=ALEKernel,
+         null_method="approximate",
+         n_iters=5000,
+         memory=Memory(location=None, verbose=0),
+         memory_level=0,
+         n_cores=1,
+         **kwargs,
+     ):
+         if not (isinstance(kernel_transformer, ALEKernel) or kernel_transformer == ALEKernel):
+             LGR.warning(
+                 f"The KernelTransformer being used ({kernel_transformer}) is not optimized "
+                 f"for the {type(self).__name__} algorithm. "
+                 "Expect suboptimal performance and beware bugs."
+             )
+
+         # Add kernel transformer attribute and process keyword arguments
+         super().__init__(
+             kernel_transformer=kernel_transformer,
+             memory=memory,
+             memory_level=memory_level,
+             **kwargs,
+         )
+         self.null_method = null_method
+         self.n_iters = None if null_method == "approximate" else n_iters or 5000
+         self.n_cores = _check_ncores(n_cores)
+         self.dataset = None
+
+     def _generate_description(self):
+         """Generate a description of the fitted Estimator.
+
+         Returns
+         -------
+         str
+             Description of the Estimator.
+         """
+         if self.null_method == "montecarlo":
+             null_method_str = (
+                 "a Monte Carlo-based null distribution, in which dataset coordinates were "
+                 "randomly drawn from the analysis mask and the full set of ALE values were "
+                 f"retained, using {self.n_iters} iterations"
+             )
+         else:
+             null_method_str = "an approximate null distribution \\citep{eickhoff2012activation}"
+
+         if (
+             hasattr(self.kernel_transformer, "sample_size")  # Only kernels that allow sample sizes
+             and (self.kernel_transformer.sample_size is None)
+             and (self.kernel_transformer.fwhm is None)
+         ):
+             # Get the total number of subjects in the inputs.
+             n_subjects = (
+                 self.inputs_["coordinates"].groupby("id")["sample_size"].mean().values.sum()
+             )
+             sample_size_str = f", with a total of {int(n_subjects)} participants"
+         else:
+             sample_size_str = ""
+
+         description = (
+             "An activation likelihood estimation (ALE) meta-analysis "
+             "\\citep{turkeltaub2002meta,turkeltaub2012minimizing,eickhoff2012activation} was "
+             f"performed with NiMARE {__version__} "
+             "(RRID:SCR_017398; \\citealt{Salo2023}), using a(n) "
+             f"{self.kernel_transformer.__class__.__name__.replace('Kernel', '')} kernel. "
+             f"{self.kernel_transformer._generate_description()} "
+             f"ALE values were converted to p-values using {null_method_str}. "
+             f"The input dataset included {self.inputs_['coordinates'].shape[0]} foci from "
+             f"{len(self.inputs_['id'])} experiments{sample_size_str}."
+         )
+         return description
+
+     def _compute_summarystat_est(self, ma_values):
+         stat_values = 1.0 - np.prod(1.0 - ma_values, axis=0)
+
+         # np.array type is used by _determine_histogram_bins to calculate max_poss_ale
+         if isinstance(stat_values, sparse._coo.core.COO):
+             # NOTE: This may not work correctly with a non-NiftiMasker.
+             mask_data = self.masker.mask_img.get_fdata().astype(bool)
+
+             stat_values = stat_values.todense().reshape(-1)  # Indexing a .reshape(-1) is faster
+             stat_values = stat_values[mask_data.reshape(-1)]
+
+             # This is used by _compute_null_approximate
+             self.__n_mask_voxels = stat_values.shape[0]
+
+         return stat_values
+
+     def _determine_histogram_bins(self, ma_maps):
+         """Determine histogram bins for null distribution methods.
+
+         Parameters
+         ----------
+         ma_maps : :obj:`sparse._coo.core.COO`
+             MA maps.
+
+         Notes
+         -----
+         This method adds one entry to the null_distributions_ dict attribute: "histogram_bins".
+         """
+         if not isinstance(ma_maps, sparse._coo.core.COO):
+             raise ValueError(f"Unsupported data type '{type(ma_maps)}'")
+
+         # Determine bins for null distribution histogram
+         # Remember that numpy histogram bins are bin edges, not centers
+         # Assuming values of 0, .001, .002, etc., bins are -.0005-.0005, .0005-.0015, etc.
+         INV_STEP_SIZE = 100000
+         step_size = 1 / INV_STEP_SIZE
+         # Need to convert to dense because np.ceil is too slow with sparse
+         max_ma_values = ma_maps.max(axis=[1, 2, 3]).todense()
+
+         # round up based on resolution
+         max_ma_values = np.ceil(max_ma_values * INV_STEP_SIZE) / INV_STEP_SIZE
+         max_poss_ale = self._compute_summarystat(max_ma_values)
+         # create bin centers
+         hist_bins = np.round(np.arange(0, max_poss_ale + (1.5 * step_size), step_size), 5)
+         self.null_distributions_["histogram_bins"] = hist_bins
+
+     def _compute_null_approximate(self, ma_maps):
+         """Compute uncorrected ALE null distribution using approximate solution.
+
+         Parameters
+         ----------
+         ma_maps : :obj:`sparse._coo.core.COO`
+             MA maps.
+
+         Notes
+         -----
+         This method adds two entries to the null_distributions_ dict attribute:
+
+         - "histogram_bins"
+         - "histweights_corr-none_method-approximate"
+         """
+         if not isinstance(ma_maps, sparse._coo.core.COO):
+             raise ValueError(f"Unsupported data type '{type(ma_maps)}'")
+
+         assert "histogram_bins" in self.null_distributions_.keys()
+
+         # Derive bin edges from histogram bin centers for numpy histogram function
+         bin_centers = self.null_distributions_["histogram_bins"]
+         step_size = bin_centers[1] - bin_centers[0]
+         inv_step_size = 1 / step_size
+         bin_edges = bin_centers - (step_size / 2)
+         bin_edges = np.append(bin_edges, bin_edges[-1] + step_size)
+
+         n_exp = ma_maps.shape[0]
+         n_bins = bin_centers.shape[0]
+         ma_hists = np.zeros((n_exp, n_bins))
+         data = ma_maps.data
+         coords = ma_maps.coords
+         for exp_idx in range(n_exp):
+             # The first column of coords is the fourth dimension of the dense array
+             study_ma_values = data[coords[0, :] == exp_idx]
+
+             n_nonzero_voxels = study_ma_values.shape[0]
+             n_zero_voxels = self.__n_mask_voxels - n_nonzero_voxels
+
+             ma_hists[exp_idx, :] = np.histogram(study_ma_values, bins=bin_edges, density=False)[
+                 0
+             ].astype(float)
+             ma_hists[exp_idx, 0] += n_zero_voxels
+
+         # Normalize MA histograms to get probabilities
+         ma_hists /= ma_hists.sum(1)[:, None]
+
+         ale_hist = ma_hists[0, :].copy()
+
+         for i_exp in range(1, ma_hists.shape[0]):
+             exp_hist = ma_hists[i_exp, :]
+
+             # Find histogram bins with nonzero values for each histogram.
+             ale_idx = np.where(ale_hist > 0)[0]
+             exp_idx = np.where(exp_hist > 0)[0]
+
+             # Compute output MA values, ale_hist indices, and probabilities
+             ale_scores = (
+                 1 - np.outer((1 - bin_centers[exp_idx]), (1 - bin_centers[ale_idx])).ravel()
+             )
+             score_idx = np.floor(ale_scores * inv_step_size).astype(int)
+             probabilities = np.outer(exp_hist[exp_idx], ale_hist[ale_idx]).ravel()
+
+             # Reset histogram and set probabilities.
+             # Use at() instead of setting values directly (ale_hist[score_idx] = probabilities)
+             # because there can be redundant values in score_idx.
+             ale_hist = np.zeros(ale_hist.shape)
+             np.add.at(ale_hist, score_idx, probabilities)
+
+         self.null_distributions_["histweights_corr-none_method-approximate"] = ale_hist
+
+
+ class ALESubtraction(PairwiseCBMAEstimator):
+     """ALE subtraction analysis.
+
+     .. versionchanged:: 0.2.1
+
+         - New parameters: ``memory`` and ``memory_level`` for memory caching.
+
+     .. versionchanged:: 0.0.12
+
+         - Use memmapped array for null distribution and remove ``memory_limit`` parameter.
+         - Support parallelization and add progress bar.
+         - Add ALE-difference (stat) and -log10(p) (logp) maps to results.
+         - Use a 4D sparse array for modeled activation maps.
+
+     .. versionchanged:: 0.0.8
+
+         * [FIX] Assume non-symmetric null distribution.
+
+     .. versionchanged:: 0.0.7
+
+         * [FIX] Assume a zero-centered and symmetric null distribution.
+
+     Parameters
+     ----------
+     kernel_transformer : :obj:`~nimare.meta.kernel.KernelTransformer`, optional
+         Kernel with which to convolve coordinates from dataset.
+         Default is ALEKernel.
+     n_iters : :obj:`int`, default=5000
+         Default is 5000.
+     memory : instance of :class:`joblib.Memory`, :obj:`str`, or :class:`pathlib.Path`
+         Used to cache the output of a function. By default, no caching is done.
+         If a :obj:`str` is given, it is the path to the caching directory.
+     memory_level : :obj:`int`, default=0
+         Rough estimator of the amount of memory used by caching.
+         Higher value means more memory for caching. Zero means no caching.
+     n_cores : :obj:`int`, default=1
+         Number of processes to use for meta-analysis. If -1, use all available cores.
+         Default is 1.
+
+         .. versionadded:: 0.0.12
+     **kwargs
+         Keyword arguments. Arguments for the kernel_transformer can be assigned here,
+         with the prefix ``kernel__`` in the variable name. Another optional argument is ``mask``.
+
+     Attributes
+     ----------
+     masker : :class:`~nilearn.input_data.NiftiMasker` or similar
+         Masker object.
+     inputs_ : :obj:`dict`
+         Inputs to the Estimator. For CBMA estimators, there is only one key: coordinates.
+         This is an edited version of the dataset's coordinates DataFrame.
+
+     Notes
+     -----
+     This method was originally developed in :footcite:t:`laird2005ale` and refined in
+     :footcite:t:`eickhoff2012activation`.
+
+     The ALE subtraction algorithm is also implemented as part of the GingerALE app provided by the
+     BrainMap organization (https://www.brainmap.org/ale/).
+
+     The voxel-wise null distributions used by this Estimator are very large, so they are not
+     retained as Estimator attributes.
+
+     Warnings
+     --------
+     This implementation contains one key difference from the original version.
+
+     In the original version, group 1 > group 2 difference values are only evaluated for voxels
+     significant in the group 1 meta-analysis, and group 2 > group 1 difference values are only
+     evaluated for voxels significant in the group 2 meta-analysis.
+
+     In NiMARE's implementation, the analysis is run in a two-sided manner for *all* voxels in the
+     mask.
+
+     References
+     ----------
+     .. footbibliography::
+     """
+
+     def __init__(
+         self,
+         kernel_transformer=ALEKernel,
+         n_iters=5000,
+         memory=Memory(location=None, verbose=0),
+         memory_level=0,
+         n_cores=1,
+         **kwargs,
+     ):
+         if not (isinstance(kernel_transformer, ALEKernel) or kernel_transformer == ALEKernel):
+             LGR.warning(
+                 f"The KernelTransformer being used ({kernel_transformer}) is not optimized "
+                 f"for the {type(self).__name__} algorithm. "
+                 "Expect suboptimal performance and beware bugs."
+             )
+
+         # Add kernel transformer attribute and process keyword arguments
+         super().__init__(
+             kernel_transformer=kernel_transformer,
+             memory=memory,
+             memory_level=memory_level,
+             **kwargs,
+         )
+
+         self.dataset1 = None
+         self.dataset2 = None
+         self.n_iters = n_iters
+         self.n_cores = _check_ncores(n_cores)
+         # memory_limit needs to exist to trigger use_memmap decorator, but it will also be used if
+         # a Dataset with pre-generated MA maps is provided.
+         self.memory_limit = "100mb"
+
+     def _generate_description(self):
+         if (
+             hasattr(self.kernel_transformer, "sample_size")  # Only kernels that allow sample sizes
+             and (self.kernel_transformer.sample_size is None)
+             and (self.kernel_transformer.fwhm is None)
+         ):
+             # Get the total number of subjects in the inputs.
+             n_subjects = (
+                 self.inputs_["coordinates1"].groupby("id")["sample_size"].mean().values.sum()
+             )
+             sample_size_str1 = f", with a total of {int(n_subjects)} participants"
+             n_subjects = (
+                 self.inputs_["coordinates2"].groupby("id")["sample_size"].mean().values.sum()
+             )
+             sample_size_str2 = f", with a total of {int(n_subjects)} participants"
+         else:
+             sample_size_str1 = ""
+             sample_size_str2 = ""
+
+         description = (
+             "An activation likelihood estimation (ALE) subtraction analysis "
+             "\\citep{laird2005ale,eickhoff2012activation} was performed with NiMARE "
+             f"v{__version__} "
+             "(RRID:SCR_017398; \\citealt{Salo2023}), "
+             f"using a(n) {self.kernel_transformer.__class__.__name__.replace('Kernel', '')} "
+             "kernel. "
+             f"{self.kernel_transformer._generate_description()} "
+             "The subtraction analysis was implemented according to NiMARE's \\citep{Salo2023} "
+             "approach, which differs from the original version. "
+             "In this version, ALE-difference scores are calculated between the two datasets, "
+             "for all voxels in the mask, rather than for voxels significant in the main effects "
+             "analyses of the two datasets. "
+             "Next, voxel-wise null distributions of ALE-difference scores were generated via a "
+             "randomized group assignment procedure, in which the studies in the two datasets were "
+             "randomly reassigned and ALE-difference scores were calculated for the randomized "
+             "datasets. "
+             f"This randomization procedure was repeated {self.n_iters} times to build the null "
+             "distributions. "
+             "The significance of the original ALE-difference scores was assessed using a "
+             "two-sided statistical test. "
+             "The null distributions were assumed to be asymmetric, as ALE-difference scores will "
+             "be skewed based on the sample sizes of the two datasets. "
+             f"The first input dataset (group1) included {self.inputs_['coordinates1'].shape[0]} "
+             f"foci from {len(self.inputs_['id1'])} experiments{sample_size_str1}. "
+             f"The second input dataset (group2) included {self.inputs_['coordinates2'].shape[0]} "
+             f"foci from {len(self.inputs_['id2'])} experiments{sample_size_str2}. "
+         )
+         return description
+
+     @use_memmap(LGR, n_files=3)
+     def _fit(self, dataset1, dataset2):
+         self.dataset1 = dataset1
+         self.dataset2 = dataset2
+         self.masker = self.masker or dataset1.masker
+
+         ma_maps1 = self._collect_ma_maps(
+             maps_key="ma_maps1",
+             coords_key="coordinates1",
+         )
+         ma_maps2 = self._collect_ma_maps(
+             maps_key="ma_maps2",
+             coords_key="coordinates2",
+         )
+
+         # Get ALE values for the two groups and difference scores
+         grp1_ale_values = self._compute_summarystat_est(ma_maps1)
+         grp2_ale_values = self._compute_summarystat_est(ma_maps2)
+         diff_ale_values = grp1_ale_values - grp2_ale_values
+         del grp1_ale_values, grp2_ale_values
+
+         n_grp1 = ma_maps1.shape[0]
+         n_voxels = diff_ale_values.shape[0]
+
+         # Combine the MA maps into a single array to draw from for null distribution
+         ma_arr = sparse.concatenate((ma_maps1, ma_maps2))
+
+         del ma_maps1, ma_maps2
+
+         # Calculate null distribution for each voxel based on group-assignment randomization
+         # Use a memmapped 2D array
+         iter_diff_values = np.memmap(
+             self.memmap_filenames[2],
+             dtype=ma_arr.dtype,
+             mode="w+",
+             shape=(self.n_iters, n_voxels),
+         )
+
+         _ = [
+             r
+             for r in tqdm(
+                 Parallel(return_as="generator", n_jobs=self.n_cores)(
+                     delayed(self._run_permutation)(i_iter, n_grp1, ma_arr, iter_diff_values)
+                     for i_iter in range(self.n_iters)
+                 ),
+                 total=self.n_iters,
+             )
+         ]
+
+         # Determine p-values based on voxel-wise null distributions
+         # I know that joblib probably preserves order of outputs, but I'm paranoid, so we track
+         # the iteration as well and sort the resulting p-value array based on that.
+         p_values, voxel_idx = tqdm(
+             zip(
+                 *Parallel(return_as="generator", n_jobs=self.n_cores)(
+                     delayed(self._alediff_to_p_voxel)(
+                         i_voxel,
+                         diff_ale_values[i_voxel],
+                         iter_diff_values[:, i_voxel],
+                     )
+                     for i_voxel in range(n_voxels)
+                 ),
+             ),
+             total=n_voxels,
+         )
+
+         # Convert to an array and sort the p-values array based on the voxel index.
+         p_values = np.array(p_values)[np.array(voxel_idx)]
+
+         diff_signs = np.sign(diff_ale_values - np.median(iter_diff_values, axis=0))
+
+         if isinstance(iter_diff_values, np.memmap):
+             LGR.debug(f"Closing memmap at {iter_diff_values.filename}")
+             iter_diff_values._mmap.close()
+
+         del iter_diff_values
+
+         z_arr = p_to_z(p_values, tail="two") * diff_signs
+         logp_arr = -np.log10(p_values)
+
+         maps = {
+             "stat_desc-group1MinusGroup2": diff_ale_values,
+             "p_desc-group1MinusGroup2": p_values,
+             "z_desc-group1MinusGroup2": z_arr,
+             "logp_desc-group1MinusGroup2": logp_arr,
+         }
+         description = self._generate_description()
+
+         return maps, {}, description
+
+     def _compute_summarystat_est(self, ma_values):
+         stat_values = 1.0 - np.prod(1.0 - ma_values, axis=0)
+
+         if isinstance(stat_values, sparse._coo.core.COO):
+             # NOTE: This may not work correctly with a non-NiftiMasker.
+             mask_data = self.masker.mask_img.get_fdata().astype(bool)
+
+             stat_values = stat_values.todense().reshape(-1)  # Indexing a .reshape(-1) is faster
+             stat_values = stat_values[mask_data.reshape(-1)]
+
+         return stat_values
+
+     def _run_permutation(self, i_iter, n_grp1, ma_arr, iter_diff_values):
+         """Run a single permutation of the ALESubtraction null distribution procedure.
+
+         This method writes out a single row to the memmapped array in ``iter_diff_values``.
+
+         Parameters
+         ----------
+         i_iter : :obj:`int`
+             The iteration number.
+         n_grp1 : :obj:`int`
+             The number of experiments in the first group (of two, total).
+         ma_arr : :obj:`numpy.ndarray` of shape (E, V)
+             The voxel-wise (V) modeled activation values for all experiments E.
+         iter_diff_values : :obj:`numpy.memmap` of shape (I, V)
+             The null distribution of ALE-difference scores, with one row per iteration (I)
+             and one column per voxel (V).
+         """
+         gen = np.random.default_rng(seed=i_iter)
+         id_idx = np.arange(ma_arr.shape[0])
+         gen.shuffle(id_idx)
+         iter_grp1_ale_values = self._compute_summarystat_est(ma_arr[id_idx[:n_grp1], :])
+         iter_grp2_ale_values = self._compute_summarystat_est(ma_arr[id_idx[n_grp1:], :])
+         iter_diff_values[i_iter, :] = iter_grp1_ale_values - iter_grp2_ale_values
+
+     def _alediff_to_p_voxel(self, i_voxel, stat_value, voxel_null):
+         """Compute one voxel's p-value from its specific null distribution.
+
+         Notes
+         -----
+         In cases with differently-sized groups, the ALE-difference values will be biased and
+         skewed, but the null distributions will be too, so symmetric should be False.
+         """
+         p_value = null_to_p(stat_value, voxel_null, tail="two", symmetric=False)
+         return p_value, i_voxel
+
+     def correct_fwe_montecarlo(self):
+         """Perform Monte Carlo-based FWE correction.
+
+         Warnings
+         --------
+         This method is not implemented for this class.
+         """
+         raise NotImplementedError(
+             f"The {type(self)} class does not support `correct_fwe_montecarlo`."
+         )
+
+
+ class SCALE(CBMAEstimator):
+     r"""Specific coactivation likelihood estimation.
+
+     This method was originally introduced in :footcite:t:`langner2014meta`.
+
+     .. versionchanged:: 0.2.1
+
+         - New parameters: ``memory`` and ``memory_level`` for memory caching.
+
+     .. versionchanged:: 0.0.12
+
+         - Remove unused parameters ``voxel_thresh`` and ``memory_limit``.
+         - Use memmapped array for null distribution.
+         - Use a 4D sparse array for modeled activation maps.
+
+     .. versionchanged:: 0.0.10
+
+         Replace ``ijk`` with ``xyz``. This should be easier for users to collect.
+
+     Parameters
+     ----------
+     xyz : (N x 3) :obj:`numpy.ndarray`
+         Numpy array with XYZ coordinates.
+         Voxels are rows and x, y, z (meaning coordinates) values are the three columns.
+
+         .. versionchanged:: 0.0.12
+
+             This parameter was previously incorrectly labeled as "optional" and indicated that
+             it supports tab-delimited files, which it does not (yet).
+
+     n_iters : int, default=5000
+         Number of iterations for statistical inference. Default: 5000
+     n_cores : int, default=1
+         Number of processes to use for meta-analysis. If -1, use all available cores.
+         Default: 1
+     kernel_transformer : :obj:`~nimare.meta.kernel.KernelTransformer`, optional
+         Kernel with which to convolve coordinates from dataset. Default is
+         :class:`~nimare.meta.kernel.ALEKernel`.
+     memory : instance of :class:`joblib.Memory`, :obj:`str`, or :class:`pathlib.Path`
+         Used to cache the output of a function. By default, no caching is done.
+         If a :obj:`str` is given, it is the path to the caching directory.
+     memory_level : :obj:`int`, default=0
+         Rough estimator of the amount of memory used by caching.
+         Higher value means more memory for caching. Zero means no caching.
+     **kwargs
+         Keyword arguments. Arguments for the kernel_transformer can be assigned here,
+         with the prefix ``kernel__`` in the variable name.
+
+     Attributes
+     ----------
+     masker : :class:`~nilearn.input_data.NiftiMasker` or similar
+         Masker object.
+     inputs_ : :obj:`dict`
+         Inputs to the Estimator. For CBMA estimators, there is only one key: coordinates.
+         This is an edited version of the dataset's coordinates DataFrame.
+     null_distributions_ : :obj:`dict` of :class:`numpy.ndarray`
+         Null distribution information.
+         Entries are added to this attribute if and when the corresponding method is applied.
+
+         .. important::
+             The voxel-wise null distributions used by this Estimator are very large, so they are
+             not retained as Estimator attributes.
+
+         If :meth:`fit` is applied:
+
+         - ``histogram_bins``: Array of bin centers for the null distribution histogram,
+           ranging from zero to the maximum possible summary statistic value for the Dataset.
+
+     References
+     ----------
+     .. footbibliography::
+     """
+
+     def __init__(
+         self,
+         xyz,
+         n_iters=5000,
+         n_cores=1,
+         kernel_transformer=ALEKernel,
+         memory=Memory(location=None, verbose=0),
+         memory_level=0,
+         **kwargs,
+     ):
+         if not (isinstance(kernel_transformer, ALEKernel) or kernel_transformer == ALEKernel):
+             LGR.warning(
+                 f"The KernelTransformer being used ({kernel_transformer}) is not optimized "
+                 f"for the {type(self).__name__} algorithm. "
+                 "Expect suboptimal performance and beware bugs."
+             )
+
+         # Add kernel transformer attribute and process keyword arguments
+         super().__init__(
+             kernel_transformer=kernel_transformer,
+             memory=memory,
+             memory_level=memory_level,
+             **kwargs,
+         )
+
+         if not isinstance(xyz, np.ndarray):
+             raise TypeError(f"Parameter 'xyz' must be a numpy.ndarray, not a {type(xyz)}")
+         elif xyz.ndim != 2:
+             raise ValueError(f"Parameter 'xyz' must be a 2D array, but has {xyz.ndim} dimensions")
+         elif xyz.shape[1] != 3:
+             raise ValueError(f"Parameter 'xyz' must have 3 columns, but has shape {xyz.shape}")
+
+         self.xyz = xyz
+         self.n_iters = n_iters
+         self.n_cores = _check_ncores(n_cores)
+         # memory_limit needs to exist to trigger use_memmap decorator, but it will also be used if
+         # a Dataset with pre-generated MA maps is provided.
+         self.memory_limit = "100mb"
+
+     def _generate_description(self):
+         if (
+             hasattr(self.kernel_transformer, "sample_size")  # Only kernels that allow sample sizes
+             and (self.kernel_transformer.sample_size is None)
+             and (self.kernel_transformer.fwhm is None)
+         ):
+             # Get the total number of subjects in the inputs.
+             n_subjects = (
+                 self.inputs_["coordinates"].groupby("id")["sample_size"].mean().values.sum()
+             )
+             sample_size_str = f", with a total of {int(n_subjects)} participants"
+         else:
+             sample_size_str = ""
+
+         description = (
+             "A specific coactivation likelihood estimation (SCALE) meta-analysis "
+             "\\citep{langner2014meta} was performed with NiMARE "
+             f"{__version__} "
+             "(RRID:SCR_017398; \\citealt{Salo2023}), with "
+             f"{self.n_iters} iterations. "
+             f"The input dataset included {self.inputs_['coordinates'].shape[0]} foci from "
+             f"{len(self.inputs_['id'])} experiments{sample_size_str}."
+         )
+         return description
+
+     @use_memmap(LGR, n_files=2)
+     def _fit(self, dataset):
+         """Perform specific coactivation likelihood estimation meta-analysis on dataset.
+
+         Parameters
+         ----------
+         dataset : :obj:`~nimare.dataset.Dataset`
+             Dataset to analyze.
+         """
+         self.dataset = dataset
+         self.masker = self.masker or dataset.masker
+         self.null_distributions_ = {}
+
+         ma_values = self._collect_ma_maps(
+             coords_key="coordinates",
+             maps_key="ma_maps",
+         )
+
+         # Determine bins for null distribution histogram
+         max_ma_values = ma_values.max(axis=[1, 2, 3]).todense()
+
+         max_poss_ale = self._compute_summarystat_est(max_ma_values)
+         self.null_distributions_["histogram_bins"] = np.round(
+             np.arange(0, max_poss_ale + 0.001, 0.0001), 4
+         )
+
+         stat_values = self._compute_summarystat_est(ma_values)
+
+         del ma_values
+
+         iter_df = self.inputs_["coordinates"].copy()
+         rand_idx = np.random.choice(self.xyz.shape[0], size=(iter_df.shape[0], self.n_iters))
+         rand_xyz = self.xyz[rand_idx, :]
+         iter_xyzs = np.split(rand_xyz, rand_xyz.shape[1], axis=1)
+
+         perm_scale_values = np.memmap(
+             self.memmap_filenames[1],
+             dtype=stat_values.dtype,
+             mode="w+",
+             shape=(self.n_iters, stat_values.shape[0]),
+         )
+         _ = [
+             r
+             for r in tqdm(
+                 Parallel(return_as="generator", n_jobs=self.n_cores)(
+                     delayed(self._run_permutation)(
+                         i_iter, iter_xyzs[i_iter], iter_df, perm_scale_values
+                     )
+                     for i_iter in range(self.n_iters)
+                 ),
+                 total=self.n_iters,
+             )
+         ]
+
+         p_values, z_values = self._scale_to_p(stat_values, perm_scale_values)
+
+         if isinstance(perm_scale_values, np.memmap):
+             LGR.debug(f"Closing memmap at {perm_scale_values.filename}")
+             perm_scale_values._mmap.close()
+
+         del perm_scale_values
+
+         logp_values = -np.log10(p_values)
+         logp_values[np.isinf(logp_values)] = -np.log10(np.finfo(float).eps)
+
+         # Write out unthresholded value images
+         maps = {"stat": stat_values, "logp": logp_values, "z": z_values}
+         description = self._generate_description()
+
+         return maps, {}, description
+
+     def _compute_summarystat_est(self, data):
+         """Generate ALE-value array and null distribution from a list of contrasts.
+
+         For ALEs on the original dataset, computes the null distribution.
+         For permutation ALEs and all SCALEs, just computes ALE values.
+         Returns a masked array of ALE values and a 1 x n_bins null distribution.
+         """
+         if isinstance(data, pd.DataFrame):
+             ma_values = self.kernel_transformer.transform(
+                 data, masker=self.masker, return_type="sparse"
+             )
+         elif isinstance(data, (np.ndarray, sparse._coo.core.COO)):
+             ma_values = data
+         else:
+             raise ValueError(f"Unsupported data type '{type(data)}'")
+
+         stat_values = 1.0 - np.prod(1.0 - ma_values, axis=0)
+
+         if isinstance(stat_values, sparse._coo.core.COO):
+             # NOTE: This may not work correctly with a non-NiftiMasker.
+             mask_data = self.masker.mask_img.get_fdata().astype(bool)
+
+             stat_values = stat_values.todense().reshape(-1)  # Indexing a .reshape(-1) is faster
+             stat_values = stat_values[mask_data.reshape(-1)]
+
+         return stat_values
+
+     def _scale_to_p(self, stat_values, scale_values):
+         """Compute p- and z-values.
+
+         Parameters
+         ----------
+         stat_values : (V) array
+             ALE values.
+         scale_values : (I x V) array
+             Permutation ALE values.
+
+         Returns
+         -------
+         p_values : (V) array
+         z_values : (V) array
+
+         Notes
+         -----
+         This method also uses the "histogram_bins" element in the null_distributions_ attribute.
+         """
+         n_voxels = stat_values.shape[0]
+
+         # I know that joblib probably preserves order of outputs, but I'm paranoid, so we track
+         # the iteration as well and sort the resulting p-value array based on that.
+         p_values, voxel_idx = tqdm(
+             zip(
+                 *Parallel(return_as="generator", n_jobs=self.n_cores)(
+                     delayed(self._scale_to_p_voxel)(
+                         i_voxel, stat_values[i_voxel], scale_values[:, i_voxel]
+                     )
+                     for i_voxel in range(n_voxels)
+                 )
+             ),
+             total=n_voxels,
+         )
+
+         # Convert to an array and sort the p-values array based on the voxel index.
+         p_values = np.array(p_values)[np.array(voxel_idx)]
+
+         z_values = p_to_z(p_values, tail="one")
+         return p_values, z_values
+
+     def _scale_to_p_voxel(self, i_voxel, stat_value, voxel_null):
+         """Compute one voxel's p-value from its specific null distribution."""
+         scale_zeros = voxel_null == 0
+         n_zeros = np.sum(scale_zeros)
+         voxel_null[scale_zeros] = np.nan
+         scale_hist = np.empty(len(self.null_distributions_["histogram_bins"]))
+         scale_hist[0] = n_zeros
+
+         scale_hist[1:] = np.histogram(
+             a=voxel_null,
+             bins=self.null_distributions_["histogram_bins"],
+             range=(
+                 np.min(self.null_distributions_["histogram_bins"]),
+                 np.max(self.null_distributions_["histogram_bins"]),
+             ),
+             density=False,
+         )[0]
+
+         p_value = nullhist_to_p(
+             stat_value,
+             scale_hist,
+             self.null_distributions_["histogram_bins"],
+         )
+         return p_value, i_voxel
+
+     def _run_permutation(self, i_row, iter_xyz, iter_df, perm_scale_values):
+         """Run a single random SCALE permutation of a dataset."""
+         iter_xyz = np.squeeze(iter_xyz)
+         iter_df[["x", "y", "z"]] = iter_xyz
+         stat_values = self._compute_summarystat_est(iter_df)
+         perm_scale_values[i_row, :] = stat_values
+
+     def correct_fwe_montecarlo(self):
+         """Perform Monte Carlo-based FWE correction.
+
+         Warnings
+         --------
+         This method is not implemented for this class.
+         """
+         raise NotImplementedError(
+             f"The {type(self)} class does not support `correct_fwe_montecarlo`."
+         )
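
For orientation, here is a minimal sketch of driving the ALE estimator defined in this file, written against NiMARE's public Estimator/Corrector workflow rather than taken from this diff; the Dataset JSON path and output prefix are placeholders.

```python
from nimare.correct import FWECorrector
from nimare.dataset import Dataset
from nimare.meta.cbma.ale import ALE

# Placeholder path: any NiMARE Dataset JSON, e.g. the bundled nidm_pain_dset.json
dset = Dataset("nidm_pain_dset.json")

# "approximate" uses the weighted-convolution histogram null described in the
# class docstring; "montecarlo" would instead permute coordinates within the mask.
ale = ALE(null_method="approximate")
results = ale.fit(dset)

# ALE supports correct_fwe_montecarlo, which FWECorrector dispatches to.
corrector = FWECorrector(method="montecarlo", n_iters=5000, n_cores=1)
corrected = corrector.transform(results)
corrected.save_maps(output_dir=".", prefix="ale")
```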
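ALESubtraction is a PairwiseCBMAEstimator, so its ``fit`` takes two datasets, and the result keys match those assembled in its ``_fit`` method. A hedged sketch, assuming ``dset1`` and ``dset2`` are existing Dataset objects:

```python
from nimare.meta.cbma.ale import ALESubtraction

# dset1 and dset2 are assumed to be nimare.dataset.Dataset objects
sub = ALESubtraction(n_iters=10000, n_cores=2)
sub_results = sub.fit(dset1, dset2)

# Key names mirror the maps dict built in ALESubtraction._fit
z_img = sub_results.get_map("z_desc-group1MinusGroup2")
```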
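SCALE additionally requires the ``xyz`` array that defines the pool its random coordinates are drawn from; in a real analysis this should come from a large reference database (e.g., all Neurosynth coordinates). A sketch under that assumption, reusing the analyzed dataset's own coordinates purely for illustration:

```python
from nimare.meta.cbma.ale import SCALE

# For a real analysis, draw xyz from a large reference database; using the
# analyzed dataset's own coordinates here is for illustration only.
xyz = dset.coordinates[["x", "y", "z"]].values
scale = SCALE(xyz=xyz, n_iters=2500, n_cores=1)
scale_results = scale.fit(dset)
```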