nimare-0.4.2rc4-py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registries, and is provided for informational purposes only.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +635 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +240 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2rc4.dist-info/LICENSE +21 -0
  115. nimare-0.4.2rc4.dist-info/METADATA +124 -0
  116. nimare-0.4.2rc4.dist-info/RECORD +119 -0
  117. nimare-0.4.2rc4.dist-info/WHEEL +5 -0
  118. nimare-0.4.2rc4.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2rc4.dist-info/top_level.txt +2 -0
nimare/results.py ADDED
@@ -0,0 +1,225 @@
+ """Tools for managing meta-analytic results."""
+
+ import copy
+ import logging
+ import os
+
+ import numpy as np
+ import pandas as pd
+ from nibabel.funcs import squeeze_image
+
+ from nimare.base import NiMAREBase
+ from nimare.utils import get_description_references, get_masker
+
+ LGR = logging.getLogger(__name__)
+
+
+ class MetaResult(NiMAREBase):
+     """Base class for meta-analytic results.
+
+     .. versionchanged:: 0.1.0
+
+         - Added ``corrector`` and ``diagnostics`` attributes.
+
+     .. versionchanged:: 0.0.12
+
+         - Added the ``description`` attribute.
+
+     Parameters
+     ----------
+     estimator : :class:`~nimare.base.Estimator`
+         The Estimator used to generate the maps in the MetaResult.
+     corrector : :class:`~nimare.correct.Corrector`
+         The Corrector used to correct the maps in the MetaResult.
+     diagnostics : :obj:`list` of :class:`~nimare.diagnostics.Diagnostics`
+         List of diagnostic classes.
+     mask : Niimg-like or `nilearn.input_data.base_masker.BaseMasker`
+         Mask for converting maps between arrays and images.
+     maps : None or :obj:`dict` of :obj:`numpy.ndarray`, optional
+         Maps to store in the object. The maps must be provided as 1D numpy arrays.
+         Default is None.
+     tables : None or :obj:`dict` of :obj:`pandas.DataFrame`, optional
+         Pandas DataFrames to store in the object. Default is None.
+     description : :obj:`str`, optional
+         Description of the method that generated the result. Default is "".
+
+     Attributes
+     ----------
+     estimator : :class:`~nimare.base.Estimator`
+         The Estimator used to generate the maps in the MetaResult.
+     corrector : :class:`~nimare.correct.Corrector`
+         The Corrector used to correct the maps in the MetaResult.
+     diagnostics : :obj:`list` of :class:`~nimare.diagnostics.Diagnostics`
+         List of diagnostic classes.
+     masker : :class:`~nilearn.input_data.NiftiMasker` or similar
+         Masker object.
+     maps : :obj:`dict`
+         Keys are map names and values are 1D arrays.
+     tables : :obj:`dict`
+         Keys are table levels and values are pandas DataFrames.
+     description_ : :obj:`str`
+         A textual description of the method that generated the result.
+
+         Citations in this description are formatted according to ``natbib``'s LaTeX format.
+     bibtex_ : :obj:`str`
+         The BibTeX entries for any citations in ``description_``.
+         These entries are automatically extracted from NiMARE's references.bib file and
+         filtered based on the description.
+
+         Users should be able to copy the contents of the ``bibtex_`` attribute into their
+         own BibTeX file without issue.
+     """
+
+     def __init__(
+         self,
+         estimator,
+         corrector=None,
+         diagnostics=None,
+         mask=None,
+         maps=None,
+         tables=None,
+         description="",
+     ):
+         self.estimator = copy.deepcopy(estimator)
+         self.corrector = copy.deepcopy(corrector)
+         diagnostics = diagnostics or []
+         self.diagnostics = [copy.deepcopy(diagnostic) for diagnostic in diagnostics]
+         self.masker = get_masker(mask)
+
+         maps = maps or {}
+         tables = tables or {}
+
+         for map_name, map_ in maps.items():
+             if not isinstance(map_, np.ndarray):
+                 raise ValueError(f"Maps must be numpy arrays. '{map_name}' is a {type(map_)}")
+
+             if map_.ndim != 1:
+                 LGR.warning(f"Map '{map_name}' should be 1D, not {map_.ndim}D. Squeezing.")
+                 # Store the squeezed array back so the fix actually takes effect
+                 maps[map_name] = np.squeeze(map_)
+
+         for table_name, table in tables.items():
+             if not isinstance(table, pd.DataFrame):
+                 raise ValueError(f"Tables must be DataFrames. '{table_name}' is a {type(table)}")
+
+         self.maps = maps
+         self.tables = tables
+         self.metadata = {}
+         self.description_ = description
+
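A minimal construction sketch (illustrative only, not part of the released file; the toy 2x2x2 mask and map values are assumptions, and estimator=None is passed only because the constructor merely deep-copies it):

    import nibabel as nib
    import numpy as np

    from nimare.results import MetaResult

    mask = nib.Nifti1Image(np.ones((2, 2, 2), dtype=np.int32), affine=np.eye(4))
    res = MetaResult(estimator=None, mask=mask, maps={"z": np.zeros(8)})  # 8 in-mask voxels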
+     @property
+     def description_(self):
+         """:obj:`str`: A textual description of the method that generated the result."""
+         return self.__description
+
+     @description_.setter
+     def description_(self, desc):
+         """Automatically extract references when the description is set."""
+         self.__description = desc
+         self.bibtex_ = get_description_references(desc)
+
+     def get_map(self, name, return_type="image"):
+         """Get a stored map as an image or array.
+
+         Parameters
+         ----------
+         name : :obj:`str`
+             Name of the map. Used to index ``self.maps``.
+         return_type : {'image', 'array'}, optional
+             Whether to return a niimg ('image') or a numpy array ('array').
+             Default is 'image'.
+         """
+         m = self.maps.get(name)
+         if m is None:
+             raise ValueError(f"No map with name '{name}' found.")
+         if return_type == "image":
+             # pending resolution of https://github.com/nilearn/nilearn/issues/2724
+             try:
+                 return self.masker.inverse_transform(m)
+             except IndexError:
+                 return squeeze_image(self.masker.inverse_transform([m]))
+         return m
+
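Continuing the sketch above, both return types can be exercised like so:

    z_img = res.get_map("z")                        # nibabel image, via the masker
    z_arr = res.get_map("z", return_type="array")   # the stored 1D array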
+     def save_maps(self, output_dir=".", prefix="", prefix_sep="_", names=None):
+         """Save results to files.
+
+         Parameters
+         ----------
+         output_dir : :obj:`str`, optional
+             Output directory in which to save results. If the directory doesn't
+             exist, it will be created. Default is the current directory.
+         prefix : :obj:`str`, optional
+             Prefix to prepend to output file names. Default is "".
+         prefix_sep : :obj:`str`, optional
+             Separator to add between the prefix and the default file names.
+             Default is "_".
+         names : None or :obj:`list` of :obj:`str`, optional
+             Names of specific maps to write out. If None, save all maps.
+             Default is None.
+         """
+         if prefix == "":
+             prefix_sep = ""
+
+         if not prefix.endswith(prefix_sep):
+             prefix = prefix + prefix_sep
+
+         if not os.path.exists(output_dir):
+             os.makedirs(output_dir)
+
+         names = names or list(self.maps.keys())
+         maps = {k: self.get_map(k) for k in names if self.maps[k] is not None}
+
+         for imgtype, img in maps.items():
+             filename = prefix + imgtype + ".nii.gz"
+             outpath = os.path.join(output_dir, filename)
+             img.to_filename(outpath)
+
+     def save_tables(self, output_dir=".", prefix="", prefix_sep="_", names=None):
+         """Save result tables to TSV files.
+
+         Parameters
+         ----------
+         output_dir : :obj:`str`, optional
+             Output directory in which to save results. If the directory doesn't
+             exist, it will be created. Default is the current directory.
+         prefix : :obj:`str`, optional
+             Prefix to prepend to output file names. Default is "".
+         prefix_sep : :obj:`str`, optional
+             Separator to add between the prefix and the default file names.
+             Default is "_".
+         names : None or :obj:`list` of :obj:`str`, optional
+             Names of specific tables to write out. If None, save all tables.
+             Default is None.
+         """
+         if prefix == "":
+             prefix_sep = ""
+
+         if not prefix.endswith(prefix_sep):
+             prefix = prefix + prefix_sep
+
+         if not os.path.exists(output_dir):
+             os.makedirs(output_dir)
+
+         names = names or list(self.tables.keys())
+         tables = {k: self.tables[k] for k in names}
+
+         for tabletype, table in tables.items():
+             filename = prefix + tabletype + ".tsv"
+             outpath = os.path.join(output_dir, filename)
+             if table is not None:
+                 table.to_csv(outpath, sep="\t", index=False)
+             else:
+                 LGR.warning(f"Table {tabletype} is None. Not saving.")
+
+     def copy(self):
+         """Return a copy of the result object."""
+         new = MetaResult(
+             estimator=self.estimator,
+             corrector=self.corrector,
+             diagnostics=self.diagnostics,
+             mask=self.masker,
+             maps=copy.deepcopy(self.maps),
+             tables=copy.deepcopy(self.tables),
+             description=self.description_,
+         )
+         return new
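To round out the sketch: save_maps writes one NIfTI file per map using the prefix rules above, and copy() returns an independent object (the maps and tables are deep-copied):

    res.save_maps(output_dir="out", prefix="demo")  # writes out/demo_z.nii.gz
    res2 = res.copy()  # changes to res2.maps will not affect res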
nimare/stats.py ADDED
@@ -0,0 +1,276 @@
+ """Various statistical helper functions."""
+
+ import logging
+ import warnings
+
+ import numpy as np
+
+ from nimare import utils
+
+ LGR = logging.getLogger(__name__)
+
+
+ def one_way(data, n):
+     """One-way chi-square test of independence.
+
+     Takes a 1D array as input and compares activation at each voxel to the
+     proportion expected under a uniform distribution throughout the array.
+     Note that if you're testing activation with this, make sure that only
+     valid voxels (e.g., in-mask gray matter voxels) are included in the
+     array, or the results won't make any sense!
+
+     Parameters
+     ----------
+     data : 1D array_like
+         Counts across voxels.
+     n : :obj:`int`
+         Maximum possible count (i.e., total number of units) for all cells in
+         ``data``. If ``data`` is n_voxels long, then ``n`` is the number of
+         studies in the analysis.
+
+     Returns
+     -------
+     chi2 : :class:`numpy.ndarray`
+         Chi-square values.
+
+     Notes
+     -----
+     Taken from Neurosynth.
+     """
+     term = data.astype("float64")
+     no_term = n - term
+     t_exp = np.mean(term, 0)
+     t_exp = np.array([t_exp] * data.shape[0])
+     nt_exp = n - t_exp
+     t_mss = (term - t_exp) ** 2 / t_exp
+     nt_mss = (no_term - nt_exp) ** 2 / nt_exp
+     chi2 = t_mss + nt_mss
+     return chi2
+
+
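A worked check on one_way (hypothetical counts, not part of the released file): with n=100 studies, the expected count is the mean of the data (30 here), so the first voxel's statistic is (10 - 30)**2 / 30 + (90 - 70)**2 / 70, about 19.05:

    import numpy as np

    from nimare.stats import one_way

    counts = np.array([10, 20, 30, 40, 50])  # activation counts at 5 voxels
    chi2 = one_way(counts, n=100)
    print(chi2[0])  # ~19.05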
+ def two_way(cells):
+     """Two-way chi-square test of independence.
+
+     Takes a 3D array as input: N(voxels) x 2 x 2, where the last two
+     dimensions are the contingency table for each of N voxels.
+
+     Parameters
+     ----------
+     cells : (N, 2, 2) array_like
+         Concatenated set of contingency tables. There are N contingency tables,
+         with the last two dimensions being the tables for each input.
+
+     Returns
+     -------
+     chi_sq : :class:`numpy.ndarray`
+         Chi-square values.
+
+     Notes
+     -----
+     Taken from Neurosynth.
+     """
+     # Mute divide-by-zero warnings for bad voxels, since we account for them later
+     warnings.simplefilter("ignore", RuntimeWarning)
+
+     cells = cells.astype("float64")  # Make sure we don't overflow
+     total = np.apply_over_axes(np.sum, cells, [1, 2]).ravel()
+     chi_sq = np.zeros(cells.shape, dtype="float64")
+     for i in range(2):
+         for j in range(2):
+             exp = np.sum(cells[:, i, :], 1).ravel() * np.sum(cells[:, :, j], 1).ravel() / total
+             bad_vox = np.where(exp == 0)[0]
+             chi_sq[:, i, j] = (cells[:, i, j] - exp) ** 2 / exp
+             chi_sq[bad_vox, i, j] = 1.0  # Assign a placeholder value of 1 to invalid voxels
+     chi_sq = np.apply_over_axes(np.sum, chi_sq, [1, 2]).ravel()
+     return chi_sq
+
+
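An illustrative two_way call (hypothetical tables): each 2x2 slice might cross term usage against activation, with rows for term used/not used and columns for voxel active/inactive:

    import numpy as np

    from nimare.stats import two_way

    cells = np.array(
        [
            [[20, 10], [5, 65]],   # voxel 1: activation tracks the term
            [[10, 20], [10, 60]],  # voxel 2: weaker association
        ]
    )
    chi_sq = two_way(cells)  # one chi-square value per voxel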
+ def pearson(x, y):
+     """Correlate row vector x with each row vector in 2D array y, quickly.
+
+     Parameters
+     ----------
+     x : (1, N) array_like
+         Row vector to correlate with each row in ``y``.
+     y : (M, N) array_like
+         Array, for which each row is correlated with ``x``.
+
+     Returns
+     -------
+     rs : (M,) :class:`numpy.ndarray`
+         Pearson correlation coefficients for ``x`` against each row of ``y``.
+     """
+     data = np.vstack((x, y))
+     ms = data.mean(axis=1)[:, None]
+     datam = data - ms
+     datass = np.sqrt(np.sum(datam**2, axis=1))
+     temp = np.dot(datam[1:], datam[0].T)
+     rs = temp / (datass[1:] * datass[0])
+     return rs
+
+
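A quick sanity check (hypothetical data): pearson should agree with the first row of numpy's full correlation matrix:

    import numpy as np

    from nimare.stats import pearson

    rng = np.random.default_rng(0)
    x = rng.standard_normal((1, 50))
    y = rng.standard_normal((10, 50))
    rs = pearson(x, y)
    assert np.allclose(rs, np.corrcoef(np.vstack((x, y)))[0, 1:])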
+ def null_to_p(test_value, null_array, tail="two", symmetric=False):
+     """Return p-value for test value(s) against null array.
+
+     .. versionchanged:: 0.0.7
+
+         * [FIX] Add parameter *symmetric*.
+
+     Parameters
+     ----------
+     test_value : :obj:`float` or 1D array_like
+         Value(s) for which to determine p-value(s).
+     null_array : 1D array_like
+         Null distribution against which ``test_value`` is compared.
+     tail : {'two', 'upper', 'lower'}, optional
+         Whether to compare the value against the null distribution in a two-sided
+         ('two') or one-sided ('upper' or 'lower') manner.
+         If 'upper', then higher test values are more significant.
+         If 'lower', then lower test values are more significant.
+         Default is 'two'.
+     symmetric : :obj:`bool`, optional
+         When ``tail="two"``, indicates how to compute p-values. When False (default),
+         both one-tailed p-values are computed, and the two-tailed p is double
+         the minimum one-tailed p. When True, it is assumed that the null
+         distribution is zero-centered and symmetric, and the two-tailed p-value
+         is computed as P(abs(test_value) >= abs(null_array)).
+
+     Returns
+     -------
+     p_value : :obj:`float` or :class:`numpy.ndarray`
+         P-value(s) associated with the test value when compared against the null
+         distribution. The return type matches the input type (i.e., a float if
+         ``test_value`` is a single float, and an array if ``test_value`` is an array).
+
+     Notes
+     -----
+     P-values are clipped based on the number of elements in the null array.
+     Therefore, no p-values of 0 or 1 should be produced.
+
+     When the null distribution is known to be symmetric and centered on zero,
+     and two-tailed p-values are desired, use ``symmetric=True``, as it is
+     approximately twice as computationally efficient and has lower variance.
+     """
+     if tail not in {"two", "upper", "lower"}:
+         raise ValueError('Argument "tail" must be one of ["two", "upper", "lower"]')
+
+     return_first = isinstance(test_value, (float, int))
+     test_value = np.atleast_1d(test_value)
+     null_array = np.array(null_array)
+
+     # For efficiency's sake, if there are more than 1000 values, pass only the unique
+     # values through the p-value computation, and then reconstruct.
+     if len(test_value) > 1000:
+         reconstruct = True
+         test_value, uniq_idx = np.unique(test_value, return_inverse=True)
+     else:
+         reconstruct = False
+
+     def compute_p(t, null):
+         null = np.sort(null)
+         idx = np.searchsorted(null, t, side="left").astype(float)
+         return 1 - idx / len(null)
+
+     if tail == "two":
+         if symmetric:
+             p = compute_p(np.abs(test_value), np.abs(null_array))
+         else:
+             p_l = compute_p(test_value, null_array)
+             p_r = compute_p(test_value * -1, null_array * -1)
+             p = 2 * np.minimum(p_l, p_r)
+     elif tail == "lower":
+         p = compute_p(test_value * -1, null_array * -1)
+     else:
+         p = compute_p(test_value, null_array)
+
+     # Ensure that p-values are in the following range:
+     # smallest_value <= p_value <= (1.0 - smallest_value)
+     smallest_value = np.maximum(np.finfo(float).eps, 1.0 / len(null_array))
+     result = np.maximum(smallest_value, np.minimum(p, 1.0 - smallest_value))
+
+     if reconstruct:
+         result = result[uniq_idx]
+
+     return result[0] if return_first else result
+
+
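For example (hypothetical null): with 10,000 draws from a standard normal as the null, a test value of 1.96 should give a two-tailed p-value near 0.05:

    import numpy as np

    from nimare.stats import null_to_p

    rng = np.random.default_rng(0)
    null = rng.standard_normal(10000)
    p = null_to_p(1.96, null, tail="two")  # ~0.05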
+ def nullhist_to_p(test_values, histogram_weights, histogram_bins):
+     """Return one-sided p-value for test value against null histogram.
+
+     .. versionadded:: 0.0.4
+
+     Parameters
+     ----------
+     test_values : :obj:`float` or 1D array_like
+         Values for which to determine p-values. Can be a single value or a
+         one-dimensional array. If a one-dimensional array, it should have the same
+         length as the last dimension of ``histogram_weights``.
+     histogram_weights : (B [x V]) array
+         Histogram weights representing the null distribution against which
+         ``test_values`` are compared. These should be raw weights or counts, not a
+         cumulatively-summed null distribution.
+     histogram_bins : (B) array
+         Histogram bin centers. Note that this differs from :func:`numpy.histogram`'s
+         behavior, which uses bin *edges*. Histogram bins created with numpy will need
+         to be adjusted accordingly.
+
+     Returns
+     -------
+     p_value : :obj:`float` or :class:`numpy.ndarray`
+         P-value(s) associated with the test value when compared against the null
+         distribution. P-values reflect the probability of a test value at or above
+         the observed value if the test value was drawn from the null distribution.
+         This is a one-sided p-value.
+
+     Notes
+     -----
+     P-values are clipped based on the largest observed non-zero weight in the null
+     histogram. Therefore, no p-values of 0 should be produced.
+     """
+     test_values = np.asarray(test_values)
+     return_value = False
+     if test_values.ndim == 0:
+         return_value = True
+         test_values = np.atleast_1d(test_values)
+     assert test_values.ndim == 1
+     assert histogram_bins.ndim == 1
+     assert histogram_weights.shape[0] == histogram_bins.shape[0]
+     assert histogram_weights.ndim in (1, 2)
+     if histogram_weights.ndim == 2:
+         assert histogram_weights.shape[1] == test_values.shape[0]
+         voxelwise_null = True
+     else:
+         histogram_weights = histogram_weights[:, None]
+         voxelwise_null = False
+
+     n_bins = len(histogram_bins)
+     inv_step = 1 / (histogram_bins[1] - histogram_bins[0])  # assume equal spacing
+
+     # Convert histograms to null distributions
+     # The value in each bin represents the probability of finding a test value
+     # (stored in histogram_bins) of that value or greater.
+     null_distribution = histogram_weights / np.sum(histogram_weights, axis=0)
+     null_distribution = np.cumsum(null_distribution[::-1, :], axis=0)[::-1, :]
+     null_distribution /= np.max(null_distribution, axis=0)
+     null_distribution = np.squeeze(null_distribution)
+
+     smallest_value = np.min(null_distribution[null_distribution != 0])
+
+     p_values = np.ones(test_values.shape)
+     idx = np.where(test_values > 0)[0]
+     value_bins = utils._round2(test_values[idx] * inv_step)
+     value_bins[value_bins >= n_bins] = n_bins - 1  # limit to within the null distribution
+
+     # Get p-values by getting the value_bins-th value in null_distribution
+     if voxelwise_null:
+         # Pair each test value with its associated null distribution
+         for i_voxel, voxel_idx in enumerate(idx):
+             p_values[voxel_idx] = null_distribution[value_bins[i_voxel], voxel_idx]
+     else:
+         p_values[idx] = null_distribution[value_bins]
+
+     # Ensure that p-values are in the following range:
+     # smallest_value <= p_value <= 1.0
+     p_values = np.maximum(smallest_value, np.minimum(p_values, 1.0))
+     if return_value:
+         p_values = p_values[0]
+     return p_values
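A small illustrative call (hypothetical histogram): with bin centers 0 through 3 and counts [70, 20, 9, 1], a test value of 2.0 falls in a bin whose survival probability is (9 + 1) / 100 = 0.10:

    import numpy as np

    from nimare.stats import nullhist_to_p

    histogram_bins = np.array([0.0, 1.0, 2.0, 3.0])  # bin centers
    histogram_weights = np.array([70, 20, 9, 1])  # null counts per bin
    p = nullhist_to_p(2.0, histogram_weights, histogram_bins)  # 0.10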
nimare/tests/__init__.py ADDED
@@ -0,0 +1 @@
+ """Testing module for NiMARE."""