nimare 0.4.2rc4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +635 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +240 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2rc4.dist-info/LICENSE +21 -0
  115. nimare-0.4.2rc4.dist-info/METADATA +124 -0
  116. nimare-0.4.2rc4.dist-info/RECORD +119 -0
  117. nimare-0.4.2rc4.dist-info/WHEEL +5 -0
  118. nimare-0.4.2rc4.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2rc4.dist-info/top_level.txt +2 -0
nimare/io.py ADDED
@@ -0,0 +1,635 @@
+ """Input/Output operations."""
+
+ import json
+ import logging
+ import re
+ from collections import Counter
+ from itertools import groupby
+ from operator import itemgetter
+ from pathlib import Path
+
+ import numpy as np
+ import pandas as pd
+ import requests
+ from scipy import sparse
+
+ from nimare.dataset import Dataset
+ from nimare.extract.utils import _get_dataset_dir
+ from nimare.utils import _create_name, load_nimads
+
+ LGR = logging.getLogger(__name__)
+
+ DEFAULT_MAP_TYPE_CONVERSION = {
+     "T map": "t",
+     "variance": "varcope",
+     "univariate-beta map": "beta",
+     "Z map": "z",
+     "p map": "p",
+ }
+
+
+ def convert_nimads_to_dataset(studyset, annotation=None):
+     """Convert a NIMADS studyset to a NiMARE Dataset.
+
+     .. versionadded:: 0.0.14
+
+     Parameters
+     ----------
+     studyset : :obj:`str`, :obj:`dict`, :obj:`nimare.nimads.StudySet`
+         Path to a JSON file containing a NIMADS studyset, a dictionary containing a NIMADS
+         studyset, or a NIMADS studyset object.
+     annotation : :obj:`str`, :obj:`dict`, :obj:`nimare.nimads.Annotation`, optional
+         Optional path to a JSON file containing a NIMADS annotation, a dictionary containing
+         a NIMADS annotation, or a NIMADS annotation object.
+
+     Returns
+     -------
+     dset : :obj:`nimare.dataset.Dataset`
+         NiMARE Dataset object containing experiment information from the NIMADS studyset.
+     """
+
+     def _analysis_to_dict(study, analysis):
+         result = {
+             "metadata": {
+                 "authors": study.authors,
+                 "journal": study.publication,
+                 "title": study.name,
+             },
+             "coords": {
+                 "space": analysis.points[0].space if analysis.points else "UNKNOWN",
+                 "x": [p.x for p in analysis.points] or [None],
+                 "y": [p.y for p in analysis.points] or [None],
+                 "z": [p.z for p in analysis.points] or [None],
+             },
+         }
+         sample_size = study.metadata.get("sample_size")
+         if sample_size:
+             result["metadata"]["sample_sizes"] = [sample_size]
+         if analysis.annotations:
+             result["labels"] = {}
+             for annot in analysis.annotations.values():
+                 result["labels"].update(annot)
+
+         return result
+
+     def _study_to_dict(study):
+         return {
+             "metadata": {
+                 "authors": study.authors,
+                 "journal": study.publication,
+                 "title": study.name,
+             },
+             "contrasts": {_create_name(a): _analysis_to_dict(study, a) for a in study.analyses},
+         }
+
+     # Load the NIMADS studyset
+     studyset = load_nimads(studyset, annotation)
+     return Dataset({_create_name(s): _study_to_dict(s) for s in studyset.studies})
+
+
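A minimal usage sketch for the conversion above (the filenames here are hypothetical; the wheel's own test fixtures, nimare/tests/data/nimads_studyset.json and nimads_annotation.json, use the same layout):

    from nimare.io import convert_nimads_to_dataset

    # Hypothetical local NIMADS files; any studyset/annotation JSON pair works.
    dset = convert_nimads_to_dataset(
        "nimads_studyset.json",
        annotation="nimads_annotation.json",
    )
    print(dset.ids)                 # one ID per study/analysis pair
    print(dset.coordinates.head())  # peak coordinates as a DataFrame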
+ def convert_neurosynth_to_dict(
+     coordinates_file,
+     metadata_file,
+     annotations_files=None,
+     feature_groups=None,
+ ):
+     """Convert Neurosynth/NeuroQuery database files to a dictionary.
+
+     .. versionchanged:: 0.0.10
+
+         * Use new format for Neurosynth and NeuroQuery files.
+
+     .. versionchanged:: 0.0.9
+
+         * Support annotations files organized in a dictionary.
+
+     Parameters
+     ----------
+     coordinates_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's coordinates.
+     metadata_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's metadata.
+     annotations_files : :obj:`dict`, :obj:`list` of :obj:`dict`, or None, optional
+         Optional file(s) with Neurosynth/NeuroQuery's annotations.
+         This should consist of a dictionary with two keys: "features" and "vocabulary".
+         "features" should have an NPZ file containing a sparse matrix of feature values.
+         "vocabulary" should have a TXT file containing labels.
+         The vocabulary corresponds to the columns of the feature matrix, while study IDs are
+         inferred from the metadata file, which MUST be in the same order as the features matrix.
+         Multiple sets of annotations may be provided, in which case "annotations_files" should be
+         a list of dictionaries. The appropriate name of each annotation set will be inferred from
+         the "features" filename, but this can be overridden with the "feature_groups" parameter.
+         Default is None.
+     feature_groups : :obj:`list` of :obj:`str`, or None, optional
+         An optional list of names of annotation sets defined in "annotations_files".
+         This should only be used if "annotations_files" is used and the user wants to override
+         the automatically extracted annotation set names.
+         Default is None.
+
+     Returns
+     -------
+     dset_dict : :obj:`dict`
+         NiMARE-organized dictionary containing experiment information from text files.
+
+     Warnings
+     --------
+     Starting in version 0.0.10, this function operates on the new Neurosynth/NeuroQuery file
+     format. Old code using this function **will not work** with the new version.
+     """
+     coords_df = pd.read_table(coordinates_file)
+     metadata_df = pd.read_table(metadata_file)
+     assert metadata_df["id"].is_unique, "Metadata file must have one row per ID."
+
+     coords_df["id"] = coords_df["id"].astype(str)
+     metadata_df["id"] = metadata_df["id"].astype(str)
+     metadata_df = metadata_df.set_index("id", drop=False)
+     ids = metadata_df["id"].tolist()
+
+     if "space" not in metadata_df.columns:
+         LGR.warning("No 'space' column detected. Defaulting to 'UNKNOWN'.")
+         metadata_df["space"] = "UNKNOWN"
+
+     if isinstance(annotations_files, dict):
+         annotations_files = [annotations_files]
+
+     if isinstance(feature_groups, str):
+         feature_groups = [feature_groups]
+
+     # Load labels into a single DataFrame
+     if annotations_files is not None:
+         label_dfs = []
+         if feature_groups is not None:
+             assert len(feature_groups) == len(
+                 annotations_files
+             ), "feature_groups and annotations_files must have the same length."
+
+         for i_feature_group, annotations_dict in enumerate(annotations_files):
+             features_file = annotations_dict["features"]
+             vocabulary_file = annotations_dict["vocabulary"]
+
+             vocab = re.findall("vocab-([a-zA-Z0-9]+)_", features_file)[0]
+             source = re.findall("source-([a-zA-Z0-9]+)_", features_file)[0]
+             value_type = re.findall("type-([a-zA-Z0-9]+)_", features_file)[0]
+
+             if feature_groups is not None:
+                 feature_group = feature_groups[i_feature_group]
+                 feature_group = feature_group.rstrip("_") + "__"
+             else:
+                 feature_group = f"{vocab}_{source}_{value_type}__"
+
+             features = sparse.load_npz(features_file).todense()
+             vocab = np.loadtxt(vocabulary_file, dtype=str, delimiter="\t")
+
+             labels = [feature_group + label for label in vocab]
+
+             temp_label_df = pd.DataFrame(features, index=ids, columns=labels)
+             temp_label_df.index.name = "study_id"
+
+             label_dfs.append(temp_label_df)
+
+         label_df = pd.concat(label_dfs, axis=1)
+     else:
+         label_df = None
+
+     # Compile (pseudo-)NIMADS-format dictionary
+     x = coords_df["x"].values
+     y = coords_df["y"].values
+     z = coords_df["z"].values
+
+     dset_dict = {}
+
+     for sid, study_metadata in metadata_df.iterrows():
+         coord_inds = np.where(coords_df["id"].values == sid)[0]
+         study_dict = {}
+         study_dict["metadata"] = {}
+         study_dict["metadata"]["authors"] = study_metadata.get("authors", "n/a")
+         study_dict["metadata"]["journal"] = study_metadata.get("journal", "n/a")
+         study_dict["metadata"]["year"] = study_metadata.get("year", "n/a")
+         study_dict["metadata"]["title"] = study_metadata.get("title", "n/a")
+         study_dict["contrasts"] = {}
+         study_dict["contrasts"]["1"] = {}
+         # Duplicate metadata across study and contrast levels
+         study_dict["contrasts"]["1"]["metadata"] = {}
+         study_dict["contrasts"]["1"]["metadata"]["authors"] = study_metadata.get("authors", "n/a")
+         study_dict["contrasts"]["1"]["metadata"]["journal"] = study_metadata.get("journal", "n/a")
+         study_dict["contrasts"]["1"]["metadata"]["year"] = study_metadata.get("year", "n/a")
+         study_dict["contrasts"]["1"]["metadata"]["title"] = study_metadata.get("title", "n/a")
+         study_dict["contrasts"]["1"]["coords"] = {}
+         study_dict["contrasts"]["1"]["coords"]["space"] = study_metadata["space"]
+         study_dict["contrasts"]["1"]["coords"]["x"] = list(x[coord_inds])
+         study_dict["contrasts"]["1"]["coords"]["y"] = list(y[coord_inds])
+         study_dict["contrasts"]["1"]["coords"]["z"] = list(z[coord_inds])
+
+         if label_df is not None:
+             study_dict["contrasts"]["1"]["labels"] = label_df.loc[sid].to_dict()
+
+         dset_dict[sid] = study_dict
+
+     return dset_dict
+
+
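To make the filename parsing above concrete: the Neurosynth test files shipped in this wheel follow the expected naming scheme, and the vocab-/source-/type- entities in the features filename yield the feature-group prefix "terms_abstract_tfidf__". A short sketch, assuming the four files sit in the working directory:

    from nimare.io import convert_neurosynth_to_dict

    dset_dict = convert_neurosynth_to_dict(
        "data-neurosynth_version-7_coordinates.tsv.gz",
        "data-neurosynth_version-7_metadata.tsv.gz",
        annotations_files={
            "features": "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz",
            "vocabulary": "data-neurosynth_version-7_vocab-terms_vocabulary.txt",
        },
    )
    # Each label is prefixed with the inferred group, e.g. "terms_abstract_tfidf__<term>".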
+ def convert_neurosynth_to_json(
+     coordinates_file,
+     metadata_file,
+     out_file,
+     annotations_files=None,
+     feature_groups=None,
+ ):
+     """Convert Neurosynth/NeuroQuery database files to a NiMARE JSON file.
+
+     .. versionchanged:: 0.0.10
+
+         * Use new format for Neurosynth and NeuroQuery files.
+
+     .. versionchanged:: 0.0.9
+
+         * Support annotations files organized in a dictionary.
+
+     Parameters
+     ----------
+     coordinates_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's coordinates.
+     metadata_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's metadata.
+     out_file : :obj:`str`
+         Output NiMARE-format JSON file.
+     annotations_files : :obj:`dict`, :obj:`list` of :obj:`dict`, or None, optional
+         Optional file(s) with Neurosynth/NeuroQuery's annotations.
+         This should consist of a dictionary with two keys: "features" and "vocabulary".
+         "features" should have an NPZ file containing a sparse matrix of feature values.
+         "vocabulary" should have a TXT file containing labels.
+         The vocabulary corresponds to the columns of the feature matrix, while study IDs are
+         inferred from the metadata file, which MUST be in the same order as the features matrix.
+         Multiple sets of annotations may be provided, in which case "annotations_files" should be
+         a list of dictionaries. The appropriate name of each annotation set will be inferred from
+         the "features" filename, but this can be overridden with the "feature_groups" parameter.
+         Default is None.
+     feature_groups : :obj:`list` of :obj:`str`, or None, optional
+         An optional list of names of annotation sets defined in "annotations_files".
+         This should only be used if "annotations_files" is used and the user wants to override
+         the automatically extracted annotation set names.
+         Default is None.
+
+     Warnings
+     --------
+     Starting in version 0.0.10, this function operates on the new Neurosynth/NeuroQuery file
+     format. Old code using this function **will not work** with the new version.
+     """
+     dset_dict = convert_neurosynth_to_dict(
+         coordinates_file, metadata_file, annotations_files, feature_groups
+     )
+     with open(out_file, "w") as fo:
+         json.dump(dset_dict, fo, indent=4, sort_keys=True)
+
+
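The JSON variant is a thin wrapper around the dictionary converter; a one-call sketch with the same (hypothetical) inputs:

    from nimare.io import convert_neurosynth_to_json

    convert_neurosynth_to_json(
        "data-neurosynth_version-7_coordinates.tsv.gz",
        "data-neurosynth_version-7_metadata.tsv.gz",
        "neurosynth_dset.json",
    )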
+ def convert_neurosynth_to_dataset(
+     coordinates_file,
+     metadata_file,
+     annotations_files=None,
+     feature_groups=None,
+     target="mni152_2mm",
+ ):
+     """Convert Neurosynth/NeuroQuery database files into a NiMARE Dataset.
+
+     .. versionchanged:: 0.0.10
+
+         * Use new format for Neurosynth and NeuroQuery files.
+
+     .. versionchanged:: 0.0.9
+
+         * Support annotations files organized in a dictionary.
+
+     Parameters
+     ----------
+     coordinates_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's coordinates.
+     metadata_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's metadata.
+     annotations_files : :obj:`dict`, :obj:`list` of :obj:`dict`, or None, optional
+         Optional file(s) with Neurosynth/NeuroQuery's annotations.
+         This should consist of a dictionary with two keys: "features" and "vocabulary".
+         "features" should have an NPZ file containing a sparse matrix of feature values.
+         "vocabulary" should have a TXT file containing labels.
+         The vocabulary corresponds to the columns of the feature matrix, while study IDs are
+         inferred from the metadata file, which MUST be in the same order as the features matrix.
+         Multiple sets of annotations may be provided, in which case "annotations_files" should be
+         a list of dictionaries. The appropriate name of each annotation set will be inferred from
+         the "features" filename, but this can be overridden with the "feature_groups" parameter.
+         Default is None.
+     feature_groups : :obj:`list` of :obj:`str`, or None, optional
+         An optional list of names of annotation sets defined in "annotations_files".
+         This should only be used if "annotations_files" is used and the user wants to override
+         the automatically extracted annotation set names.
+         Default is None.
+     target : {'mni152_2mm', 'ale_2mm'}, optional
+         Target template space for coordinates. Default is 'mni152_2mm'.
+
+     Returns
+     -------
+     :obj:`~nimare.dataset.Dataset`
+         Dataset object containing experiment information from the database files.
+
+     Warnings
+     --------
+     Starting in version 0.0.10, this function operates on the new Neurosynth/NeuroQuery file
+     format. Old code using this function **will not work** with the new version.
+     """
+     dset_dict = convert_neurosynth_to_dict(
+         coordinates_file,
+         metadata_file,
+         annotations_files,
+         feature_groups,
+     )
+     return Dataset(dset_dict, target=target)
+
+
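In practice this converter is paired with the downloader in nimare.extract. A hedged sketch: the fetch_neurosynth call and the "coordinates"/"metadata"/"features" keys below are assumed from NiMARE's documented usage, not from this file:

    from nimare.extract import fetch_neurosynth
    from nimare.io import convert_neurosynth_to_dataset

    files = fetch_neurosynth(data_dir=".", version="7", source="abstract", vocab="terms")
    db = files[0]  # one entry per downloaded database version
    dset = convert_neurosynth_to_dataset(
        coordinates_file=db["coordinates"],
        metadata_file=db["metadata"],
        annotations_files=db["features"],
    )
    dset.save("neurosynth_dataset.pkl.gz")  # Dataset.save pickles the object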
+ def convert_sleuth_to_dict(text_file):
+     """Convert a Sleuth text file to a dictionary.
+
+     Parameters
+     ----------
+     text_file : :obj:`str` or :obj:`list` of :obj:`str`
+         Path to Sleuth-format text file.
+         More than one text file may be provided.
+
+     Returns
+     -------
+     :obj:`dict`
+         NiMARE-organized dictionary containing experiment information from the text file.
+     """
+     if isinstance(text_file, list):
+         dset_dict = {}
+         for tf in text_file:
+             temp_dict = convert_sleuth_to_dict(tf)
+             for sid in temp_dict.keys():
+                 if sid in dset_dict.keys():
+                     dset_dict[sid]["contrasts"] = {
+                         **dset_dict[sid]["contrasts"],
+                         **temp_dict[sid]["contrasts"],
+                     }
+                 else:
+                     dset_dict[sid] = temp_dict[sid]
+         return dset_dict
+
+     with open(text_file, "r") as file_object:
+         data = file_object.read()
+
+     data = [line.rstrip() for line in re.split("\n\r|\r\n|\n|\r", data)]
+     data = [line for line in data if line]
+     # First line indicates space. The rest are studies, ns, and coords.
+     space = data[0].replace(" ", "").replace("//Reference=", "")
+
+     SPACE_OPTS = ["MNI", "TAL", "Talairach"]
+     if space not in SPACE_OPTS:
+         raise ValueError(f"Space {space} unknown. Options supported: {', '.join(SPACE_OPTS)}.")
+
+     # Split into experiments
+     data = data[1:]
+     header_lines = [i for i in range(len(data)) if data[i].startswith("//")]
+
+     # Get contiguous header lines to define contrasts
+     ranges = []
+     for _, g in groupby(enumerate(header_lines), lambda x: x[0] - x[1]):
+         group = list(map(itemgetter(1), g))
+         ranges.append((group[0], group[-1]))
+         if "Subjects" not in data[group[-1]]:
+             raise ValueError(f"Sample size line missing for {data[group[0] : group[-1] + 1]}")
+     start_idx = [r[0] for r in ranges]
+     end_idx = start_idx[1:] + [len(data) + 1]
+     split_idx = zip(start_idx, end_idx)
+
+     dset_dict = {}
+     for exp_idx in split_idx:
+         exp_data = data[exp_idx[0] : exp_idx[1]]
+         if exp_data:
+             header_idx = [i for i in range(len(exp_data)) if exp_data[i].startswith("//")]
+             study_info_idx = header_idx[:-1]
+             n_idx = header_idx[-1]
+             study_info = [exp_data[i].replace("//", "").strip() for i in study_info_idx]
+             study_info = " ".join(study_info)
+             study_name = study_info.split(":")[0]
+             contrast_name = ":".join(study_info.split(":")[1:]).strip()
+             sample_size = int(exp_data[n_idx].replace(" ", "").replace("//Subjects=", ""))
+             xyz = exp_data[n_idx + 1 :]  # Coords are everything after study info and n
+             xyz = [row.split() for row in xyz]
+             correct_shape = np.all([len(coord) == 3 for coord in xyz])
+             if not correct_shape:
+                 all_shapes = np.unique([len(coord) for coord in xyz]).astype(str)
+                 raise ValueError(
+                     f"Coordinates for study '{study_info}' are not all "
+                     f"correct length. Lengths detected: {', '.join(all_shapes)}."
+                 )
+
+             try:
+                 xyz = np.array(xyz, dtype=float)
+             except ValueError as err:
+                 # Prettify xyz before reporting the error
+                 strs = [[str(e) for e in row] for row in xyz]
+                 lens = [max(map(len, col)) for col in zip(*strs)]
+                 fmt = "\t".join("{{:{}}}".format(x) for x in lens)
+                 table = "\n".join([fmt.format(*row) for row in strs])
+                 raise ValueError(
+                     f"Conversion to numpy array failed for study '{study_info}'. Coords:\n{table}"
+                 ) from err
+
+             x, y, z = list(xyz[:, 0]), list(xyz[:, 1]), list(xyz[:, 2])
+
+             if study_name not in dset_dict.keys():
+                 dset_dict[study_name] = {"contrasts": {}}
+             dset_dict[study_name]["contrasts"][contrast_name] = {"coords": {}, "metadata": {}}
+             dset_dict[study_name]["contrasts"][contrast_name]["coords"]["space"] = space
+             dset_dict[study_name]["contrasts"][contrast_name]["coords"]["x"] = x
+             dset_dict[study_name]["contrasts"][contrast_name]["coords"]["y"] = y
+             dset_dict[study_name]["contrasts"][contrast_name]["coords"]["z"] = z
+             dset_dict[study_name]["contrasts"][contrast_name]["metadata"]["sample_sizes"] = [
+                 sample_size
+             ]
+     return dset_dict
+
+
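The parser above implies the expected Sleuth layout: a "// Reference=" line, one or more "// Study: Contrast" header lines, a "// Subjects=" line, then whitespace-separated coordinate triples. A minimal, invented example of a file it accepts:

    // Reference=MNI
    // Smith et al., 2019: High > Low
    // Subjects=23
    -38  -48  52
     40   42   8

    // Doe et al., 2020: Task > Rest
    // Subjects=17
      2  -10  50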
+ def convert_sleuth_to_json(text_file, out_file):
+     """Convert a Sleuth output text file into a JSON file.
+
+     Parameters
+     ----------
+     text_file : :obj:`str` or :obj:`list` of :obj:`str`
+         Path to Sleuth-format text file.
+         More than one text file may be provided.
+     out_file : :obj:`str`
+         Path to output JSON file.
+     """
+     if not isinstance(text_file, (str, list)):
+         raise ValueError(f"Unsupported type for parameter 'text_file': {type(text_file)}")
+     dset_dict = convert_sleuth_to_dict(text_file)
+
+     with open(out_file, "w") as fo:
+         json.dump(dset_dict, fo, indent=4, sort_keys=True)
+
+
+ def convert_sleuth_to_dataset(text_file, target="ale_2mm"):
+     """Convert a Sleuth output text file into a NiMARE Dataset.
+
+     Parameters
+     ----------
+     text_file : :obj:`str` or :obj:`list` of :obj:`str`
+         Path to Sleuth-format text file.
+         More than one text file may be provided.
+     target : {'ale_2mm', 'mni152_2mm'}, optional
+         Target template space for coordinates. Default is 'ale_2mm'
+         (ALE-specific brainmask in MNI152 2mm space).
+
+     Returns
+     -------
+     :obj:`~nimare.dataset.Dataset`
+         Dataset object containing experiment information from text_file.
+     """
+     if not isinstance(text_file, (str, list)):
+         raise ValueError(f"Unsupported type for parameter 'text_file': {type(text_file)}")
+     dset_dict = convert_sleuth_to_dict(text_file)
+     return Dataset(dset_dict, target=target)
+
+
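Loading a Sleuth file into a Dataset is then a single call; target controls the template space the coordinates are assigned to. A sketch using one of the test files shipped in this wheel:

    from nimare.io import convert_sleuth_to_dataset

    dset = convert_sleuth_to_dataset("nimare/tests/data/test_sleuth_file.txt", target="ale_2mm")
    print(dset.coordinates[["x", "y", "z", "space"]].head())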
+ def convert_neurovault_to_dataset(
+     collection_ids,
+     contrasts,
+     img_dir=None,
+     map_type_conversion=None,
+     **dset_kwargs,
+ ):
+     """Convert a group of NeuroVault collections into a NiMARE Dataset.
+
+     .. versionadded:: 0.0.8
+
+     Parameters
+     ----------
+     collection_ids : :obj:`list` of :obj:`int` or :obj:`dict`
+         A list of NeuroVault collections, specified by their IDs.
+         Collection IDs can be accessed through the NeuroVault API
+         (i.e., https://neurovault.org/api/collections) or
+         the main website (i.e., https://neurovault.org/collections).
+         For example, in the URL https://neurovault.org/collections/8836/,
+         `8836` is the collection ID.
+         collection_ids may also be a dictionary whose keys are informative
+         study names and whose values are collection IDs, to give the
+         collections more informative names in the dataset.
+     contrasts : :obj:`dict`
+         Dictionary whose keys represent the name of the contrast in
+         the dataset and whose values represent a regular expression that would
+         match the names represented in NeuroVault.
+         For example, under the ``Name`` column in this URL
+         https://neurovault.org/collections/8836/,
+         a valid contrast could be "as-Animal", which will be called "animal" in the created
+         dataset if the contrasts argument is ``{'animal': "as-Animal"}``.
+     img_dir : :obj:`str` or None, optional
+         Base path to save all the downloaded images. By default, the images
+         will be saved to a temporary directory with the prefix "neurovault".
+     map_type_conversion : :obj:`dict` or None, optional
+         Dictionary whose keys are the expected ``map_type`` names in
+         NeuroVault and whose values are the names of the respective
+         statistic maps in a NiMARE dataset. Default = None.
+     **dset_kwargs : keyword arguments passed to Dataset
+         Keyword arguments to pass in when creating the Dataset object.
+         See :obj:`~nimare.dataset.Dataset` for details.
+
+     Returns
+     -------
+     :obj:`~nimare.dataset.Dataset`
+         Dataset object containing experiment information from NeuroVault.
+     """
+     img_dir = Path(_get_dataset_dir("_".join(contrasts.keys()), data_dir=img_dir))
+
+     if map_type_conversion is None:
+         map_type_conversion = DEFAULT_MAP_TYPE_CONVERSION
+
+     if not isinstance(collection_ids, dict):
+         collection_ids = {nv_coll: nv_coll for nv_coll in collection_ids}
+
+     dataset_dict = {}
+     for coll_name, nv_coll in collection_ids.items():
+         nv_url = f"https://neurovault.org/api/collections/{nv_coll}/images/?format=json"
+         images = requests.get(nv_url).json()
+         if "Not found" in images.get("detail", ""):
+             raise ValueError(
+                 f"Collection {nv_coll} not found. "
+                 "Three likely causes are (1) the collection doesn't exist, "
+                 "(2) the collection is private, or "
+                 "(3) the provided ID corresponds to an image instead of a collection."
+             )
+
+         dataset_dict[f"study-{coll_name}"] = {"contrasts": {}}
+         for contrast_name, contrast_regex in contrasts.items():
+             contrast_dict = {
+                 "images": {
+                     "beta": None,
+                     "t": None,
+                     "varcope": None,
+                 },
+                 "metadata": {"sample_sizes": None},
+             }
+             dataset_dict[f"study-{coll_name}"]["contrasts"][contrast_name] = contrast_dict
+
+             sample_sizes = []
+             no_images = True
+             for img_dict in images["results"]:
+                 if not (
+                     re.match(contrast_regex, img_dict["name"])
+                     and img_dict["map_type"] in map_type_conversion
+                     and img_dict["analysis_level"] == "group"
+                 ):
+                     continue
+
+                 no_images = False
+                 filename = img_dir / (
+                     f"collection-{nv_coll}_id-{img_dict['id']}_" + Path(img_dict["file"]).name
+                 )
+
+                 if not filename.exists():
+                     r = requests.get(img_dict["file"])
+                     with open(filename, "wb") as f:
+                         f.write(r.content)
+
+                 contrast_dict["images"][
+                     map_type_conversion[img_dict["map_type"]]
+                 ] = filename.as_posix()
+
+                 # Aggregate sample sizes (should all be the same)
+                 sample_sizes.append(img_dict["number_of_subjects"])
+
+             if no_images:
+                 raise ValueError(
+                     f"No images were found for contrast {contrast_name}. "
+                     f"Please check the contrast regular expression: {contrast_regex}"
+                 )
+
+             # Take the modal sample size (warn if there are multiple values)
+             if len(set(sample_sizes)) > 1:
+                 sample_size = _resolve_sample_size(sample_sizes)
+                 LGR.warning(
+                     f"Multiple sample sizes were found for NeuroVault collection {nv_coll}, "
+                     f"contrast {contrast_name} (sample sizes: {set(sample_sizes)}); "
+                     f"selecting modal sample size: {sample_size}"
+                 )
+             else:
+                 sample_size = sample_sizes[0]
+             contrast_dict["metadata"]["sample_sizes"] = [sample_size]
+
+     dataset = Dataset(dataset_dict, **dset_kwargs)
+
+     return dataset
+
+
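A hedged sketch for the NeuroVault converter, reusing the collection and contrast regex cited in the docstring (the "pinel" study name is an arbitrary label, and the call requires network access to neurovault.org):

    from nimare.io import convert_neurovault_to_dataset

    dset = convert_neurovault_to_dataset(
        collection_ids={"pinel": 8836},     # informative name -> NeuroVault collection ID
        contrasts={"animal": "as-Animal"},  # dataset contrast name -> NeuroVault name regex
    )
    print(dset.images.head())  # paths to the downloaded beta/t/varcope maps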
+ def _resolve_sample_size(sample_sizes):
+     """Choose the modal sample_size if there are multiple sample_sizes to choose from."""
+     sample_size_counts = Counter(sample_sizes)
+     if None in sample_size_counts:
+         sample_size_counts.pop(None)
+
+     return sample_size_counts.most_common()[0][0]
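For reference, the modal-resolution helper above ignores missing entries: _resolve_sample_size([21, 21, 20, None]) drops the None and returns 21.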
nimare/meta/__init__.py ADDED
@@ -0,0 +1,39 @@
+ """Coordinate-, image-, and effect-size-based meta-analysis estimators."""
+
+ from . import ibma, kernel
+ from .cbma import ALE, KDA, SCALE, ALESubtraction, MKDAChi2, MKDADensity, ale, mkda
+ from .ibma import (
+     DerSimonianLaird,
+     Fishers,
+     Hedges,
+     PermutedOLS,
+     SampleSizeBasedLikelihood,
+     Stouffers,
+     VarianceBasedLikelihood,
+     WeightedLeastSquares,
+ )
+ from .kernel import ALEKernel, KDAKernel, MKDAKernel
+
+ __all__ = [
+     "ALE",
+     "ALESubtraction",
+     "SCALE",
+     "MKDADensity",
+     "MKDAChi2",
+     "KDA",
+     "DerSimonianLaird",
+     "Fishers",
+     "Hedges",
+     "PermutedOLS",
+     "SampleSizeBasedLikelihood",
+     "Stouffers",
+     "VarianceBasedLikelihood",
+     "WeightedLeastSquares",
+     "MKDAKernel",
+     "ALEKernel",
+     "KDAKernel",
+     "kernel",
+     "ibma",
+     "ale",
+     "mkda",
+ ]
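Because of these re-exports, the CBMA estimators resolve through either package path; a quick sketch:

    from nimare.meta import ALE           # via nimare/meta/__init__.py
    from nimare.meta.cbma import ALE      # equivalent, via nimare/meta/cbma/__init__.py (next hunk)

    ale = ALE()  # defaults to an ALEKernel; fit with a Dataset of coordinates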
nimare/meta/cbma/__init__.py ADDED
@@ -0,0 +1,6 @@
+ """Coordinate-based meta-analytic estimators."""
+
+ from .ale import ALE, SCALE, ALESubtraction
+ from .mkda import KDA, MKDAChi2, MKDADensity
+
+ __all__ = ["ALE", "ALESubtraction", "SCALE", "MKDADensity", "MKDAChi2", "KDA"]