nimare-0.4.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. benchmarks/__init__.py +0 -0
  2. benchmarks/bench_cbma.py +57 -0
  3. nimare/__init__.py +45 -0
  4. nimare/_version.py +21 -0
  5. nimare/annotate/__init__.py +21 -0
  6. nimare/annotate/cogat.py +213 -0
  7. nimare/annotate/gclda.py +924 -0
  8. nimare/annotate/lda.py +147 -0
  9. nimare/annotate/text.py +75 -0
  10. nimare/annotate/utils.py +87 -0
  11. nimare/base.py +217 -0
  12. nimare/cli.py +124 -0
  13. nimare/correct.py +462 -0
  14. nimare/dataset.py +685 -0
  15. nimare/decode/__init__.py +33 -0
  16. nimare/decode/base.py +115 -0
  17. nimare/decode/continuous.py +462 -0
  18. nimare/decode/discrete.py +753 -0
  19. nimare/decode/encode.py +110 -0
  20. nimare/decode/utils.py +44 -0
  21. nimare/diagnostics.py +510 -0
  22. nimare/estimator.py +139 -0
  23. nimare/extract/__init__.py +19 -0
  24. nimare/extract/extract.py +466 -0
  25. nimare/extract/utils.py +295 -0
  26. nimare/generate.py +331 -0
  27. nimare/io.py +667 -0
  28. nimare/meta/__init__.py +39 -0
  29. nimare/meta/cbma/__init__.py +6 -0
  30. nimare/meta/cbma/ale.py +951 -0
  31. nimare/meta/cbma/base.py +947 -0
  32. nimare/meta/cbma/mkda.py +1361 -0
  33. nimare/meta/cbmr.py +970 -0
  34. nimare/meta/ibma.py +1683 -0
  35. nimare/meta/kernel.py +501 -0
  36. nimare/meta/models.py +1199 -0
  37. nimare/meta/utils.py +494 -0
  38. nimare/nimads.py +492 -0
  39. nimare/reports/__init__.py +24 -0
  40. nimare/reports/base.py +664 -0
  41. nimare/reports/default.yml +123 -0
  42. nimare/reports/figures.py +651 -0
  43. nimare/reports/report.tpl +160 -0
  44. nimare/resources/__init__.py +1 -0
  45. nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
  46. nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  47. nimare/resources/database_file_manifest.json +142 -0
  48. nimare/resources/english_spellings.csv +1738 -0
  49. nimare/resources/filenames.json +32 -0
  50. nimare/resources/neurosynth_laird_studies.json +58773 -0
  51. nimare/resources/neurosynth_stoplist.txt +396 -0
  52. nimare/resources/nidm_pain_dset.json +1349 -0
  53. nimare/resources/references.bib +541 -0
  54. nimare/resources/semantic_knowledge_children.txt +325 -0
  55. nimare/resources/semantic_relatedness_children.txt +249 -0
  56. nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  57. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  58. nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  59. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  60. nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  61. nimare/results.py +225 -0
  62. nimare/stats.py +276 -0
  63. nimare/tests/__init__.py +1 -0
  64. nimare/tests/conftest.py +229 -0
  65. nimare/tests/data/amygdala_roi.nii.gz +0 -0
  66. nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  67. nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  68. nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  69. nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
  70. nimare/tests/data/neurosynth_dset.json +2868 -0
  71. nimare/tests/data/neurosynth_laird_studies.json +58773 -0
  72. nimare/tests/data/nidm_pain_dset.json +1349 -0
  73. nimare/tests/data/nimads_annotation.json +1 -0
  74. nimare/tests/data/nimads_studyset.json +1 -0
  75. nimare/tests/data/test_baseline.txt +2 -0
  76. nimare/tests/data/test_pain_dataset.json +1278 -0
  77. nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
  78. nimare/tests/data/test_sleuth_file.txt +18 -0
  79. nimare/tests/data/test_sleuth_file2.txt +10 -0
  80. nimare/tests/data/test_sleuth_file3.txt +5 -0
  81. nimare/tests/data/test_sleuth_file4.txt +5 -0
  82. nimare/tests/data/test_sleuth_file5.txt +5 -0
  83. nimare/tests/test_annotate_cogat.py +32 -0
  84. nimare/tests/test_annotate_gclda.py +86 -0
  85. nimare/tests/test_annotate_lda.py +27 -0
  86. nimare/tests/test_dataset.py +99 -0
  87. nimare/tests/test_decode_continuous.py +132 -0
  88. nimare/tests/test_decode_discrete.py +92 -0
  89. nimare/tests/test_diagnostics.py +168 -0
  90. nimare/tests/test_estimator_performance.py +385 -0
  91. nimare/tests/test_extract.py +46 -0
  92. nimare/tests/test_generate.py +247 -0
  93. nimare/tests/test_io.py +294 -0
  94. nimare/tests/test_meta_ale.py +298 -0
  95. nimare/tests/test_meta_cbmr.py +295 -0
  96. nimare/tests/test_meta_ibma.py +240 -0
  97. nimare/tests/test_meta_kernel.py +209 -0
  98. nimare/tests/test_meta_mkda.py +234 -0
  99. nimare/tests/test_nimads.py +21 -0
  100. nimare/tests/test_reports.py +110 -0
  101. nimare/tests/test_stats.py +101 -0
  102. nimare/tests/test_transforms.py +272 -0
  103. nimare/tests/test_utils.py +200 -0
  104. nimare/tests/test_workflows.py +221 -0
  105. nimare/tests/utils.py +126 -0
  106. nimare/transforms.py +907 -0
  107. nimare/utils.py +1367 -0
  108. nimare/workflows/__init__.py +14 -0
  109. nimare/workflows/base.py +189 -0
  110. nimare/workflows/cbma.py +165 -0
  111. nimare/workflows/ibma.py +108 -0
  112. nimare/workflows/macm.py +77 -0
  113. nimare/workflows/misc.py +65 -0
  114. nimare-0.4.2.dist-info/LICENSE +21 -0
  115. nimare-0.4.2.dist-info/METADATA +124 -0
  116. nimare-0.4.2.dist-info/RECORD +119 -0
  117. nimare-0.4.2.dist-info/WHEEL +5 -0
  118. nimare-0.4.2.dist-info/entry_points.txt +2 -0
  119. nimare-0.4.2.dist-info/top_level.txt +2 -0
nimare/io.py ADDED
@@ -0,0 +1,667 @@
+ """Input/Output operations."""
+
+ import json
+ import logging
+ import re
+ from collections import Counter
+ from itertools import groupby
+ from operator import itemgetter
+ from pathlib import Path
+
+ import numpy as np
+ import pandas as pd
+ import requests
+ from scipy import sparse
+
+ from nimare.dataset import Dataset
+ from nimare.extract.utils import _get_dataset_dir
+ from nimare.utils import _create_name, load_nimads
+
+ LGR = logging.getLogger(__name__)
+
+ DEFAULT_MAP_TYPE_CONVERSION = {
+     "T map": "t",
+     "variance": "varcope",
+     "univariate-beta map": "beta",
+     "Z map": "z",
+     "p map": "p",
+ }
+
+
+ def convert_nimads_to_dataset(studyset, annotation=None):
+     """Convert a NIMADS studyset to a NiMARE Dataset.
+
+     .. versionadded:: 0.0.14
+
+     Parameters
+     ----------
+     studyset : :obj:`str`, :obj:`dict`, or :obj:`nimare.nimads.Studyset`
+         Path to a JSON file containing a NIMADS studyset, a dictionary containing a NIMADS
+         studyset, or a NIMADS studyset object.
+     annotation : :obj:`str`, :obj:`dict`, or :obj:`nimare.nimads.Annotation`, optional
+         Optional path to a JSON file containing a NIMADS annotation, a dictionary containing
+         a NIMADS annotation, or a NIMADS annotation object.
+
+     Returns
+     -------
+     dset : :obj:`nimare.dataset.Dataset`
+         NiMARE Dataset object containing experiment information from the NIMADS studyset.
+     """
+
+     def _analysis_to_dict(study, analysis):
+         result = {
+             "metadata": {
+                 "authors": study.name,
+                 "journal": study.publication,
+                 "title": study.name,
+             },
+             "coords": {
+                 "space": analysis.points[0].space if analysis.points else "UNKNOWN",
+                 "x": [p.x for p in analysis.points] or [None],
+                 "y": [p.y for p in analysis.points] or [None],
+                 "z": [p.z for p in analysis.points] or [None],
+             },
+         }
+
+         sample_sizes = analysis.metadata.get("sample_sizes")
+         sample_size = None
+
+         # Validate sample_sizes if present
+         if sample_sizes is not None and not isinstance(sample_sizes, (list, tuple)):
+             raise TypeError(
+                 f"Expected sample_sizes to be list or tuple, but got {type(sample_sizes)}"
+             )
+
+         if not sample_sizes:
+             # Fall back to a single sample size from analysis or study metadata
+             sample_size = analysis.metadata.get("sample_size")
+             if sample_size is None:
+                 sample_size = study.metadata.get("sample_size")
+
+         # Validate the single sample size if present
+         if sample_size is not None and not isinstance(sample_size, (int, float)):
+             raise TypeError(f"Expected sample_size to be numeric, but got {type(sample_size)}")
+
+         # Add sample size info to result if available
+         if sample_sizes or sample_size is not None:
+             try:
+                 result["metadata"]["sample_sizes"] = sample_sizes or [sample_size]
+             except TypeError as e:
+                 raise TypeError(f"Error converting sample size data to list: {str(e)}") from e
+
+         # Handle annotations if present
+         if analysis.annotations:
+             result["labels"] = {}
+             try:
+                 # "annot" avoids shadowing the outer "annotation" argument
+                 for annot in analysis.annotations.values():
+                     if not isinstance(annot, dict):
+                         raise TypeError(f"Expected annotation to be dict, but got {type(annot)}")
+                     result["labels"].update(annot)
+             except (TypeError, AttributeError) as e:
+                 raise ValueError(f"Invalid annotation format: {str(e)}") from e
+
+         return result
+
+     def _study_to_dict(study):
+         return {
+             "metadata": {
+                 "authors": study.authors,
+                 "journal": study.publication,
+                 "title": study.name,
+             },
+             "contrasts": {_create_name(a): _analysis_to_dict(study, a) for a in study.analyses},
+         }
+
+     # Load the NIMADS studyset
+     studyset = load_nimads(studyset, annotation)
+     return Dataset({_create_name(s): _study_to_dict(s) for s in list(studyset.studies)})
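For example, a minimal usage sketch, assuming the NIMADS JSON files shipped with the test suite (nimare/tests/data):

    from nimare.io import convert_nimads_to_dataset

    # Build a Dataset from a NIMADS studyset, with an optional annotation
    dset = convert_nimads_to_dataset(
        "nimare/tests/data/nimads_studyset.json",
        annotation="nimare/tests/data/nimads_annotation.json",
    )
    print(dset.ids)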
+
+
+ def convert_neurosynth_to_dict(
+     coordinates_file,
+     metadata_file,
+     annotations_files=None,
+     feature_groups=None,
+ ):
+     """Convert Neurosynth/NeuroQuery database files to a dictionary.
+
+     .. versionchanged:: 0.0.10
+
+         * Use new format for Neurosynth and NeuroQuery files.
+
+     .. versionchanged:: 0.0.9
+
+         * Support annotations files organized in a dictionary.
+
+     Parameters
+     ----------
+     coordinates_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's coordinates.
+     metadata_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's metadata.
+     annotations_files : :obj:`dict`, :obj:`list` of :obj:`dict`, or None, optional
+         Optional file(s) with Neurosynth/NeuroQuery's annotations.
+         Each entry should be a dictionary with two keys: "features" and "vocabulary".
+         "features" should point to an NPZ file containing a sparse matrix of feature values.
+         "vocabulary" should point to a TXT file containing labels.
+         The vocabulary corresponds to the columns of the feature matrix, while study IDs are
+         inferred from the metadata file, which MUST be in the same order as the feature matrix.
+         Multiple sets of annotations may be provided, in which case "annotations_files" should
+         be a list of dictionaries. The name of each annotation set will be inferred from the
+         "features" filename, but this can be overridden with the "feature_groups" parameter.
+         Default is None.
+     feature_groups : :obj:`list` of :obj:`str`, or None, optional
+         An optional list of names for the annotation sets defined in "annotations_files".
+         This should only be used if "annotations_files" is used and the user wants to override
+         the automatically extracted annotation set names.
+         Default is None.
+
+     Returns
+     -------
+     dset_dict : :obj:`dict`
+         NiMARE-organized dictionary containing experiment information from text files.
+
+     Warnings
+     --------
+     Starting in version 0.0.10, this function operates on the new Neurosynth/NeuroQuery file
+     format. Old code using this function **will not work** with the new version.
+     """
+     coords_df = pd.read_table(coordinates_file)
+     metadata_df = pd.read_table(metadata_file)
+     assert metadata_df["id"].is_unique, "Metadata file must have one row per ID."
+
+     coords_df["id"] = coords_df["id"].astype(str)
+     metadata_df["id"] = metadata_df["id"].astype(str)
+     metadata_df = metadata_df.set_index("id", drop=False)
+     ids = metadata_df["id"].tolist()
+
+     if "space" not in metadata_df.columns:
+         LGR.warning("No 'space' column detected. Defaulting to 'UNKNOWN'.")
+         metadata_df["space"] = "UNKNOWN"
+
+     if isinstance(annotations_files, dict):
+         annotations_files = [annotations_files]
+
+     if isinstance(feature_groups, str):
+         feature_groups = [feature_groups]
+
+     # Load labels into a single DataFrame
+     if annotations_files is not None:
+         label_dfs = []
+         if feature_groups is not None:
+             assert len(feature_groups) == len(annotations_files)
+
+         for i_feature_group, annotations_dict in enumerate(annotations_files):
+             features_file = annotations_dict["features"]
+             vocabulary_file = annotations_dict["vocabulary"]
+
+             vocab = re.findall("vocab-([a-zA-Z0-9]+)_", features_file)[0]
+             source = re.findall("source-([a-zA-Z0-9]+)_", features_file)[0]
+             value_type = re.findall("type-([a-zA-Z0-9]+)_", features_file)[0]
+
+             if feature_groups is not None:
+                 feature_group = feature_groups[i_feature_group]
+                 feature_group = feature_group.rstrip("_") + "__"
+             else:
+                 feature_group = f"{vocab}_{source}_{value_type}__"
+
+             features = sparse.load_npz(features_file).todense()
+             vocab = np.loadtxt(vocabulary_file, dtype=str, delimiter="\t")
+
+             labels = [feature_group + label for label in vocab]
+
+             temp_label_df = pd.DataFrame(features, index=ids, columns=labels)
+             temp_label_df.index.name = "study_id"
+
+             label_dfs.append(temp_label_df)
+
+         label_df = pd.concat(label_dfs, axis=1)
+     else:
+         label_df = None
+
+     # Compile (pseudo-)NIMADS-format dictionary
+     x = coords_df["x"].values
+     y = coords_df["y"].values
+     z = coords_df["z"].values
+
+     dset_dict = {}
+
+     for sid, study_metadata in metadata_df.iterrows():
+         coord_inds = np.where(coords_df["id"].values == sid)[0]
+         study_dict = {}
+         study_dict["metadata"] = {}
+         study_dict["metadata"]["authors"] = study_metadata.get("authors", "n/a")
+         study_dict["metadata"]["journal"] = study_metadata.get("journal", "n/a")
+         study_dict["metadata"]["year"] = study_metadata.get("year", "n/a")
+         study_dict["metadata"]["title"] = study_metadata.get("title", "n/a")
+         study_dict["contrasts"] = {}
+         study_dict["contrasts"]["1"] = {}
+         # Duplicate metadata across study and contrast levels
+         study_dict["contrasts"]["1"]["metadata"] = {}
+         study_dict["contrasts"]["1"]["metadata"]["authors"] = study_metadata.get("authors", "n/a")
+         study_dict["contrasts"]["1"]["metadata"]["journal"] = study_metadata.get("journal", "n/a")
+         study_dict["contrasts"]["1"]["metadata"]["year"] = study_metadata.get("year", "n/a")
+         study_dict["contrasts"]["1"]["metadata"]["title"] = study_metadata.get("title", "n/a")
+         study_dict["contrasts"]["1"]["coords"] = {}
+         study_dict["contrasts"]["1"]["coords"]["space"] = study_metadata["space"]
+         study_dict["contrasts"]["1"]["coords"]["x"] = list(x[coord_inds])
+         study_dict["contrasts"]["1"]["coords"]["y"] = list(y[coord_inds])
+         study_dict["contrasts"]["1"]["coords"]["z"] = list(z[coord_inds])
+
+         if label_df is not None:
+             study_dict["contrasts"]["1"]["labels"] = label_df.loc[sid].to_dict()
+
+         dset_dict[sid] = study_dict
+
+     return dset_dict
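Note that the vocab/source/type fields are parsed out of the "features" filename, so annotations files must follow the Neurosynth naming scheme. A sketch using the version-7 filenames that appear in nimare/tests/data:

    dset_dict = convert_neurosynth_to_dict(
        coordinates_file="data-neurosynth_version-7_coordinates.tsv.gz",
        metadata_file="data-neurosynth_version-7_metadata.tsv.gz",
        annotations_files={
            "features": (
                "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz"
            ),
            "vocabulary": "data-neurosynth_version-7_vocab-terms_vocabulary.txt",
        },
    )
    # Labels are prefixed with the inferred feature group, e.g. "terms_abstract_tfidf__<term>"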
+
+
+ def convert_neurosynth_to_json(
+     coordinates_file,
+     metadata_file,
+     out_file,
+     annotations_files=None,
+     feature_groups=None,
+ ):
+     """Convert Neurosynth/NeuroQuery database files to a NiMARE JSON file.
+
+     .. versionchanged:: 0.0.10
+
+         * Use new format for Neurosynth and NeuroQuery files.
+
+     .. versionchanged:: 0.0.9
+
+         * Support annotations files organized in a dictionary.
+
+     Parameters
+     ----------
+     coordinates_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's coordinates.
+     metadata_file : :obj:`str`
+         TSV.GZ file with Neurosynth/NeuroQuery's metadata.
+     out_file : :obj:`str`
+         Output NiMARE-format JSON file.
+     annotations_files : :obj:`dict`, :obj:`list` of :obj:`dict`, or None, optional
+         Optional file(s) with Neurosynth/NeuroQuery's annotations.
+         Each entry should be a dictionary with two keys: "features" and "vocabulary".
+         "features" should point to an NPZ file containing a sparse matrix of feature values.
+         "vocabulary" should point to a TXT file containing labels.
+         The vocabulary corresponds to the columns of the feature matrix, while study IDs are
+         inferred from the metadata file, which MUST be in the same order as the feature matrix.
+         Multiple sets of annotations may be provided, in which case "annotations_files" should
+         be a list of dictionaries. The name of each annotation set will be inferred from the
+         "features" filename, but this can be overridden with the "feature_groups" parameter.
+         Default is None.
+     feature_groups : :obj:`list` of :obj:`str`, or None, optional
+         An optional list of names for the annotation sets defined in "annotations_files".
+         This should only be used if "annotations_files" is used and the user wants to override
+         the automatically extracted annotation set names.
+         Default is None.
+
+     Warnings
+     --------
+     Starting in version 0.0.10, this function operates on the new Neurosynth/NeuroQuery file
+     format. Old code using this function **will not work** with the new version.
+     """
+     dset_dict = convert_neurosynth_to_dict(
+         coordinates_file, metadata_file, annotations_files, feature_groups
+     )
+     with open(out_file, "w") as fo:
+         json.dump(dset_dict, fo, indent=4, sort_keys=True)
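This is a thin wrapper around convert_neurosynth_to_dict; a sketch under the same filename assumptions as above:

    convert_neurosynth_to_json(
        "data-neurosynth_version-7_coordinates.tsv.gz",
        "data-neurosynth_version-7_metadata.tsv.gz",
        out_file="neurosynth_dset.json",
    )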
315
+
316
+
317
+ def convert_neurosynth_to_dataset(
318
+ coordinates_file,
319
+ metadata_file,
320
+ annotations_files=None,
321
+ feature_groups=None,
322
+ target="mni152_2mm",
323
+ ):
324
+ """Convert Neurosynth/NeuroQuery database files into NiMARE Dataset.
325
+
326
+ .. versionchanged:: 0.0.10
327
+
328
+ * Use new format for Neurosynth and NeuroQuery files.
329
+
330
+ .. versionchanged:: 0.0.9
331
+
332
+ * Support annotations files organized in a dictionary.
333
+
334
+ Parameters
335
+ ----------
336
+ coordinates_file : :obj:`str`
337
+ TSV.GZ file with Neurosynth/NeuroQuery's coordinates and metadata.
338
+ metadata_file : :obj:`str`
339
+ TSV.GZ file with Neurosynth/NeuroQuery's metadata.
340
+ annotations_files : :obj:`dict`, :obj:`list` of :obj:`dict`, or None, optional
341
+ Optional file(s) with Neurosynth/NeuroQuery's annotations.
342
+ This should consist of a dictionary with two keys: "features" and "vocabulary".
343
+ "features" should have an NPZ file containing a sparse matrix of feature values.
344
+ "vocabulary" should have a TXT file containing labels.
345
+ The vocabulary corresponds to the columns of the feature matrix, while study IDs are
346
+ inferred from the metadata file, which MUST be in the same order as the features matrix.
347
+ Multiple sets of annotations may be provided, in which case "annotations_files" should be
348
+ a list of dictionaries. The appropriate name of each annotation set will be inferred from
349
+ the "features" filename, but this can be overwritten by using the "feature_groups"
350
+ parameter.
351
+ Default is None.
352
+ feature_groups : :obj:`list` of :obj:`str`, or None, optional
353
+ An optional list of names of annotation sets defined in "annotations_files".
354
+ This should only be used if "annotations_files" is used and the users wants to override
355
+ the automatically-extracted annotation set names.
356
+ Default is None.
357
+ target : {'mni152_2mm', 'ale_2mm'}, optional
358
+ Target template space for coordinates. Default is 'mni152_2mm'.
359
+
360
+ Returns
361
+ -------
362
+ :obj:`~nimare.dataset.Dataset`
363
+ Dataset object containing experiment information from text_file.
364
+
365
+ Warnings
366
+ --------
367
+ Starting in version 0.0.10, this function operates on the new Neurosynth/NeuroQuery file
368
+ format. Old code using this function **will not work** with the new version.
369
+ """
370
+ dset_dict = convert_neurosynth_to_dict(
371
+ coordinates_file,
372
+ metadata_file,
373
+ annotations_files,
374
+ feature_groups,
375
+ )
376
+ return Dataset(dset_dict, target=target)
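A sketch of the typical pipeline, assuming nimare.extract.fetch_neurosynth (as used in the NiMARE documentation) to download the version-7 database files first:

    from nimare.extract import fetch_neurosynth
    from nimare.io import convert_neurosynth_to_dataset

    # Download the Neurosynth database (version 7) and convert it to a Dataset
    files = fetch_neurosynth(data_dir=".", version="7", source="abstract", vocab="terms")
    neurosynth_db = files[0]
    dset = convert_neurosynth_to_dataset(
        coordinates_file=neurosynth_db["coordinates"],
        metadata_file=neurosynth_db["metadata"],
        annotations_files=neurosynth_db["features"],
        target="mni152_2mm",
    )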
377
+
378
+
379
+ def convert_sleuth_to_dict(text_file):
380
+ """Convert Sleuth text file to a dictionary.
381
+
382
+ Parameters
383
+ ----------
384
+ text_file : :obj:`str` or :obj:`list` of :obj:`str`
385
+ Path to Sleuth-format text file.
386
+ More than one text file may be provided.
387
+
388
+ Returns
389
+ -------
390
+ :obj:`dict`
391
+ NiMARE-organized dictionary containing experiment information from text file.
392
+ """
393
+ if isinstance(text_file, list):
394
+ dset_dict = {}
395
+ for tf in text_file:
396
+ temp_dict = convert_sleuth_to_dict(tf)
397
+ for sid in temp_dict.keys():
398
+ if sid in dset_dict.keys():
399
+ dset_dict[sid]["contrasts"] = {
400
+ **dset_dict[sid]["contrasts"],
401
+ **temp_dict[sid]["contrasts"],
402
+ }
403
+ else:
404
+ dset_dict[sid] = temp_dict[sid]
405
+ return dset_dict
406
+
407
+ with open(text_file, "r") as file_object:
408
+ data = file_object.read()
409
+
410
+ data = [line.rstrip() for line in re.split("\n\r|\r\n|\n|\r", data)]
411
+ data = [line for line in data if line]
412
+ # First line indicates space. The rest are studies, ns, and coords
413
+ space = data[0].replace(" ", "").replace("//Reference=", "")
414
+
415
+ SPACE_OPTS = ["MNI", "TAL", "Talairach"]
416
+ if space not in SPACE_OPTS:
417
+ raise ValueError(f"Space {space} unknown. Options supported: {', '.join(SPACE_OPTS)}.")
418
+
419
+ # Split into experiments
420
+ data = data[1:]
421
+ exp_idx = []
422
+ header_lines = [i for i in range(len(data)) if data[i].startswith("//")]
423
+
424
+ # Get contiguous header lines to define contrasts
425
+ ranges = []
426
+ for k, g in groupby(enumerate(header_lines), lambda x: x[0] - x[1]):
427
+ group = list(map(itemgetter(1), g))
428
+ ranges.append((group[0], group[-1]))
429
+ if "Subjects" not in data[group[-1]]:
430
+ raise ValueError(f"Sample size line missing for {data[group[0] : group[-1] + 1]}")
431
+ start_idx = [r[0] for r in ranges]
432
+ end_idx = start_idx[1:] + [len(data) + 1]
433
+ split_idx = zip(start_idx, end_idx)
434
+
435
+ dset_dict = {}
436
+ for i_exp, exp_idx in enumerate(split_idx):
437
+ exp_data = data[exp_idx[0] : exp_idx[1]]
438
+ if exp_data:
439
+ header_idx = [i for i in range(len(exp_data)) if exp_data[i].startswith("//")]
440
+ study_info_idx = header_idx[:-1]
441
+ n_idx = header_idx[-1]
442
+ study_info = [exp_data[i].replace("//", "").strip() for i in study_info_idx]
443
+ study_info = " ".join(study_info)
444
+ study_name = study_info.split(":")[0]
445
+ contrast_name = ":".join(study_info.split(":")[1:]).strip()
446
+ sample_size = int(exp_data[n_idx].replace(" ", "").replace("//Subjects=", ""))
447
+ xyz = exp_data[n_idx + 1 :] # Coords are everything after study info and n
448
+ xyz = [row.split() for row in xyz]
449
+ correct_shape = np.all([len(coord) == 3 for coord in xyz])
450
+ if not correct_shape:
451
+ all_shapes = np.unique([len(coord) for coord in xyz]).astype(str)
452
+ raise ValueError(
453
+ f"Coordinates for study '{study_info}' are not all "
454
+ f"correct length. Lengths detected: {', '.join(all_shapes)}."
455
+ )
456
+
457
+ try:
458
+ xyz = np.array(xyz, dtype=float)
459
+ except:
460
+ # Prettify xyz before reporting error
461
+ strs = [[str(e) for e in row] for row in xyz]
462
+ lens = [max(map(len, col)) for col in zip(*strs)]
463
+ fmt = "\t".join("{{:{}}}".format(x) for x in lens)
464
+ table = "\n".join([fmt.format(*row) for row in strs])
465
+ raise ValueError(
466
+ f"Conversion to numpy array failed for study '{study_info}'. Coords:\n{table}"
467
+ )
468
+
469
+ x, y, z = list(xyz[:, 0]), list(xyz[:, 1]), list(xyz[:, 2])
470
+
471
+ if study_name not in dset_dict.keys():
472
+ dset_dict[study_name] = {"contrasts": {}}
473
+ dset_dict[study_name]["contrasts"][contrast_name] = {"coords": {}, "metadata": {}}
474
+ dset_dict[study_name]["contrasts"][contrast_name]["coords"]["space"] = space
475
+ dset_dict[study_name]["contrasts"][contrast_name]["coords"]["x"] = x
476
+ dset_dict[study_name]["contrasts"][contrast_name]["coords"]["y"] = y
477
+ dset_dict[study_name]["contrasts"][contrast_name]["coords"]["z"] = z
478
+ dset_dict[study_name]["contrasts"][contrast_name]["metadata"]["sample_sizes"] = [
479
+ sample_size
480
+ ]
481
+ return dset_dict
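For reference, a minimal made-up Sleuth file that this parser accepts: the first line names the reference space, each experiment is a contiguous block of "//" header lines ending in a Subjects line, followed by one x y z coordinate per line.

    // Reference=MNI
    // Smith et al., 2019: High > Low
    // Subjects=23
    -38 -48 48
    40 -46 52

This yields study name "Smith et al., 2019", contrast name "High > Low", a sample size of 23, and two foci.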
+
+
+ def convert_sleuth_to_json(text_file, out_file):
+     """Convert Sleuth output text file into JSON.
+
+     Parameters
+     ----------
+     text_file : :obj:`str` or :obj:`list` of :obj:`str`
+         Path to Sleuth-format text file.
+         More than one text file may be provided.
+     out_file : :obj:`str`
+         Path to output JSON file.
+     """
+     if not isinstance(text_file, (str, list)):
+         raise ValueError(f"Unsupported type for parameter 'text_file': {type(text_file)}")
+     dset_dict = convert_sleuth_to_dict(text_file)
+
+     with open(out_file, "w") as fo:
+         json.dump(dset_dict, fo, indent=4, sort_keys=True)
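A one-line sketch, assuming a local Sleuth file like the one above (the test suite ships several, e.g. test_sleuth_file.txt):

    convert_sleuth_to_json("test_sleuth_file.txt", "sleuth_dset.json")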
+
+
+ def convert_sleuth_to_dataset(text_file, target="ale_2mm"):
+     """Convert Sleuth output text file into NiMARE Dataset.
+
+     Parameters
+     ----------
+     text_file : :obj:`str` or :obj:`list` of :obj:`str`
+         Path to Sleuth-format text file.
+         More than one text file may be provided.
+     target : {'ale_2mm', 'mni152_2mm'}, optional
+         Target template space for coordinates. Default is 'ale_2mm'
+         (ALE-specific brainmask in MNI152 2mm space).
+
+     Returns
+     -------
+     :obj:`~nimare.dataset.Dataset`
+         Dataset object containing experiment information from text_file.
+     """
+     if not isinstance(text_file, (str, list)):
+         raise ValueError(f"Unsupported type for parameter 'text_file': {type(text_file)}")
+     dset_dict = convert_sleuth_to_dict(text_file)
+     return Dataset(dset_dict, target=target)
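A sketch combining two of the test-suite Sleuth files; contrasts from files that share a study name are merged:

    from nimare.io import convert_sleuth_to_dataset

    dset = convert_sleuth_to_dataset(
        ["test_sleuth_file.txt", "test_sleuth_file2.txt"],
        target="ale_2mm",
    )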
+
+
+ def convert_neurovault_to_dataset(
+     collection_ids,
+     contrasts,
+     img_dir=None,
+     map_type_conversion=None,
+     **dset_kwargs,
+ ):
+     """Convert a group of NeuroVault collections into a NiMARE Dataset.
+
+     .. versionadded:: 0.0.8
+
+     Parameters
+     ----------
+     collection_ids : :obj:`list` of :obj:`int` or :obj:`dict`
+         A list of NeuroVault collections, specified by their IDs.
+         Collection IDs can be accessed through the NeuroVault API
+         (i.e., https://neurovault.org/api/collections) or
+         the main website (i.e., https://neurovault.org/collections).
+         For example, in the URL https://neurovault.org/collections/8836/,
+         `8836` is the collection ID.
+         collection_ids can also be a dictionary whose keys are informative
+         study names and whose values are collection IDs, to give the collections
+         more informative names in the dataset.
+     contrasts : :obj:`dict`
+         Dictionary whose keys represent the name of the contrast in
+         the dataset and whose values represent a regular expression that would
+         match the names represented in NeuroVault.
+         For example, under the ``Name`` column in this URL
+         https://neurovault.org/collections/8836/,
+         a valid contrast could be "as-Animal", which will be called "animal" in the created
+         dataset if the contrasts argument is ``{'animal': "as-Animal"}``.
+     img_dir : :obj:`str` or None, optional
+         Base path to save all the downloaded images. By default, the images
+         will be saved to a temporary directory with the prefix "neurovault".
+     map_type_conversion : :obj:`dict` or None, optional
+         Dictionary whose keys are the expected `map_type` names in NeuroVault and
+         whose values are the names of the respective statistic maps in a NiMARE dataset.
+         Default is None.
+     **dset_kwargs : keyword arguments passed to Dataset
+         Keyword arguments to pass in when creating the Dataset object.
+         See :obj:`~nimare.dataset.Dataset` for details.
+
+     Returns
+     -------
+     :obj:`~nimare.dataset.Dataset`
+         Dataset object containing experiment information from NeuroVault.
+     """
+     img_dir = Path(_get_dataset_dir("_".join(contrasts.keys()), data_dir=img_dir))
+
+     if map_type_conversion is None:
+         map_type_conversion = DEFAULT_MAP_TYPE_CONVERSION
+
+     if not isinstance(collection_ids, dict):
+         collection_ids = {nv_coll: nv_coll for nv_coll in collection_ids}
+
+     dataset_dict = {}
+     for coll_name, nv_coll in collection_ids.items():
+         nv_url = f"https://neurovault.org/api/collections/{nv_coll}/images/?format=json"
+         images = requests.get(nv_url).json()
+         if "Not found" in images.get("detail", ""):
+             raise ValueError(
+                 f"Collection {nv_coll} not found. "
+                 "Three likely causes are (1) the collection doesn't exist, "
+                 "(2) the collection is private, or "
+                 "(3) the provided ID corresponds to an image instead of a collection."
+             )
+
+         dataset_dict[f"study-{coll_name}"] = {"contrasts": {}}
+         for contrast_name, contrast_regex in contrasts.items():
+             dataset_dict[f"study-{coll_name}"]["contrasts"][contrast_name] = {
+                 "images": {
+                     "beta": None,
+                     "t": None,
+                     "varcope": None,
+                 },
+                 "metadata": {"sample_sizes": None},
+             }
+
+             sample_sizes = []
+             no_images = True
+             for img_dict in images["results"]:
+                 if not (
+                     re.match(contrast_regex, img_dict["name"])
+                     and img_dict["map_type"] in map_type_conversion
+                     and img_dict["analysis_level"] == "group"
+                 ):
+                     continue
+
+                 no_images = False
+                 filename = img_dir / (
+                     f"collection-{nv_coll}_id-{img_dict['id']}_" + Path(img_dict["file"]).name
+                 )
+
+                 if not filename.exists():
+                     r = requests.get(img_dict["file"])
+                     with open(filename, "wb") as f:
+                         f.write(r.content)
+
+                 (
+                     dataset_dict[f"study-{coll_name}"]["contrasts"][contrast_name]["images"][
+                         map_type_conversion[img_dict["map_type"]]
+                     ]
+                 ) = filename.as_posix()
+
+                 # Aggregate sample sizes (these should all be the same)
+                 sample_sizes.append(img_dict["number_of_subjects"])
+
+             if no_images:
+                 raise ValueError(
+                     f"No images were found for contrast {contrast_name}. "
+                     f"Please check the contrast regular expression: {contrast_regex}"
+                 )
+             # Take the modal sample size (and warn if there are multiple values)
+             if len(set(sample_sizes)) > 1:
+                 sample_size = _resolve_sample_size(sample_sizes)
+                 LGR.warning(
+                     f"Multiple sample sizes were found for NeuroVault collection {nv_coll} "
+                     f"for contrast {contrast_name} (sample sizes: {set(sample_sizes)}). "
+                     f"Selecting modal sample size: {sample_size}."
+                 )
+             else:
+                 sample_size = sample_sizes[0]
+             (
+                 dataset_dict[f"study-{coll_name}"]["contrasts"][contrast_name]["metadata"][
+                     "sample_sizes"
+                 ]
+             ) = [sample_size]
+
+     dataset = Dataset(dataset_dict, **dset_kwargs)
+
+     return dataset
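A sketch using the collection and contrast named in the docstring; the "pain" key is a hypothetical informative name for collection 8836:

    from nimare.io import convert_neurovault_to_dataset

    dset = convert_neurovault_to_dataset(
        collection_ids={"pain": 8836},
        contrasts={"animal": "as-Animal"},
    )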
+
+
+ def _resolve_sample_size(sample_sizes):
+     """Choose the modal sample size if there are multiple sample sizes to choose from."""
+     sample_size_counts = Counter(sample_sizes)
+     if None in sample_size_counts:
+         sample_size_counts.pop(None)
+
+     return sample_size_counts.most_common()[0][0]
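For example, None entries are dropped before taking the mode:

    _resolve_sample_size([23, 23, 25, None])  # returns 23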