nimare 0.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchmarks/__init__.py +0 -0
- benchmarks/bench_cbma.py +57 -0
- nimare/__init__.py +45 -0
- nimare/_version.py +21 -0
- nimare/annotate/__init__.py +21 -0
- nimare/annotate/cogat.py +213 -0
- nimare/annotate/gclda.py +924 -0
- nimare/annotate/lda.py +147 -0
- nimare/annotate/text.py +75 -0
- nimare/annotate/utils.py +87 -0
- nimare/base.py +217 -0
- nimare/cli.py +124 -0
- nimare/correct.py +462 -0
- nimare/dataset.py +685 -0
- nimare/decode/__init__.py +33 -0
- nimare/decode/base.py +115 -0
- nimare/decode/continuous.py +462 -0
- nimare/decode/discrete.py +753 -0
- nimare/decode/encode.py +110 -0
- nimare/decode/utils.py +44 -0
- nimare/diagnostics.py +510 -0
- nimare/estimator.py +139 -0
- nimare/extract/__init__.py +19 -0
- nimare/extract/extract.py +466 -0
- nimare/extract/utils.py +295 -0
- nimare/generate.py +331 -0
- nimare/io.py +667 -0
- nimare/meta/__init__.py +39 -0
- nimare/meta/cbma/__init__.py +6 -0
- nimare/meta/cbma/ale.py +951 -0
- nimare/meta/cbma/base.py +947 -0
- nimare/meta/cbma/mkda.py +1361 -0
- nimare/meta/cbmr.py +970 -0
- nimare/meta/ibma.py +1683 -0
- nimare/meta/kernel.py +501 -0
- nimare/meta/models.py +1199 -0
- nimare/meta/utils.py +494 -0
- nimare/nimads.py +492 -0
- nimare/reports/__init__.py +24 -0
- nimare/reports/base.py +664 -0
- nimare/reports/default.yml +123 -0
- nimare/reports/figures.py +651 -0
- nimare/reports/report.tpl +160 -0
- nimare/resources/__init__.py +1 -0
- nimare/resources/atlases/Harvard-Oxford-LICENSE +93 -0
- nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
- nimare/resources/database_file_manifest.json +142 -0
- nimare/resources/english_spellings.csv +1738 -0
- nimare/resources/filenames.json +32 -0
- nimare/resources/neurosynth_laird_studies.json +58773 -0
- nimare/resources/neurosynth_stoplist.txt +396 -0
- nimare/resources/nidm_pain_dset.json +1349 -0
- nimare/resources/references.bib +541 -0
- nimare/resources/semantic_knowledge_children.txt +325 -0
- nimare/resources/semantic_relatedness_children.txt +249 -0
- nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
- nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
- nimare/results.py +225 -0
- nimare/stats.py +276 -0
- nimare/tests/__init__.py +1 -0
- nimare/tests/conftest.py +229 -0
- nimare/tests/data/amygdala_roi.nii.gz +0 -0
- nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
- nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
- nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
- nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +100 -0
- nimare/tests/data/neurosynth_dset.json +2868 -0
- nimare/tests/data/neurosynth_laird_studies.json +58773 -0
- nimare/tests/data/nidm_pain_dset.json +1349 -0
- nimare/tests/data/nimads_annotation.json +1 -0
- nimare/tests/data/nimads_studyset.json +1 -0
- nimare/tests/data/test_baseline.txt +2 -0
- nimare/tests/data/test_pain_dataset.json +1278 -0
- nimare/tests/data/test_pain_dataset_multiple_contrasts.json +1242 -0
- nimare/tests/data/test_sleuth_file.txt +18 -0
- nimare/tests/data/test_sleuth_file2.txt +10 -0
- nimare/tests/data/test_sleuth_file3.txt +5 -0
- nimare/tests/data/test_sleuth_file4.txt +5 -0
- nimare/tests/data/test_sleuth_file5.txt +5 -0
- nimare/tests/test_annotate_cogat.py +32 -0
- nimare/tests/test_annotate_gclda.py +86 -0
- nimare/tests/test_annotate_lda.py +27 -0
- nimare/tests/test_dataset.py +99 -0
- nimare/tests/test_decode_continuous.py +132 -0
- nimare/tests/test_decode_discrete.py +92 -0
- nimare/tests/test_diagnostics.py +168 -0
- nimare/tests/test_estimator_performance.py +385 -0
- nimare/tests/test_extract.py +46 -0
- nimare/tests/test_generate.py +247 -0
- nimare/tests/test_io.py +294 -0
- nimare/tests/test_meta_ale.py +298 -0
- nimare/tests/test_meta_cbmr.py +295 -0
- nimare/tests/test_meta_ibma.py +240 -0
- nimare/tests/test_meta_kernel.py +209 -0
- nimare/tests/test_meta_mkda.py +234 -0
- nimare/tests/test_nimads.py +21 -0
- nimare/tests/test_reports.py +110 -0
- nimare/tests/test_stats.py +101 -0
- nimare/tests/test_transforms.py +272 -0
- nimare/tests/test_utils.py +200 -0
- nimare/tests/test_workflows.py +221 -0
- nimare/tests/utils.py +126 -0
- nimare/transforms.py +907 -0
- nimare/utils.py +1367 -0
- nimare/workflows/__init__.py +14 -0
- nimare/workflows/base.py +189 -0
- nimare/workflows/cbma.py +165 -0
- nimare/workflows/ibma.py +108 -0
- nimare/workflows/macm.py +77 -0
- nimare/workflows/misc.py +65 -0
- nimare-0.4.2.dist-info/LICENSE +21 -0
- nimare-0.4.2.dist-info/METADATA +124 -0
- nimare-0.4.2.dist-info/RECORD +119 -0
- nimare-0.4.2.dist-info/WHEEL +5 -0
- nimare-0.4.2.dist-info/entry_points.txt +2 -0
- nimare-0.4.2.dist-info/top_level.txt +2 -0
nimare/nimads.py
ADDED
@@ -0,0 +1,492 @@
"""NIMADS-related classes for NiMARE."""

import json
import weakref
from copy import deepcopy

from nimare.io import convert_nimads_to_dataset


class Studyset:
    """A collection of studies for meta-analysis.

    .. versionadded:: 0.0.14

    This is the primary target for Estimators and Transformers in NiMARE.

    Attributes
    ----------
    id : str
        A unique identifier for the Studyset.
    name : str
        A human-readable name for the Studyset.
    annotations : :obj:`list` of :obj:`nimare.nimads.Annotation` objects
        The Annotation objects associated with the Studyset.
    studies : :obj:`list` of :obj:`nimare.nimads.Study` objects
        The Study objects comprising the Studyset.
    """

    def __init__(self, source, target_space=None, mask=None, annotations=None):
        # load source as json
        if isinstance(source, str):
            with open(source, "r+") as f:
                source = json.load(f)

        self.id = source["id"]
        self.name = source["name"] or ""
        self.studies = [Study(s) for s in source["studies"]]
        self._annotations = []
        if annotations:
            self.annotations = annotations

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Studyset: {self.id}>")

    def __str__(self):
        """Give useful information about the Studyset."""
        return str(" ".join(["Studyset:", self.name, "::", f"studies: {len(self.studies)}"]))

    @property
    def annotations(self):
        """Return existing Annotations."""
        return self._annotations

    @annotations.setter
    def annotations(self, annotation):
        if isinstance(annotation, dict):
            loaded_annotation = Annotation(annotation, self)
        elif isinstance(annotation, str):
            with open(annotation, "r+") as f:
                loaded_annotation = Annotation(json.load(f), self)
        elif isinstance(annotation, Annotation):
            loaded_annotation = annotation
        self._annotations.append(loaded_annotation)

    @annotations.deleter
    def annotations(self, annotation_id=None):
        if annotation_id:
            self._annotations = [a for a in self._annotations if a.id != annotation_id]
        else:
            self._annotations = []

    @classmethod
    def from_nimads(cls, filename):
        """Create a Studyset from a NIMADS JSON file."""
        with open(filename, "r+") as fn:
            nimads = json.load(fn)

        return cls(nimads)

    def combine_analyses(self):
        """Combine analyses in Studyset."""
        studyset = self.copy()
        for study in studyset.studies:
            if len(study.analyses) > 1:
                source_lst = [analysis.to_dict() for analysis in study.analyses]
                ids, names, conditions, images, points, weights = [
                    [source[key] for source in source_lst] for key in source_lst[0]
                ]

                new_source = {
                    "id": "_".join(ids),
                    "name": "; ".join(names),
                    "conditions": [cond for c_list in conditions for cond in c_list],
                    "images": [image for i_list in images for image in i_list],
                    "points": [point for p_list in points for point in p_list],
                    "weights": [weight for w_list in weights for weight in w_list],
                }
                study.analyses = [Analysis(new_source)]

        return studyset

    def to_nimads(self, filename):
        """Write the Studyset to a NIMADS JSON file."""
        with open(filename, "w+") as fn:
            json.dump(self.to_dict(), fn)

    def to_dict(self):
        """Return a dictionary representation of the Studyset."""
        return {
            "id": self.id,
            "name": self.name,
            "studies": [s.to_dict() for s in self.studies],
        }

    def to_dataset(self):
        """Convert the Studyset to a NiMARE Dataset."""
        return convert_nimads_to_dataset(self)

    def load(self, filename):
        """Load a Studyset from a pickled file."""
        raise NotImplementedError("Loading from pickled files is not yet supported.")

    def save(self, filename):
        """Write the Studyset to a pickled file."""
        raise NotImplementedError("Saving to pickled files is not yet supported.")

    def copy(self):
        """Create a copy of the Studyset."""
        return deepcopy(self)

    def slice(self, analyses):
        """Create a new Studyset with only requested Analyses."""
        studyset_dict = self.to_dict()
        annotations = [annot.to_dict() for annot in self.annotations]

        for study in studyset_dict["studies"]:
            study["analyses"] = [a for a in study["analyses"] if a["id"] in analyses]

        studyset = self.__class__(source=studyset_dict)

        for annot in annotations:
            annot["notes"] = [n for n in annot["notes"] if n["analysis"] in analyses]
            studyset.annotation = annot

        return studyset

    def merge(self, right):
        """Merge a separate Studyset into the current one."""
        raise NotImplementedError("Merging Studysets is not yet supported.")

    def update_image_path(self, new_path):
        """Point to a new location for image files on the local filesystem."""
        raise NotImplementedError("Updating image paths is not yet supported.")

    def get_analyses_by_coordinates(self, xyz, r=None, n=None):
        """Extract a list of Analyses with at least one Point near the requested coordinates."""
        raise NotImplementedError("Getting analyses by coordinates is not yet supported.")

    def get_analyses_by_mask(self, img):
        """Extract a list of Analyses with at least one Point in the specified mask."""
        raise NotImplementedError("Getting analyses by mask is not yet supported.")

    def get_analyses_by_annotations(self):
        """Extract a list of Analyses with a given label/annotation."""
        raise NotImplementedError("Getting analyses by annotations is not yet supported.")

    def get_analyses_by_texts(self):
        """Extract a list of Analyses with a given text."""
        raise NotImplementedError("Getting analyses by texts is not yet supported.")

    def get_analyses_by_images(self):
        """Extract a list of Analyses with a given image."""
        raise NotImplementedError("Getting analyses by images is not yet supported.")

    def get_analyses_by_metadata(self):
        """Extract a list of Analyses with a metadata field/value."""
        raise NotImplementedError("Getting analyses by metadata is not yet supported.")

    def get_points(self, analyses):
        """Collect Points associated with specified Analyses."""
        raise NotImplementedError("Getting points is not yet supported.")

    def get_annotations(self, analyses):
        """Collect Annotations associated with specified Analyses."""
        raise NotImplementedError("Getting annotations is not yet supported.")

    def get_texts(self, analyses):
        """Collect texts associated with specified Analyses."""
        raise NotImplementedError("Getting texts is not yet supported.")

    def get_images(self, analyses):
        """Collect image files associated with specified Analyses."""
        raise NotImplementedError("Getting images is not yet supported.")

    def get_metadata(self, analyses):
        """Collect metadata associated with specified Analyses."""
        raise NotImplementedError("Getting metadata is not yet supported.")


class Study:
    """A collection of Analyses from the same paper.

    .. versionadded:: 0.0.14

    Attributes
    ----------
    id : str
        A unique identifier for the Study.
    name : str
        A human readable name of the Study, typically the title of the paper.
    authors : str
        A string of the authors of the paper.
    publication : str
        A string of the publication information for the paper, typically a journal name.
    metadata : dict
        A dictionary of metadata associated with the Study.
    analyses : :obj:`list` of :obj:`nimare.nimads.Analysis` objects
        The Analysis objects comprising the Study.
        An analysis represents a contrast with statistical results.
    """

    def __init__(self, source):
        self.id = source["id"]
        self.name = source["name"] or ""
        self.authors = source["authors"] or ""
        self.publication = source["publication"] or ""
        self.metadata = source.get("metadata", {}) or {}
        self.analyses = [Analysis(a) for a in source["analyses"]]

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Study: {self.id}>")

    def __str__(self):
        """My Simple representation."""
        return str(" ".join([self.name, f"analyses: {len(self.analyses)}"]))

    def get_analyses(self):
        """Collect Analyses from the Study.

        Notes
        -----
        What filters, if any, should we support in this method?
        """
        ...

    def to_dict(self):
        """Return a dictionary representation of the Study."""
        return {
            "id": self.id,
            "name": self.name,
            "authors": self.authors,
            "publication": self.publication,
            "metadata": self.metadata,
            "analyses": [a.to_dict() for a in self.analyses],
        }


class Analysis:
    """A single statistical contrast from a Study.

    .. versionadded:: 0.0.14

    Attributes
    ----------
    id : str
        A unique identifier for the Analysis.
    name : str
        A human readable name of the Analysis.
    conditions : list of Condition objects
        The Conditions in the Analysis.
    annotations : list of Annotation objects
        Any Annotations available for the Analysis.
        Each Annotation should come from the same Annotator.
    images : dict of Image objects
        A dictionary of type: Image pairs.
    points : list of Point objects
        Any significant Points from the Analysis.
    metadata: dict
        A dictionary of metadata associated with the Analysis.

    Notes
    -----
    Should the images attribute be a list instead, if the Images contain type information?
    """

    def __init__(self, source):
        self.id = source["id"]
        self.name = source["name"]
        self.conditions = [
            Condition(c, w) for c, w in zip(source["conditions"], source["weights"])
        ]
        self.images = [Image(i) for i in source["images"]]
        self.points = [Point(p) for p in source["points"]]
        self.metadata = source.get("metadata", {}) or {}
        self.annotations = {}

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Analysis: {self.id}>")

    def __str__(self):
        """My Simple representation."""
        return str(
            " ".join([self.name, f"images: {len(self.images)}", f"points: {len(self.points)}"])
        )

    def to_dict(self):
        """Convert the Analysis to a dictionary."""
        return {
            "id": self.id,
            "name": self.name,
            "conditions": [
                {k: v for k, v in c.to_dict().items() if k in ["name", "description"]}
                for c in self.conditions
            ],
            "images": [i.to_dict() for i in self.images],
            "points": [p.to_dict() for p in self.points],
            "weights": [c.to_dict()["weight"] for c in self.conditions],
        }


class Condition:
    """A condition within an Analysis.

    .. versionadded:: 0.0.14

    Attributes
    ----------
    name: str
        A human readable name of the Condition. Good examples are from cognitive atlas.
    description
        A human readable description of the Condition.
    weight
        The weight of the Condition in the Analysis.

    Notes
    -----
    Condition-level Annotations, like condition-wise trial counts, are stored in the parent
    Analysis's Annotations, preferably with names that make it clear that they correspond to a
    specific Condition.
    """

    def __init__(self, condition, weight):
        self.name = condition["name"]
        self.description = condition["description"]
        self.weight = weight

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Condition: {self.id}>")

    def to_dict(self):
        """Convert the Condition to a dictionary."""
        return {"name": self.name, "description": self.description, "weight": self.weight}


class Annotation:
    """A collection of labels and associated weights from the same Annotator.

    .. versionadded:: 0.0.14

    Attributes
    ----------
    term_weights : :obj:`pandas.DataFrame`
        A pandas DataFrame containing the annotation group's labels and weights.
        This is the main attribute of interest for NeuroStore.
        A dictionary could also work.

    Notes
    -----
    Where would p(term|topic) and p(voxel|topic) arrays/DataFrames go? Having one Annotation per
    Analysis (for each Annotator), and storing these arrays in the Annotation, would make for
    *a lot* of duplication.
    The same goes for metadata/provenance, but that will generally be much lighter on memory than
    the arrays.

    Could be a dictionary with analysis objects as keys?
    (need to define __hash__ and __eq__ for Analysis)
    Or could use Analysis.id as key.
    """

    def __init__(self, source, studyset):
        self.name = source["name"]
        self.id = source["id"]
        self._analysis_ref = {
            a.id: weakref.proxy(a) for study in studyset.studies for a in study.analyses
        }
        self.notes = [Note(self._analysis_ref[n["analysis"]], n["note"]) for n in source["notes"]]
        for note in self.notes:
            self._analysis_ref[note.analysis.id].annotations[self.id] = note.note

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Annotation: {self.id}>")

    def to_dict(self):
        """Convert the Annotation to a dictionary."""
        return {"name": self.name, "id": self.id, "notes": [note.to_dict() for note in self.notes]}


class Note:
    """A Note within an annotation.

    .. versionadded:: 0.0.14

    Attributes
    ----------
    analysis : Analysis object
        the analysis the note is associated with
    note : dict
        the attributes pertaining to the analysis
    """

    def __init__(self, analysis, note):
        self.analysis = analysis
        self.note = note

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Note: {self.id}>")

    def to_dict(self):
        """Convert the Note to a dictionary."""
        return {"analysis": self.analysis.id, "note": self.note}


class Image:
    """A single statistical map from an Analysis.

    .. versionadded:: 0.0.14

    Attributes
    ----------
    filename
    type?

    Notes
    -----
    Should we support remote paths, with some kind of fetching method?
    """

    def __init__(self, source):
        self.url = source["url"]
        self.filename = source["filename"]
        self.space = source["space"]
        self.value_type = source["value_type"]

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Image: {self.id}>")

    def to_dict(self):
        """Convert the Image to a dictionary."""
        return {
            "url": self.url,
            "filename": self.filename,
            "space": self.space,
            "value_type": self.value_type,
        }


class Point:
    """A single peak coordinate from an Analysis.

    .. versionadded:: 0.0.14

    Attributes
    ----------
    x : float
    y : float
    z : float
    space
    kind
    image
    point_values
    """

    def __init__(self, source):
        self.space = source["space"]
        self.x = source["coordinates"][0]
        self.y = source["coordinates"][1]
        self.z = source["coordinates"][2]

    def __repr__(self):
        """My Simple representation."""
        return repr(f"<Point: {self.id}>")

    def to_dict(self):
        """Convert the Point to a dictionary."""
        return {"space": self.space, "coordinates": [self.x, self.y, self.z]}
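The NIMADS classes above wrap the raw NIMADS JSON schema and feed into NiMARE's Dataset-based tooling. The following is a minimal usage sketch based only on the methods shown above (`Studyset.from_nimads`, the `annotations` setter, and `to_dataset`); the file names `my_studyset.json` and `my_annotation.json` are hypothetical placeholders, not files shipped with the package.

```python
from nimare.nimads import Studyset

# Load a NIMADS studyset file; from_nimads() reads the JSON and passes the
# parsed dict to Studyset.__init__.
studyset = Studyset.from_nimads("my_studyset.json")  # hypothetical path

# The annotations setter accepts a dict, a path to a NIMADS annotation JSON
# file, or an Annotation object, and appends it to studyset.annotations.
studyset.annotations = "my_annotation.json"  # hypothetical path

print(studyset)  # e.g. "Studyset: <name> :: studies: <N>"

# Convert to a NiMARE Dataset for use with Estimators and Transformers.
dset = studyset.to_dataset()
```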
nimare/reports/__init__.py
ADDED
@@ -0,0 +1,24 @@
"""Reports module."""

from .base import Report, run_reports
from .figures import (
    gen_table,
    plot_clusters,
    plot_coordinates,
    plot_heatmap,
    plot_interactive_brain,
    plot_mask,
    plot_static_brain,
)

__all__ = [
    "Report",
    "run_reports",
    "gen_table",
    "plot_clusters",
    "plot_coordinates",
    "plot_heatmap",
    "plot_interactive_brain",
    "plot_mask",
    "plot_static_brain",
]
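For context, `run_reports` is the report-generation entry point exported above. The sketch below is illustrative only: the dataset path is hypothetical, and it assumes that `run_reports(results, output_dir)` is the call signature and that results from `CBMAWorkflow` (nimare/workflows/cbma.py in the listing above) are an accepted input; consult nimare/reports/base.py for the exact interface.

```python
from nimare.dataset import Dataset
from nimare.reports import run_reports
from nimare.workflows.cbma import CBMAWorkflow

# Hypothetical coordinate-based dataset file; substitute a real one.
dset = Dataset("my_coordinates_dataset.json")

# Fit a coordinate-based meta-analysis workflow; the estimator, corrector,
# and diagnostics defaults are defined by CBMAWorkflow.
results = CBMAWorkflow().fit(dset)

# Assumed signature: render an HTML report for the results into "cbma_report/".
run_reports(results, "cbma_report")
```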