nimare 0.4.2rc4__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nimare/_version.py +3 -3
- nimare/io.py +37 -5
- nimare/meta/utils.py +1 -1
- nimare/nimads.py +280 -34
- nimare/tests/test_io.py +42 -0
- nimare/tests/test_nimads.py +263 -0
- nimare/utils.py +3 -3
- {nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/METADATA +1 -1
- {nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/RECORD +13 -13
- {nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/LICENSE +0 -0
- {nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/WHEEL +0 -0
- {nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/entry_points.txt +0 -0
- {nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/top_level.txt +0 -0
nimare/_version.py
CHANGED
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2025-
+ "date": "2025-04-25T13:22:39-0500",
 "dirty": false,
 "error": null,
- "full-revisionid": "
- "version": "0.
+ "full-revisionid": "9e3b73cbb6235b31a4fab7a9a118e32c1d2f9932",
+ "version": "0.5.0"
}
'''  # END VERSION_JSON
 
nimare/io.py
CHANGED
@@ -62,13 +62,45 @@ def convert_nimads_to_dataset(studyset, annotation=None):
             "z": [p.z for p in analysis.points] or [None],
         },
     }
-
-
-
+
+    sample_sizes = analysis.metadata.get("sample_sizes")
+    sample_size = None
+
+    # Validate sample sizes if present
+    if sample_sizes is not None and not isinstance(sample_sizes, (list, tuple)):
+        raise TypeError(
+            f"Expected sample_sizes to be list or tuple, but got {type(sample_sizes)}"
+        )
+
+    if not sample_sizes:
+        # Try to get single sample size from analysis or study metadata
+        sample_size = analysis.metadata.get("sample_size")
+        if sample_size is None:
+            sample_size = study.metadata.get("sample_size")
+
+    # Validate single sample size if present
+    if sample_size is not None and not isinstance(sample_size, (int, float)):
+        raise TypeError(f"Expected sample_size to be numeric, but got {type(sample_size)}")
+
+    # Add sample size info to result if available
+    if sample_sizes or sample_size is not None:
+        try:
+            result["metadata"]["sample_sizes"] = sample_sizes or [sample_size]
+        except TypeError as e:
+            raise TypeError(f"Error converting sample size data to list: {str(e)}") from e
+
+    # Handle annotations if present
     if analysis.annotations:
         result["labels"] = {}
-
-
+        try:
+            for annotation in analysis.annotations.values():
+                if not isinstance(annotation, dict):
+                    raise TypeError(
+                        f"Expected annotation to be dict, but got {type(annotation)}"
+                    )
+                result["labels"].update(annotation)
+        except (TypeError, AttributeError) as e:
+            raise ValueError(f"Invalid annotation format: {str(e)}") from e
 
     return result
 
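Note on the new block: a `sample_sizes` list on the analysis takes precedence; failing that, a scalar `sample_size` is looked up on the analysis and then on the parent study and wrapped as a one-element list. A minimal sketch of the resulting behavior, mirroring the new tests later in this diff (`example_nimads_dict` is an illustrative placeholder for a NIMADS-format studyset dict):

    from nimare.io import convert_nimads_to_dataset
    from nimare.nimads import Studyset

    studyset = Studyset(example_nimads_dict)  # placeholder NIMADS-format dict
    for study in studyset.studies:
        for analysis in study.analyses:
            analysis.metadata["sample_size"] = 20  # scalar fallback; a list would win

    dset = convert_nimads_to_dataset(studyset)
    assert "sample_sizes" in dset.metadata.columns  # stored as [20] per analysis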
nimare/meta/utils.py
CHANGED
@@ -33,7 +33,7 @@ def _convolve_sphere(kernel, ijks, index, max_shape):
 
 def np_all_axis1(x):
     """Numba compatible version of np.all(x, axis=1)."""
-    out = np.ones(x.shape[0], dtype=np.
+    out = np.ones(x.shape[0], dtype=np.bool_)
     for i in range(x.shape[1]):
         out = np.logical_and(out, x[:, i])
     return out
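The removed line is truncated in this view, but the replacement allocates the accumulator as `np.bool_`, the NumPy boolean scalar type, which is valid both in NumPy >= 1.24 (where the old `np.bool` alias is gone) and inside Numba-compiled code. A quick sanity check of the helper in plain NumPy:

    import numpy as np

    def np_all_axis1(x):
        """Numba compatible version of np.all(x, axis=1)."""
        out = np.ones(x.shape[0], dtype=np.bool_)
        for i in range(x.shape[1]):
            out = np.logical_and(out, x[:, i])
        return out

    x = np.array([[True, True], [True, False]])
    assert np.array_equal(np_all_axis1(x), np.all(x, axis=1))  # [True, False]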
nimare/nimads.py
CHANGED
@@ -4,7 +4,11 @@ import json
 import weakref
 from copy import deepcopy
 
+import numpy as np
+from nilearn._utils import load_niimg
+
 from nimare.io import convert_nimads_to_dataset
+from nimare.utils import mm2vox
 
 
 class Studyset:
@@ -84,7 +88,7 @@ class Studyset:
         for study in studyset.studies:
             if len(study.analyses) > 1:
                 source_lst = [analysis.to_dict() for analysis in study.analyses]
-                ids, names, conditions, images, points, weights = [
+                ids, names, conditions, images, points, weights, metadata = [
                     [source[key] for source in source_lst] for key in source_lst[0]
                 ]
 
@@ -95,6 +99,7 @@ class Studyset:
                     "images": [image for i_list in images for image in i_list],
                     "points": [point for p_list in points for point in p_list],
                     "weights": [weight for w_list in weights for weight in w_list],
+                    "metadata": {k: v for m_dict in metadata for k, v in m_dict.items()},
                 }
                 study.analyses = [Analysis(new_source)]
 
@@ -118,12 +123,42 @@ class Studyset:
         return convert_nimads_to_dataset(self)
 
     def load(self, filename):
-        """Load a Studyset from a pickled file.
-
+        """Load a Studyset from a pickled file.
+
+        Parameters
+        ----------
+        filename : str
+            Path to the pickled file to load from.
+
+        Returns
+        -------
+        Studyset
+            The loaded Studyset object.
+        """
+        import pickle
+
+        with open(filename, "rb") as f:
+            loaded_data = pickle.load(f)
+
+        # Update current instance with loaded data
+        self.id = loaded_data.id
+        self.name = loaded_data.name
+        self.studies = loaded_data.studies
+        self._annotations = loaded_data._annotations
+        return self
 
     def save(self, filename):
-        """Write the Studyset to a pickled file.
-
+        """Write the Studyset to a pickled file.
+
+        Parameters
+        ----------
+        filename : str
+            Path where the pickled file should be saved.
+        """
+        import pickle
+
+        with open(filename, "wb") as f:
+            pickle.dump(self, f)
 
     def copy(self):
         """Create a copy of the Studyset."""
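The previously stubbed `save`/`load` now round-trip through pickle; `load` copies the loaded attributes onto the current instance and also returns `self`. A sketch of the round trip, mirroring `test_studyset_save_load` later in this diff (`studyset` is assumed to be an already-populated `Studyset`):

    studyset.save("studyset.pkl")

    empty = Studyset({"id": "temp", "name": "", "studies": []})
    empty.load("studyset.pkl")  # copies id, name, studies, and annotations onto `empty`
    assert empty.id == studyset.id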
@@ -141,49 +176,221 @@ class Studyset:
 
         for annot in annotations:
             annot["notes"] = [n for n in annot["notes"] if n["analysis"] in analyses]
-            studyset.
+            studyset.annotations = annot
 
         return studyset
 
     def merge(self, right):
-        """Merge a separate Studyset into the current one.
-
-
-
-
-
+        """Merge a separate Studyset into the current one.
+
+        Parameters
+        ----------
+        right : Studyset
+            The other Studyset to merge with this one.
+
+        Returns
+        -------
+        Studyset
+            A new Studyset containing merged studies from both input Studysets.
+            For studies with the same ID, their analyses and metadata are combined,
+            with data from self (left) taking precedence in case of conflicts.
+        """
+        if not isinstance(right, Studyset):
+            raise ValueError("Can only merge with another Studyset")
+
+        # Create new source dictionary starting with left (self) studyset
+        merged_source = self.to_dict()
+        merged_source["id"] = f"{self.id}_{right.id}"
+        merged_source["name"] = f"Merged: {self.name} + {right.name}"
+
+        # Create lookup of existing studies by ID
+        left_studies = {study["id"]: study for study in merged_source["studies"]}
+
+        # Process studies from right studyset
+        right_dict = right.to_dict()
+        for right_study in right_dict["studies"]:
+            study_id = right_study["id"]
+
+            if study_id in left_studies:
+                # Merge study data
+                left_study = left_studies[study_id]
+
+                # Keep metadata from left unless missing
+                left_study["metadata"].update(
+                    {
+                        k: v
+                        for k, v in right_study["metadata"].items()
+                        if k not in left_study["metadata"]
+                    }
+                )
+
+                # Keep basic info from left unless empty
+                for field in ["name", "authors", "publication"]:
+                    if not left_study[field]:
+                        left_study[field] = right_study[field]
+
+                # Combine analyses, avoiding duplicates by ID
+                left_analyses = {a["id"]: a for a in left_study["analyses"]}
+                for right_analysis in right_study["analyses"]:
+                    if right_analysis["id"] not in left_analyses:
+                        left_study["analyses"].append(right_analysis)
+            else:
+                # Add new study
+                merged_source["studies"].append(right_study)
+
+        # Create new merged studyset
+        merged = self.__class__(source=merged_source)
+
+        # Merge annotations, preferring left's annotations for conflicts
+        existing_annot_ids = {a.id for a in self.annotations}
+        for right_annot in right.annotations:
+            if right_annot.id not in existing_annot_ids:
+                merged.annotations = right_annot.to_dict()
+
+        return merged
 
     def get_analyses_by_coordinates(self, xyz, r=None, n=None):
-        """Extract a list of Analyses with at least one Point near the requested coordinates.
-
+        """Extract a list of Analyses with at least one Point near the requested coordinates.
+
+        Parameters
+        ----------
+        xyz : array_like
+            1 x 3 array of coordinates in mm space to search from
+        r : float, optional
+            Search radius in millimeters.
+            Mutually exclusive with n.
+        n : int, optional
+            Number of closest analyses to return.
+            Mutually exclusive with r.
+
+        Returns
+        -------
+        list[str]
+            A list of Analysis IDs with at least one point within the search criteria.
+
+        Notes
+        -----
+        Either r or n must be provided, but not both.
+        """
+        if (r is None and n is None) or (r is not None and n is not None):
+            raise ValueError("Exactly one of r or n must be provided.")
+
+        xyz = np.asarray(xyz).ravel()
+        if xyz.shape != (3,):
+            raise ValueError("xyz must be a 1 x 3 array-like object.")
+
+        # Extract all points from all analyses
+        all_points = []
+        analysis_ids = []
+        for study in self.studies:
+            for analysis in study.analyses:
+                for point in analysis.points:
+                    if hasattr(point, "x") and hasattr(point, "y") and hasattr(point, "z"):
+                        all_points.append([point.x, point.y, point.z])
+                        analysis_ids.append(analysis.id)
+
+        if not all_points:  # Return empty list if no coordinates found
+            return []
+
+        all_points = np.array(all_points)
+
+        # Calculate Euclidean distances to all points
+        distances = np.sqrt(np.sum((all_points - xyz) ** 2, axis=1))
+
+        if r is not None:
+            # Find analyses with points within radius r
+            within_radius = distances <= r
+            found_analyses = set(np.array(analysis_ids)[within_radius])
+        else:
+            # Find n closest analyses
+            closest_n_idx = np.argsort(distances)[:n]
+            found_analyses = set(np.array(analysis_ids)[closest_n_idx])
+
+        return list(found_analyses)
 
     def get_analyses_by_mask(self, img):
-        """Extract a list of Analyses with at least one Point in the specified mask.
-        raise NotImplementedError("Getting analyses by mask is not yet supported.")
+        """Extract a list of Analyses with at least one Point in the specified mask.
 
-
-
-
+        Parameters
+        ----------
+        img : img_like
+            Mask across which to search for coordinates.
 
-
-
-
+        Returns
+        -------
+        list[str]
+            A list of Analysis IDs with at least one point in the mask.
+        """
+        # Load mask
+        mask = load_niimg(img)
+
+        # Extract all points from all analyses
+        all_points = []
+        analysis_ids = []
+        for study in self.studies:
+            for analysis in study.analyses:
+                for point in analysis.points:
+                    if hasattr(point, "x") and hasattr(point, "y") and hasattr(point, "z"):
+                        all_points.append([point.x, point.y, point.z])
+                        analysis_ids.append(analysis.id)
+
+        if not all_points:  # Return empty list if no coordinates found
+            return []
+
+        # Convert to voxel coordinates
+        all_points = np.array(all_points)
+        ijk = mm2vox(all_points, mask.affine)
+
+        # Get mask coordinates
+        mask_data = mask.get_fdata()
+        mask_coords = np.vstack(np.where(mask_data)).T
 
-
-
-        raise NotImplementedError("Getting analyses by images is not yet supported.")
+        # Check for presence of coordinates in mask
+        in_mask = np.any(np.all(ijk[:, None] == mask_coords[None, :], axis=-1), axis=-1)
 
-
+        # Get unique analysis IDs where points are in mask
+        found_analyses = set(np.array(analysis_ids)[in_mask])
+
+        return list(found_analyses)
+
+    def get_analyses_by_annotations(self, key, value=None):
+        """Extract a list of Analyses with a given label/annotation."""
+        annotations = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                a_annot = analysis.annotations
+                if key in a_annot and (value is None or a_annot[key] == value):
+                    annotations[analysis.id] = {key: a_annot[key]}
+        return annotations
+
+    def get_analyses_by_metadata(self, key, value=None):
         """Extract a list of Analyses with a metadata field/value."""
-
+        metadata = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                a_metadata = analysis.metadata
+                if key in a_metadata and (value is None or a_metadata[key] == value):
+                    metadata[analysis.id] = {key: a_metadata[key]}
+        return metadata
 
     def get_points(self, analyses):
         """Collect Points associated with specified Analyses."""
-
+        points = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    points[analysis.id] = analysis.points
+        return points
 
     def get_annotations(self, analyses):
         """Collect Annotations associated with specified Analyses."""
-
+        annotations = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    annotations[analysis.id] = analysis.annotations
+
+        return annotations
 
     def get_texts(self, analyses):
         """Collect texts associated with specified Analyses."""
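Usage of the two queries implemented above: `get_analyses_by_coordinates` requires exactly one of `r` (search radius in mm) or `n` (number of nearest analyses) and returns analysis IDs, while `get_analyses_by_mask` returns IDs of analyses with at least one peak falling inside a binary mask image. A sketch, assuming `studyset` is a populated `Studyset` and `mask_img` is a NIfTI-like image:

    ids_near = studyset.get_analyses_by_coordinates([0, 0, 0], r=10)    # within 10 mm
    ids_nearest = studyset.get_analyses_by_coordinates([0, 0, 0], n=5)  # 5 closest

    # Passing neither r nor n, or both, raises ValueError:
    # studyset.get_analyses_by_coordinates([0, 0, 0])             # ValueError
    # studyset.get_analyses_by_coordinates([0, 0, 0], r=10, n=5)  # ValueError

    ids_in_mask = studyset.get_analyses_by_mask(mask_img)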
@@ -191,11 +398,32 @@ class Studyset:
 
     def get_images(self, analyses):
         """Collect image files associated with specified Analyses."""
-
+        images = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    images[analysis.id] = analysis.images
+        return images
 
     def get_metadata(self, analyses):
-        """Collect metadata associated with specified Analyses.
-
+        """Collect metadata associated with specified Analyses.
+
+        Parameters
+        ----------
+        analyses : list of str
+            List of Analysis IDs to get metadata for.
+
+        Returns
+        -------
+        dict[str, dict]
+            Dictionary mapping Analysis IDs to their combined metadata (including study metadata).
+        """
+        metadata = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    metadata[analysis.id] = analysis.get_metadata()
+        return metadata
 
 
 class Study:
@@ -226,7 +454,7 @@ class Study:
         self.authors = source["authors"] or ""
         self.publication = source["publication"] or ""
         self.metadata = source.get("metadata", {}) or {}
-        self.analyses = [Analysis(a) for a in source["analyses"]]
+        self.analyses = [Analysis(a, study=self) for a in source["analyses"]]
 
     def __repr__(self):
         """My Simple representation."""
@@ -285,7 +513,7 @@ class Analysis:
     Should the images attribute be a list instead, if the Images contain type information?
     """
 
-    def __init__(self, source):
+    def __init__(self, source, study=None):
         self.id = source["id"]
         self.name = source["name"]
         self.conditions = [
@@ -295,6 +523,7 @@ class Analysis:
         self.points = [Point(p) for p in source["points"]]
         self.metadata = source.get("metadata", {}) or {}
         self.annotations = {}
+        self._study = weakref.proxy(study) if study else None
 
     def __repr__(self):
         """My Simple representation."""
@@ -306,6 +535,22 @@ class Analysis:
             " ".join([self.name, f"images: {len(self.images)}", f"points: {len(self.points)}"])
         )
 
+    def get_metadata(self) -> "dict[str, any]":
+        """Get combined metadata from both analysis and parent study.
+
+        Returns
+        -------
+        dict[str, any]
+            Combined metadata dictionary with analysis metadata taking precedence
+            over study metadata for any overlapping keys.
+        """
+        if self._study is None:
+            return self.metadata.copy()
+
+        combined_metadata = self._study.metadata.copy()
+        combined_metadata.update(self.metadata)
+        return combined_metadata
+
     def to_dict(self):
         """Convert the Analysis to a dictionary."""
         return {
@@ -318,6 +563,7 @@ class Analysis:
             "images": [i.to_dict() for i in self.images],
             "points": [p.to_dict() for p in self.points],
             "weights": [c.to_dict()["weight"] for c in self.conditions],
+            "metadata": self.metadata,
         }
 
 
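With the new back-reference from `Analysis` to its parent `Study` (held as a `weakref.proxy` to avoid a reference cycle), `Analysis.get_metadata()` merges study-level and analysis-level metadata, with analysis keys winning. An illustrative sketch, assuming `study` is a `Study` from a populated studyset (the values are made up):

    study.metadata = {"sample_size": 25, "year": 2020}
    analysis = study.analyses[0]  # constructed with study=self, so _study is set
    analysis.metadata = {"sample_size": 30}

    combined = analysis.get_metadata()
    assert combined == {"sample_size": 30, "year": 2020}  # analysis value wins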
nimare/tests/test_io.py
CHANGED
@@ -22,6 +22,48 @@ def test_convert_nimads_to_dataset(example_nimads_studyset, example_nimads_annot
     assert isinstance(dset2, nimare.dataset.Dataset)
 
 
+def test_convert_nimads_to_dataset_sample_sizes(
+    example_nimads_studyset, example_nimads_annotation
+):
+    """Conversion of nimads JSON to nimare dataset."""
+    studyset = Studyset(example_nimads_studyset)
+    for study in studyset.studies:
+        for analysis in study.analyses:
+            analysis.metadata["sample_sizes"] = [2, 20]
+
+    dset = io.convert_nimads_to_dataset(studyset)
+
+    assert isinstance(dset, nimare.dataset.Dataset)
+    assert "sample_sizes" in dset.metadata.columns
+
+
+def test_convert_nimads_to_dataset_single_sample_size(
+    example_nimads_studyset, example_nimads_annotation
+):
+    """Test conversion of nimads JSON to nimare dataset with a single sample size value."""
+    studyset = Studyset(example_nimads_studyset)
+    for study in studyset.studies:
+        for analysis in study.analyses:
+            analysis.metadata["sample_size"] = 20
+
+    dset = io.convert_nimads_to_dataset(studyset)
+
+    assert isinstance(dset, nimare.dataset.Dataset)
+    assert "sample_sizes" in dset.metadata.columns
+
+
+def test_analysis_to_dict_invalid_sample_sizes_type(example_nimads_studyset):
+    """Test _analysis_to_dict raises ValueError when sample_sizes is not a list/tuple."""
+    studyset = Studyset(example_nimads_studyset)
+    # Set sample_sizes to an int rather than list/tuple
+    for study in studyset.studies:
+        for analysis in study.analyses:
+            analysis.metadata["sample_sizes"] = 5
+    with pytest.raises(TypeError):
+        # Trigger conversion which internally calls _analysis_to_dict
+        io.convert_nimads_to_dataset(studyset)
+
+
 def test_convert_sleuth_to_dataset_smoke():
     """Smoke test for Sleuth text file conversion."""
     sleuth_file = os.path.join(get_test_data_path(), "test_sleuth_file.txt")
nimare/tests/test_nimads.py
CHANGED
@@ -1,5 +1,11 @@
 """Test NiMADS functionality."""
 
+import json
+import os
+import tempfile
+
+import pytest
+
 from nimare import nimads
 from nimare.dataset import Dataset
 
@@ -19,3 +25,260 @@ def test_load_nimads(example_nimads_studyset, example_nimads_annotation):
     assert isinstance(filtered_studyset, nimads.Studyset)
     dataset = filtered_studyset.to_dataset()
     assert isinstance(dataset, Dataset)
+
+
+def test_slice_preserves_metadata_and_annotations(
+    example_nimads_studyset, example_nimads_annotation
+):
+    """Test that slicing preserves both metadata and annotations.
+
+    This test verifies that both metadata attached to analyses and annotation
+    notes are correctly preserved when slicing a studyset.
+    """
+    studyset = nimads.Studyset(example_nimads_studyset)
+    studyset.annotations = example_nimads_annotation
+
+    # Get analysis IDs from the first annotation
+    annotation = studyset.annotations[0]
+    analysis_ids = [n.analysis.id for n in annotation.notes if n.note["include"]]
+    selected_ids = analysis_ids[:2]  # Take first two analyses
+
+    # Add metadata to the analyses we'll keep
+    metadata_map = {}
+    for study in studyset.studies:
+        for analysis in study.analyses:
+            if analysis.id in selected_ids:
+                analysis.metadata = {
+                    "sample_size": 30,
+                    "contrast_type": "activation",
+                    "significance_threshold": 0.001,
+                }
+                metadata_map[analysis.id] = analysis.metadata
+
+    # Slice studyset
+    sliced_studyset = studyset.slice(analyses=selected_ids)
+
+    # Verify analyses and their metadata are preserved
+    for study in sliced_studyset.studies:
+        for analysis in study.analyses:
+            assert analysis.id in selected_ids
+            assert analysis.metadata == metadata_map[analysis.id]
+
+    # Verify annotations are preserved for remaining analyses
+    sliced_annotation = sliced_studyset.annotations[0]
+    sliced_analysis_ids = [n.analysis.id for n in sliced_annotation.notes]
+    sliced_annotation_notes = {n.analysis.id: n.note for n in sliced_annotation.notes}
+
+    # Check that notes exist only for remaining analyses
+    assert set(sliced_analysis_ids) == set(selected_ids)
+
+    # Check that annotation contents are preserved
+    for analysis_id in selected_ids:
+        original_note = next(n.note for n in annotation.notes if n.analysis.id == analysis_id)
+        assert sliced_annotation_notes[analysis_id] == original_note
+
+
+def test_studyset_init(example_nimads_studyset):
+    """Test Studyset initialization."""
+    # Test initialization with dict
+    studyset1 = nimads.Studyset(example_nimads_studyset)
+    assert studyset1.id == example_nimads_studyset["id"]
+    assert studyset1.name == example_nimads_studyset["name"]
+    assert len(studyset1.studies) == len(example_nimads_studyset["studies"])
+
+    # Test initialization with JSON file
+    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
+        json.dump(example_nimads_studyset, tmp)
+        tmp_path = tmp.name
+
+    try:
+        studyset2 = nimads.Studyset(tmp_path)
+        assert studyset2.id == example_nimads_studyset["id"]
+        assert studyset2.name == example_nimads_studyset["name"]
+        assert len(studyset2.studies) == len(example_nimads_studyset["studies"])
+    finally:
+        os.unlink(tmp_path)
+
+
+def test_studyset_string_methods(example_nimads_studyset):
+    """Test string representation methods."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+
+    # Test __repr__
+    assert repr(studyset) == f"'<Studyset: {studyset.id}>'"
+
+    # Test __str__
+    expected_str = f"Studyset: {studyset.name} :: studies: {len(studyset.studies)}"
+    assert str(studyset) == expected_str
+
+
+def test_studyset_save_load(example_nimads_studyset):
+    """Test saving and loading Studyset."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+
+    with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as tmp:
+        tmp_path = tmp.name
+
+    try:
+        # Test save
+        studyset.save(tmp_path)
+        assert os.path.exists(tmp_path)
+
+        # Test load
+        new_studyset = nimads.Studyset({"id": "temp", "name": "", "studies": []})
+        new_studyset.load(tmp_path)
+
+        assert new_studyset.id == studyset.id
+        assert new_studyset.name == studyset.name
+        assert len(new_studyset.studies) == len(studyset.studies)
+    finally:
+        os.unlink(tmp_path)
+
+
+def test_studyset_to_dict(example_nimads_studyset):
+    """Test conversion to dictionary."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+    result = studyset.to_dict()
+
+    assert isinstance(result, dict)
+    assert "id" in result
+    assert "name" in result
+    assert "studies" in result
+    assert len(result["studies"]) == len(studyset.studies)
+
+
+def test_studyset_to_nimads(example_nimads_studyset):
+    """Test saving to NIMADS format."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+
+    with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmp:
+        tmp_path = tmp.name
+
+    try:
+        studyset.to_nimads(tmp_path)
+        assert os.path.exists(tmp_path)
+
+        # Verify the saved file can be loaded
+        with open(tmp_path, "r") as f:
+            saved_data = json.load(f)
+
+        assert saved_data["id"] == studyset.id
+        assert saved_data["name"] == studyset.name
+        assert len(saved_data["studies"]) == len(studyset.studies)
+    finally:
+        os.unlink(tmp_path)
+
+
+def test_studyset_copy(example_nimads_studyset):
+    """Test copying of Studyset."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+    copied = studyset.copy()
+
+    assert copied is not studyset
+    assert copied.id == studyset.id
+    assert copied.name == studyset.name
+    assert len(copied.studies) == len(studyset.studies)
+
+
+def test_studyset_merge(example_nimads_studyset):
+    """Test merging of Studysets."""
+    studyset1 = nimads.Studyset(example_nimads_studyset)
+
+    # Create a modified copy for merging
+    modified_data = example_nimads_studyset.copy()
+    modified_data["id"] = "other_id"
+    modified_data["name"] = "Other name"
+    studyset2 = nimads.Studyset(modified_data)
+
+    merged = studyset1.merge(studyset2)
+
+    assert isinstance(merged, nimads.Studyset)
+    assert merged.id == f"{studyset1.id}_{studyset2.id}"
+    assert merged.name == f"Merged: {studyset1.name} + {studyset2.name}"
+
+    # Test invalid merge
+    with pytest.raises(ValueError):
+        studyset1.merge("not a studyset")
+
+
+def test_get_analyses_by_coordinates(example_nimads_studyset):
+    """Test retrieving analyses by coordinates."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+
+    # Test with radius
+    xyz = [0, 0, 0]
+    results_r = studyset.get_analyses_by_coordinates(xyz, r=10)
+    assert isinstance(results_r, list)
+
+    # Test with n nearest
+    results_n = studyset.get_analyses_by_coordinates(xyz, n=5)
+    assert isinstance(results_n, list)
+    assert len(results_n) <= 5
+
+    # Test invalid parameters
+    with pytest.raises(ValueError):
+        studyset.get_analyses_by_coordinates(xyz)  # Neither r nor n
+    with pytest.raises(ValueError):
+        studyset.get_analyses_by_coordinates(xyz, r=10, n=5)  # Both r and n
+    with pytest.raises(ValueError):
+        studyset.get_analyses_by_coordinates([0, 0])  # Invalid coordinates
+
+
+def test_get_analyses_by_mask(example_nimads_studyset, mni_mask):
+    """Test retrieving analyses by mask."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+
+    results = studyset.get_analyses_by_mask(mni_mask)
+    assert isinstance(results, list)
+
+
+def test_get_analyses_by_metadata(example_nimads_studyset):
+    """Test retrieving analyses by metadata."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+
+    # Add some metadata for testing
+    key = "test_key"
+    value = "test_value"
+    for study in studyset.studies:
+        for analysis in study.analyses:
+            analysis.metadata[key] = value
+
+    # Test with key only
+    results1 = studyset.get_analyses_by_metadata(key)
+    assert isinstance(results1, dict)
+
+    # Test with key and value
+    results2 = studyset.get_analyses_by_metadata(key, value)
+    assert isinstance(results2, dict)
+    assert all(list(d.values())[0] == value for d in results2.values())
+
+
+def test_data_retrieval_methods(example_nimads_studyset):
+    """Test methods that retrieve data for specified analyses."""
+    studyset = nimads.Studyset(example_nimads_studyset)
+
+    # Get some analysis IDs to test with
+    analysis_ids = []
+    for study in studyset.studies:
+        for analysis in study.analyses:
+            analysis_ids.append(analysis.id)
+            if len(analysis_ids) >= 2:  # Just test with first two analyses
+                break
+        if len(analysis_ids) >= 2:
+            break
+
+    # Test get_points
+    points = studyset.get_points(analysis_ids)
+    assert isinstance(points, dict)
+
+    # Test get_images
+    images = studyset.get_images(analysis_ids)
+    assert isinstance(images, dict)
+
+    # Test get_metadata
+    metadata = studyset.get_metadata(analysis_ids)
+    assert isinstance(metadata, dict)
+
+    # Test get_annotations
+    annotations = studyset.get_annotations(analysis_ids)
+    assert isinstance(annotations, dict)
nimare/utils.py
CHANGED
@@ -1286,9 +1286,9 @@ def b_spline_bases(masker_voxels, spacing, margin=10):
     x_spline_coords = x_spline.nonzero()
     y_spline_coords = y_spline.nonzero()
     z_spline_coords = z_spline.nonzero()
-    x_spline_sparse = sparse.COO(x_spline_coords, x_spline[x_spline_coords])
-    y_spline_sparse = sparse.COO(y_spline_coords, y_spline[y_spline_coords])
-    z_spline_sparse = sparse.COO(z_spline_coords, z_spline[z_spline_coords])
+    x_spline_sparse = sparse.COO(x_spline_coords, x_spline[x_spline_coords], shape=x_spline.shape)
+    y_spline_sparse = sparse.COO(y_spline_coords, y_spline[y_spline_coords], shape=y_spline.shape)
+    z_spline_sparse = sparse.COO(z_spline_coords, z_spline[z_spline_coords], shape=z_spline.shape)
 
     # create spatial design matrix by tensor product of spline bases in 3 dimesion
     # Row sums of X are all 1=> There is no need to re-normalise X
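The `shape=` argument fixes a silent truncation: `sparse.COO` otherwise infers each dimension from the largest nonzero coordinate, so a spline basis whose trailing rows or columns are all zero would come back smaller than the dense array. A minimal demonstration with the pydata `sparse` package (array values are illustrative):

    import numpy as np
    import sparse

    x = np.array([[1.0, 0.0], [0.0, 0.0]])
    coords = x.nonzero()

    inferred = sparse.COO(coords, x[coords])                 # shape inferred as (1, 1)
    explicit = sparse.COO(coords, x[coords], shape=x.shape)  # keeps (2, 2)
    assert inferred.shape == (1, 1) and explicit.shape == (2, 2)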
{nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 benchmarks/bench_cbma.py,sha256=fg_EER8hohi6kD1Hno_uXsFntKuCLTvseu-6OqkHkoU,1382
 nimare/__init__.py,sha256=HHIq3EimSZSf3zJSDwuTjBig1GbRwHGYfRLSqI3yleE,802
-nimare/_version.py,sha256
+nimare/_version.py,sha256=yDyGWuluU_v_cYzPo2r_ijBugg3SveqkOJLRp7q7EJA,497
 nimare/base.py,sha256=9DlcRB2mW759p7XqpKG3wRE-MmPsEPiYTbq6V1Yile4,7826
 nimare/cli.py,sha256=Zvy5jN2KopH_MBke-gm8A0DbBZmIFGvnE1tjhbYib9I,3695
 nimare/correct.py,sha256=2eI0jR6_odND-2CzSlaxRU2um6ccLSapd7ERAYteBnE,17110
@@ -9,12 +9,12 @@ nimare/dataset.py,sha256=LmjDE1GQ3ud33l3plvms_Uv8GB9_Xb-6kPtkIV2eYTw,24650
 nimare/diagnostics.py,sha256=sCatkXUM9rvrY9MMz6Q66njWSiWGifWwSNEMu3ldnVs,20263
 nimare/estimator.py,sha256=DtsSIyPDrKkpi-KNv2m-cZMvJO7CCfHLXHRhCT73sbY,5063
 nimare/generate.py,sha256=L4_c2sLAlF7XDKTm-3q4oOx8pLID2NaxG9YET5KSIZw,12475
-nimare/io.py,sha256=
-nimare/nimads.py,sha256=
+nimare/io.py,sha256=76T9KLOorquImy-01cLG3pwuJLJm_UD_hAhK3_0qj7g,27299
+nimare/nimads.py,sha256=2s5QnaLvrBt-kMrImGhG_p6r0unysufIIcPczr2bG0c,24342
 nimare/results.py,sha256=7szcyR6PkZAXBqbIGGWrw1nW9j9QCdpdl4MxUK_1Wzc,8190
 nimare/stats.py,sha256=XhXfFj6KHTPVSTXhbEid0qt8HLqJD82Bl5T23qmaf40,10098
 nimare/transforms.py,sha256=_kZO8N3IEHxd6Ir4IcewZtkWHpFknnjEnjsaD9b-1pg,30136
-nimare/utils.py,sha256=
+nimare/utils.py,sha256=rMuT1mhEYrb3R_2EAsz5A_f5VPWXzqRkH_saQTFybm4,47002
 nimare/annotate/__init__.py,sha256=hTla1yFYTJ8PDjm72ReeHa3qf0Que50Ww0fqz3Z86JI,434
 nimare/annotate/cogat.py,sha256=xzrepAuTkRenbECn4XYwgyul45r0tIMxCDKQV_ZFVb8,7457
 nimare/annotate/gclda.py,sha256=P2OQjuJn3DX0GVPte_VIVZf0LfO-yE_OhLbs6DwMaHQ,42592
@@ -35,7 +35,7 @@ nimare/meta/cbmr.py,sha256=h9CNMHW4KUh-YQYyGDRzqPN7G1tkHYAhrSgxrP8E61s,47769
 nimare/meta/ibma.py,sha256=qVw3ZhOlK6hgTdZZwa7TPDpfrGLf52dN00wH2Gw-Z8c,65442
 nimare/meta/kernel.py,sha256=5wN-6dbLkzqQ0WUSTrC0DJPSbbyGiZCls9M_TAYhNGY,19169
 nimare/meta/models.py,sha256=0QPlQTjWaNTeI8qTX-DHMXVjQSRD72SfJ2RZIYBZnCg,47054
-nimare/meta/utils.py,sha256=
+nimare/meta/utils.py,sha256=84T4I-wn-HTx7zwMqh0TdvzUbBe_vcpxIU4jA_pfuFU,18083
 nimare/meta/cbma/__init__.py,sha256=bWLrv5tL03U2ITttnnz3yerbg74w44qkZgdy32QMhqQ,219
 nimare/meta/cbma/ale.py,sha256=ZNjXC4MXhfSHvrTRppY04NkGtz_Iri696k5QMuBog7o,39273
 nimare/meta/cbma/base.py,sha256=-gJ4hW6_6T5oto3pre9GbWodHWRmPS2WZ1LJjB0BtMI,38382
@@ -74,13 +74,13 @@ nimare/tests/test_diagnostics.py,sha256=VrfR_8nQKn2VF7dFdnTM7ZQy3Ou5eHdpaLhml5T6
 nimare/tests/test_estimator_performance.py,sha256=tbK2Qr83rB0in-pB6MccnjLg4iHSyfilx-hTNDWQfe4,12749
 nimare/tests/test_extract.py,sha256=XJSxZTdy_hAej1J9CFK9zQk29rAM5KPiZKlopmUVCJ4,1206
 nimare/tests/test_generate.py,sha256=LSh2APJsg87u2s2zydkrre3RVk_ZGpoB4d7uuvIPWYE,7318
-nimare/tests/test_io.py,sha256=
+nimare/tests/test_io.py,sha256=QKr_zRGu8tyrpiLoLAjCV9ektxCTHRlKPWgyJRqQ9T8,10397
 nimare/tests/test_meta_ale.py,sha256=hccXSNzLGUgj6E4tCsiHZpuUFoBxXkP293-vtUS5jdE,11791
 nimare/tests/test_meta_cbmr.py,sha256=cl_pUA1dxXpDD5Ci_tllSVG0uKykuneHDbUxGY4w7Ks,9776
 nimare/tests/test_meta_ibma.py,sha256=Yw4F0_pr3cpVSe7oeMlK0-btg1Uw58cenklOsIw87Pc,7775
 nimare/tests/test_meta_kernel.py,sha256=Edk6lOsnqokg86mp9jAkokA203K61R7pjJEmyEEzV7E,8450
 nimare/tests/test_meta_mkda.py,sha256=9PuzNUKrTBjbCHdSnuOAToXbV7wp1O0TCdD537qGQkA,9206
-nimare/tests/test_nimads.py,sha256=
+nimare/tests/test_nimads.py,sha256=3yzCO8rmUVfEYAt3HNnJcyAENorJ5BOWdJXY3hjrdP0,9807
 nimare/tests/test_reports.py,sha256=Qdz-PHjQwOneRmSCo0ac2d67BeGypWJIMi4OoiQrNik,3293
 nimare/tests/test_stats.py,sha256=_GhpUC1u4hnFR2SZ-sHQqkJ5MwsyPsvwPEd2GkQmsHY,4030
 nimare/tests/test_transforms.py,sha256=mzEnufefhybs4r_dfRY6zQUAShepPMwKFY7S5amq3cs,10378
@@ -111,9 +111,9 @@ nimare/workflows/cbma.py,sha256=2jYJs9kH7_LzFP6d7-oTHiTTgAFbtmiBNtBXSCSZPjg,7052
 nimare/workflows/ibma.py,sha256=lAkWtqSqnZiUUV460Bh046U9LeGhnry3bl8BFi-tx7s,4289
 nimare/workflows/macm.py,sha256=mVUBeKbTawhU93ApnkunZSUXZWo7qBPrM3dMGWfl0ik,2531
 nimare/workflows/misc.py,sha256=OWgHlSAnRI0-5Seii-bd48piIYsfEAF_aNKGorH1yJQ,1827
-nimare-0.
-nimare-0.
-nimare-0.
-nimare-0.
-nimare-0.
-nimare-0.
+nimare-0.5.0.dist-info/LICENSE,sha256=PWPXnCGWh-FMiBZ61OnQ2BHFjPPlJJ7F0kFx_ryzp-M,1074
+nimare-0.5.0.dist-info/METADATA,sha256=6kJAmkMFEOP6_q-95CW66xbYhWrnT-JfYvPSS9yUaDU,4695
+nimare-0.5.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+nimare-0.5.0.dist-info/entry_points.txt,sha256=3w_hk9N2PWnKZkCaJyDlc0_kdn3rh35aiI21rSdvsuA,44
+nimare-0.5.0.dist-info/top_level.txt,sha256=XnOcEXMs0BxdI8t3_ksTl96T8hykn9L7-bxLLraVrTI,18
+nimare-0.5.0.dist-info/RECORD,,
{nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/LICENSE
File without changes
{nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/WHEEL
File without changes
{nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/entry_points.txt
File without changes
{nimare-0.4.2rc4.dist-info → nimare-0.5.0.dist-info}/top_level.txt
File without changes