nimare 0.4.2__tar.gz → 0.5.0__tar.gz

This diff shows the changes between publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (130)
  1. {nimare-0.4.2/nimare.egg-info → nimare-0.5.0}/PKG-INFO +1 -1
  2. {nimare-0.4.2 → nimare-0.5.0}/nimare/_version.py +3 -3
  3. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/utils.py +1 -1
  4. {nimare-0.4.2 → nimare-0.5.0}/nimare/nimads.py +280 -34
  5. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_io.py +0 -12
  6. nimare-0.5.0/nimare/tests/test_nimads.py +284 -0
  7. {nimare-0.4.2 → nimare-0.5.0}/nimare/utils.py +3 -3
  8. {nimare-0.4.2 → nimare-0.5.0/nimare.egg-info}/PKG-INFO +1 -1
  9. nimare-0.4.2/nimare/tests/test_nimads.py +0 -21
  10. {nimare-0.4.2 → nimare-0.5.0}/LICENSE +0 -0
  11. {nimare-0.4.2 → nimare-0.5.0}/MANIFEST.in +0 -0
  12. {nimare-0.4.2 → nimare-0.5.0}/README.md +0 -0
  13. {nimare-0.4.2 → nimare-0.5.0}/benchmarks/__init__.py +0 -0
  14. {nimare-0.4.2 → nimare-0.5.0}/benchmarks/bench_cbma.py +0 -0
  15. {nimare-0.4.2 → nimare-0.5.0}/nimare/__init__.py +0 -0
  16. {nimare-0.4.2 → nimare-0.5.0}/nimare/annotate/__init__.py +0 -0
  17. {nimare-0.4.2 → nimare-0.5.0}/nimare/annotate/cogat.py +0 -0
  18. {nimare-0.4.2 → nimare-0.5.0}/nimare/annotate/gclda.py +0 -0
  19. {nimare-0.4.2 → nimare-0.5.0}/nimare/annotate/lda.py +0 -0
  20. {nimare-0.4.2 → nimare-0.5.0}/nimare/annotate/text.py +0 -0
  21. {nimare-0.4.2 → nimare-0.5.0}/nimare/annotate/utils.py +0 -0
  22. {nimare-0.4.2 → nimare-0.5.0}/nimare/base.py +0 -0
  23. {nimare-0.4.2 → nimare-0.5.0}/nimare/cli.py +0 -0
  24. {nimare-0.4.2 → nimare-0.5.0}/nimare/correct.py +0 -0
  25. {nimare-0.4.2 → nimare-0.5.0}/nimare/dataset.py +0 -0
  26. {nimare-0.4.2 → nimare-0.5.0}/nimare/decode/__init__.py +0 -0
  27. {nimare-0.4.2 → nimare-0.5.0}/nimare/decode/base.py +0 -0
  28. {nimare-0.4.2 → nimare-0.5.0}/nimare/decode/continuous.py +0 -0
  29. {nimare-0.4.2 → nimare-0.5.0}/nimare/decode/discrete.py +0 -0
  30. {nimare-0.4.2 → nimare-0.5.0}/nimare/decode/encode.py +0 -0
  31. {nimare-0.4.2 → nimare-0.5.0}/nimare/decode/utils.py +0 -0
  32. {nimare-0.4.2 → nimare-0.5.0}/nimare/diagnostics.py +0 -0
  33. {nimare-0.4.2 → nimare-0.5.0}/nimare/estimator.py +0 -0
  34. {nimare-0.4.2 → nimare-0.5.0}/nimare/extract/__init__.py +0 -0
  35. {nimare-0.4.2 → nimare-0.5.0}/nimare/extract/extract.py +0 -0
  36. {nimare-0.4.2 → nimare-0.5.0}/nimare/extract/utils.py +0 -0
  37. {nimare-0.4.2 → nimare-0.5.0}/nimare/generate.py +0 -0
  38. {nimare-0.4.2 → nimare-0.5.0}/nimare/io.py +0 -0
  39. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/__init__.py +0 -0
  40. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/cbma/__init__.py +0 -0
  41. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/cbma/ale.py +0 -0
  42. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/cbma/base.py +0 -0
  43. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/cbma/mkda.py +0 -0
  44. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/cbmr.py +0 -0
  45. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/ibma.py +0 -0
  46. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/kernel.py +0 -0
  47. {nimare-0.4.2 → nimare-0.5.0}/nimare/meta/models.py +0 -0
  48. {nimare-0.4.2 → nimare-0.5.0}/nimare/reports/__init__.py +0 -0
  49. {nimare-0.4.2 → nimare-0.5.0}/nimare/reports/base.py +0 -0
  50. {nimare-0.4.2 → nimare-0.5.0}/nimare/reports/default.yml +0 -0
  51. {nimare-0.4.2 → nimare-0.5.0}/nimare/reports/figures.py +0 -0
  52. {nimare-0.4.2 → nimare-0.5.0}/nimare/reports/report.tpl +0 -0
  53. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/__init__.py +0 -0
  54. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/atlases/Harvard-Oxford-LICENSE +0 -0
  55. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/atlases/HarvardOxford-cort-maxprob-thr25-2mm.nii.gz +0 -0
  56. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/database_file_manifest.json +0 -0
  57. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/english_spellings.csv +0 -0
  58. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/filenames.json +0 -0
  59. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/neurosynth_laird_studies.json +0 -0
  60. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/neurosynth_stoplist.txt +0 -0
  61. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/nidm_pain_dset.json +0 -0
  62. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/references.bib +0 -0
  63. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/semantic_knowledge_children.txt +0 -0
  64. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/semantic_relatedness_children.txt +0 -0
  65. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/templates/MNI152_2x2x2_brainmask.nii.gz +0 -0
  66. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz +0 -0
  67. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/templates/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz +0 -0
  68. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz +0 -0
  69. {nimare-0.4.2 → nimare-0.5.0}/nimare/resources/templates/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz +0 -0
  70. {nimare-0.4.2 → nimare-0.5.0}/nimare/results.py +0 -0
  71. {nimare-0.4.2 → nimare-0.5.0}/nimare/stats.py +0 -0
  72. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/__init__.py +0 -0
  73. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/conftest.py +0 -0
  74. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/amygdala_roi.nii.gz +0 -0
  75. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/data-neurosynth_version-7_coordinates.tsv.gz +0 -0
  76. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/data-neurosynth_version-7_metadata.tsv.gz +0 -0
  77. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz +0 -0
  78. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/data-neurosynth_version-7_vocab-terms_vocabulary.txt +0 -0
  79. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/neurosynth_dset.json +0 -0
  80. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/neurosynth_laird_studies.json +0 -0
  81. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/nidm_pain_dset.json +0 -0
  82. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/nimads_annotation.json +0 -0
  83. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/nimads_studyset.json +0 -0
  84. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_baseline.txt +0 -0
  85. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_pain_dataset.json +0 -0
  86. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_pain_dataset_multiple_contrasts.json +0 -0
  87. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_sleuth_file.txt +0 -0
  88. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_sleuth_file2.txt +0 -0
  89. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_sleuth_file3.txt +0 -0
  90. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_sleuth_file4.txt +0 -0
  91. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/data/test_sleuth_file5.txt +0 -0
  92. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_annotate_cogat.py +0 -0
  93. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_annotate_gclda.py +0 -0
  94. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_annotate_lda.py +0 -0
  95. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_dataset.py +0 -0
  96. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_decode_continuous.py +0 -0
  97. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_decode_discrete.py +0 -0
  98. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_diagnostics.py +0 -0
  99. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_estimator_performance.py +0 -0
  100. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_extract.py +0 -0
  101. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_generate.py +0 -0
  102. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_meta_ale.py +0 -0
  103. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_meta_cbmr.py +0 -0
  104. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_meta_ibma.py +0 -0
  105. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_meta_kernel.py +0 -0
  106. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_meta_mkda.py +0 -0
  107. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_reports.py +0 -0
  108. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_stats.py +0 -0
  109. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_transforms.py +0 -0
  110. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_utils.py +0 -0
  111. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/test_workflows.py +0 -0
  112. {nimare-0.4.2 → nimare-0.5.0}/nimare/tests/utils.py +0 -0
  113. {nimare-0.4.2 → nimare-0.5.0}/nimare/transforms.py +0 -0
  114. {nimare-0.4.2 → nimare-0.5.0}/nimare/workflows/__init__.py +0 -0
  115. {nimare-0.4.2 → nimare-0.5.0}/nimare/workflows/base.py +0 -0
  116. {nimare-0.4.2 → nimare-0.5.0}/nimare/workflows/cbma.py +0 -0
  117. {nimare-0.4.2 → nimare-0.5.0}/nimare/workflows/ibma.py +0 -0
  118. {nimare-0.4.2 → nimare-0.5.0}/nimare/workflows/macm.py +0 -0
  119. {nimare-0.4.2 → nimare-0.5.0}/nimare/workflows/misc.py +0 -0
  120. {nimare-0.4.2 → nimare-0.5.0}/nimare.egg-info/SOURCES.txt +0 -0
  121. {nimare-0.4.2 → nimare-0.5.0}/nimare.egg-info/dependency_links.txt +0 -0
  122. {nimare-0.4.2 → nimare-0.5.0}/nimare.egg-info/entry_points.txt +0 -0
  123. {nimare-0.4.2 → nimare-0.5.0}/nimare.egg-info/not-zip-safe +0 -0
  124. {nimare-0.4.2 → nimare-0.5.0}/nimare.egg-info/requires.txt +0 -0
  125. {nimare-0.4.2 → nimare-0.5.0}/nimare.egg-info/top_level.txt +0 -0
  126. {nimare-0.4.2 → nimare-0.5.0}/pypi_description.md +0 -0
  127. {nimare-0.4.2 → nimare-0.5.0}/pyproject.toml +0 -0
  128. {nimare-0.4.2 → nimare-0.5.0}/setup.cfg +0 -0
  129. {nimare-0.4.2 → nimare-0.5.0}/setup.py +0 -0
  130. {nimare-0.4.2 → nimare-0.5.0}/versioneer.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nimare
-Version: 0.4.2
+Version: 0.5.0
 Summary: NiMARE: Neuroimaging Meta-Analysis Research Environment
 Home-page: https://github.com/neurostuff/NiMARE
 Author: NiMARE developers

nimare/_version.py
@@ -8,11 +8,11 @@ import json

 version_json = '''
 {
- "date": "2025-03-11T16:24:19-0500",
+ "date": "2025-04-25T13:22:39-0500",
  "dirty": false,
  "error": null,
- "full-revisionid": "b838dbbd3afa34668c431ba7cfee568650cee0b4",
- "version": "0.4.2"
+ "full-revisionid": "9e3b73cbb6235b31a4fab7a9a118e32c1d2f9932",
+ "version": "0.5.0"
 }
 ''' # END VERSION_JSON


nimare/meta/utils.py
@@ -33,7 +33,7 @@ def _convolve_sphere(kernel, ijks, index, max_shape):

 def np_all_axis1(x):
     """Numba compatible version of np.all(x, axis=1)."""
-    out = np.ones(x.shape[0], dtype=np.bool8)
+    out = np.ones(x.shape[0], dtype=np.bool_)
     for i in range(x.shape[1]):
         out = np.logical_and(out, x[:, i])
     return out
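
This one-character change tracks NumPy's deprecation cycle: the np.bool8 alias was deprecated in NumPy 1.24 and removed in NumPy 2.0, while np.bool_ works on both. A quick sanity check of the helper (a standalone sketch, not part of the package):

    import numpy as np

    def np_all_axis1(x):
        """Numba compatible version of np.all(x, axis=1)."""
        out = np.ones(x.shape[0], dtype=np.bool_)  # np.bool8 raises on NumPy >= 2.0
        for i in range(x.shape[1]):
            out = np.logical_and(out, x[:, i])
        return out

    x = np.array([[True, True], [True, False]])
    assert np.array_equal(np_all_axis1(x), np.all(x, axis=1))  # [True, False]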

nimare/nimads.py
@@ -4,7 +4,11 @@ import json
 import weakref
 from copy import deepcopy

+import numpy as np
+from nilearn._utils import load_niimg
+
 from nimare.io import convert_nimads_to_dataset
+from nimare.utils import mm2vox


 class Studyset:
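
The new imports support the coordinate- and mask-based queries added further down: load_niimg normalizes a path or image object to a nibabel image, and mm2vox maps world-space (mm) coordinates to voxel indices. Conceptually, mm2vox applies the inverse of the image affine; a hedged sketch of the idea (not necessarily NiMARE's exact implementation):

    import numpy as np

    def mm2vox_sketch(xyz, affine):
        """Map (N, 3) mm coordinates to voxel indices via the inverse affine."""
        xyz = np.atleast_2d(xyz)
        inv = np.linalg.inv(affine)
        ijk = xyz @ inv[:3, :3].T + inv[:3, 3]  # inverse affine, row-vector form
        return np.round(ijk).astype(int)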
@@ -84,7 +88,7 @@ class Studyset:
         for study in studyset.studies:
             if len(study.analyses) > 1:
                 source_lst = [analysis.to_dict() for analysis in study.analyses]
-                ids, names, conditions, images, points, weights = [
+                ids, names, conditions, images, points, weights, metadata = [
                     [source[key] for source in source_lst] for key in source_lst[0]
                 ]
@@ -95,6 +99,7 @@ class Studyset:
                     "images": [image for i_list in images for image in i_list],
                     "points": [point for p_list in points for point in p_list],
                     "weights": [weight for w_list in weights for weight in w_list],
+                    "metadata": {k: v for m_dict in metadata for k, v in m_dict.items()},
                 }
                 study.analyses = [Analysis(new_source)]

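
One detail of the flattening above: the dict comprehension merges the per-analysis metadata dicts in order, so on a key collision the value from a later analysis silently wins. A small illustration with made-up values:

    metadata = [{"n_subjects": 12, "task": "pain"}, {"n_subjects": 15}]
    merged = {k: v for m_dict in metadata for k, v in m_dict.items()}
    print(merged)  # {'n_subjects': 15, 'task': 'pain'}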
@@ -118,12 +123,42 @@ class Studyset:
         return convert_nimads_to_dataset(self)

     def load(self, filename):
-        """Load a Studyset from a pickled file."""
-        raise NotImplementedError("Loading from pickled files is not yet supported.")
+        """Load a Studyset from a pickled file.
+
+        Parameters
+        ----------
+        filename : str
+            Path to the pickled file to load from.
+
+        Returns
+        -------
+        Studyset
+            The loaded Studyset object.
+        """
+        import pickle
+
+        with open(filename, "rb") as f:
+            loaded_data = pickle.load(f)
+
+        # Update current instance with loaded data
+        self.id = loaded_data.id
+        self.name = loaded_data.name
+        self.studies = loaded_data.studies
+        self._annotations = loaded_data._annotations
+        return self

     def save(self, filename):
-        """Write the Studyset to a pickled file."""
-        raise NotImplementedError("Saving to pickled files is not yet supported.")
+        """Write the Studyset to a pickled file.
+
+        Parameters
+        ----------
+        filename : str
+            Path where the pickled file should be saved.
+        """
+        import pickle
+
+        with open(filename, "wb") as f:
+            pickle.dump(self, f)

     def copy(self):
         """Create a copy of the Studyset."""
@@ -141,49 +176,221 @@ class Studyset:

         for annot in annotations:
             annot["notes"] = [n for n in annot["notes"] if n["analysis"] in analyses]
-            studyset.annotation = annot
+            studyset.annotations = annot

         return studyset

     def merge(self, right):
-        """Merge a separate Studyset into the current one."""
-        raise NotImplementedError("Merging Studysets is not yet supported.")
-
-    def update_image_path(self, new_path):
-        """Point to a new location for image files on the local filesystem."""
-        raise NotImplementedError("Updating image paths is not yet supported.")
+        """Merge a separate Studyset into the current one.
+
+        Parameters
+        ----------
+        right : Studyset
+            The other Studyset to merge with this one.
+
+        Returns
+        -------
+        Studyset
+            A new Studyset containing merged studies from both input Studysets.
+            For studies with the same ID, their analyses and metadata are combined,
+            with data from self (left) taking precedence in case of conflicts.
+        """
+        if not isinstance(right, Studyset):
+            raise ValueError("Can only merge with another Studyset")
+
+        # Create new source dictionary starting with left (self) studyset
+        merged_source = self.to_dict()
+        merged_source["id"] = f"{self.id}_{right.id}"
+        merged_source["name"] = f"Merged: {self.name} + {right.name}"
+
+        # Create lookup of existing studies by ID
+        left_studies = {study["id"]: study for study in merged_source["studies"]}
+
+        # Process studies from right studyset
+        right_dict = right.to_dict()
+        for right_study in right_dict["studies"]:
+            study_id = right_study["id"]
+
+            if study_id in left_studies:
+                # Merge study data
+                left_study = left_studies[study_id]
+
+                # Keep metadata from left unless missing
+                left_study["metadata"].update(
+                    {
+                        k: v
+                        for k, v in right_study["metadata"].items()
+                        if k not in left_study["metadata"]
+                    }
+                )
+
+                # Keep basic info from left unless empty
+                for field in ["name", "authors", "publication"]:
+                    if not left_study[field]:
+                        left_study[field] = right_study[field]
+
+                # Combine analyses, avoiding duplicates by ID
+                left_analyses = {a["id"]: a for a in left_study["analyses"]}
+                for right_analysis in right_study["analyses"]:
+                    if right_analysis["id"] not in left_analyses:
+                        left_study["analyses"].append(right_analysis)
+            else:
+                # Add new study
+                merged_source["studies"].append(right_study)
+
+        # Create new merged studyset
+        merged = self.__class__(source=merged_source)
+
+        # Merge annotations, preferring left's annotations for conflicts
+        existing_annot_ids = {a.id for a in self.annotations}
+        for right_annot in right.annotations:
+            if right_annot.id not in existing_annot_ids:
+                merged.annotations = right_annot.to_dict()
+
+        return merged

     def get_analyses_by_coordinates(self, xyz, r=None, n=None):
-        """Extract a list of Analyses with at least one Point near the requested coordinates."""
-        raise NotImplementedError("Getting analyses by coordinates is not yet supported.")
+        """Extract a list of Analyses with at least one Point near the requested coordinates.
+
+        Parameters
+        ----------
+        xyz : array_like
+            1 x 3 array of coordinates in mm space to search from
+        r : float, optional
+            Search radius in millimeters.
+            Mutually exclusive with n.
+        n : int, optional
+            Number of closest analyses to return.
+            Mutually exclusive with r.
+
+        Returns
+        -------
+        list[str]
+            A list of Analysis IDs with at least one point within the search criteria.
+
+        Notes
+        -----
+        Either r or n must be provided, but not both.
+        """
+        if (r is None and n is None) or (r is not None and n is not None):
+            raise ValueError("Exactly one of r or n must be provided.")
+
+        xyz = np.asarray(xyz).ravel()
+        if xyz.shape != (3,):
+            raise ValueError("xyz must be a 1 x 3 array-like object.")
+
+        # Extract all points from all analyses
+        all_points = []
+        analysis_ids = []
+        for study in self.studies:
+            for analysis in study.analyses:
+                for point in analysis.points:
+                    if hasattr(point, "x") and hasattr(point, "y") and hasattr(point, "z"):
+                        all_points.append([point.x, point.y, point.z])
+                        analysis_ids.append(analysis.id)
+
+        if not all_points:  # Return empty list if no coordinates found
+            return []
+
+        all_points = np.array(all_points)
+
+        # Calculate Euclidean distances to all points
+        distances = np.sqrt(np.sum((all_points - xyz) ** 2, axis=1))
+
+        if r is not None:
+            # Find analyses with points within radius r
+            within_radius = distances <= r
+            found_analyses = set(np.array(analysis_ids)[within_radius])
+        else:
+            # Find n closest analyses
+            closest_n_idx = np.argsort(distances)[:n]
+            found_analyses = set(np.array(analysis_ids)[closest_n_idx])
+
+        return list(found_analyses)

     def get_analyses_by_mask(self, img):
-        """Extract a list of Analyses with at least one Point in the specified mask."""
-        raise NotImplementedError("Getting analyses by mask is not yet supported.")
+        """Extract a list of Analyses with at least one Point in the specified mask.

-    def get_analyses_by_annotations(self):
-        """Extract a list of Analyses with a given label/annotation."""
-        raise NotImplementedError("Getting analyses by annotations is not yet supported.")
+        Parameters
+        ----------
+        img : img_like
+            Mask across which to search for coordinates.

-    def get_analyses_by_texts(self):
-        """Extract a list of Analyses with a given text."""
-        raise NotImplementedError("Getting analyses by texts is not yet supported.")
+        Returns
+        -------
+        list[str]
+            A list of Analysis IDs with at least one point in the mask.
+        """
+        # Load mask
+        mask = load_niimg(img)
+
+        # Extract all points from all analyses
+        all_points = []
+        analysis_ids = []
+        for study in self.studies:
+            for analysis in study.analyses:
+                for point in analysis.points:
+                    if hasattr(point, "x") and hasattr(point, "y") and hasattr(point, "z"):
+                        all_points.append([point.x, point.y, point.z])
+                        analysis_ids.append(analysis.id)
+
+        if not all_points:  # Return empty list if no coordinates found
+            return []
+
+        # Convert to voxel coordinates
+        all_points = np.array(all_points)
+        ijk = mm2vox(all_points, mask.affine)
+
+        # Get mask coordinates
+        mask_data = mask.get_fdata()
+        mask_coords = np.vstack(np.where(mask_data)).T

-    def get_analyses_by_images(self):
-        """Extract a list of Analyses with a given image."""
-        raise NotImplementedError("Getting analyses by images is not yet supported.")
+        # Check for presence of coordinates in mask
+        in_mask = np.any(np.all(ijk[:, None] == mask_coords[None, :], axis=-1), axis=-1)

-    def get_analyses_by_metadata(self):
+        # Get unique analysis IDs where points are in mask
+        found_analyses = set(np.array(analysis_ids)[in_mask])
+
+        return list(found_analyses)
+
+    def get_analyses_by_annotations(self, key, value=None):
+        """Extract a list of Analyses with a given label/annotation."""
+        annotations = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                a_annot = analysis.annotations
+                if key in a_annot and (value is None or a_annot[key] == value):
+                    annotations[analysis.id] = {key: a_annot[key]}
+        return annotations
+
+    def get_analyses_by_metadata(self, key, value=None):
         """Extract a list of Analyses with a metadata field/value."""
-        raise NotImplementedError("Getting analyses by metadata is not yet supported.")
+        metadata = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                a_metadata = analysis.metadata
+                if key in a_metadata and (value is None or a_metadata[key] == value):
+                    metadata[analysis.id] = {key: a_metadata[key]}
+        return metadata

     def get_points(self, analyses):
         """Collect Points associated with specified Analyses."""
-        raise NotImplementedError("Getting points is not yet supported.")
+        points = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    points[analysis.id] = analysis.points
+        return points

     def get_annotations(self, analyses):
         """Collect Annotations associated with specified Analyses."""
-        raise NotImplementedError("Getting annotations is not yet supported.")
+        annotations = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    annotations[analysis.id] = analysis.annotations
+
+        return annotations

     def get_texts(self, analyses):
         """Collect texts associated with specified Analyses."""
@@ -191,11 +398,32 @@ class Studyset:

     def get_images(self, analyses):
         """Collect image files associated with specified Analyses."""
-        raise NotImplementedError("Getting images is not yet supported.")
+        images = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    images[analysis.id] = analysis.images
+        return images

     def get_metadata(self, analyses):
-        """Collect metadata associated with specified Analyses."""
-        raise NotImplementedError("Getting metadata is not yet supported.")
+        """Collect metadata associated with specified Analyses.
+
+        Parameters
+        ----------
+        analyses : list of str
+            List of Analysis IDs to get metadata for.
+
+        Returns
+        -------
+        dict[str, dict]
+            Dictionary mapping Analysis IDs to their combined metadata (including study metadata).
+        """
+        metadata = {}
+        for study in self.studies:
+            for analysis in study.analyses:
+                if analysis.id in analyses:
+                    metadata[analysis.id] = analysis.get_metadata()
+        return metadata


 class Study:
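
Studyset.get_metadata delegates to the new Analysis.get_metadata (shown below), so each returned dict already includes the parent study's metadata. Continuing the sketch above:

    meta = ss.get_metadata(near_ids)  # {analysis_id: merged study + analysis metadata}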
@@ -226,7 +454,7 @@ class Study:
         self.authors = source["authors"] or ""
         self.publication = source["publication"] or ""
         self.metadata = source.get("metadata", {}) or {}
-        self.analyses = [Analysis(a) for a in source["analyses"]]
+        self.analyses = [Analysis(a, study=self) for a in source["analyses"]]

     def __repr__(self):
         """My Simple representation."""
@@ -285,7 +513,7 @@ class Analysis:
     Should the images attribute be a list instead, if the Images contain type information?
     """

-    def __init__(self, source):
+    def __init__(self, source, study=None):
         self.id = source["id"]
         self.name = source["name"]
         self.conditions = [
@@ -295,6 +523,7 @@ class Analysis:
         self.points = [Point(p) for p in source["points"]]
         self.metadata = source.get("metadata", {}) or {}
         self.annotations = {}
+        self._study = weakref.proxy(study) if study else None

     def __repr__(self):
         """My Simple representation."""
@@ -306,6 +535,22 @@ class Analysis:
             " ".join([self.name, f"images: {len(self.images)}", f"points: {len(self.points)}"])
         )

+    def get_metadata(self) -> "dict[str, any]":
+        """Get combined metadata from both analysis and parent study.
+
+        Returns
+        -------
+        dict[str, any]
+            Combined metadata dictionary with analysis metadata taking precedence
+            over study metadata for any overlapping keys.
+        """
+        if self._study is None:
+            return self.metadata.copy()
+
+        combined_metadata = self._study.metadata.copy()
+        combined_metadata.update(self.metadata)
+        return combined_metadata
+
     def to_dict(self):
         """Convert the Analysis to a dictionary."""
         return {
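
The precedence rule in get_metadata is plain dict-update semantics: study-level metadata is the base, and analysis-level keys overwrite it. With made-up values:

    study_meta = {"sample_size": 20, "country": "US"}
    analysis_meta = {"sample_size": 18}

    combined = study_meta.copy()
    combined.update(analysis_meta)
    print(combined)  # {'sample_size': 18, 'country': 'US'}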
@@ -318,6 +563,7 @@ class Analysis:
             "images": [i.to_dict() for i in self.images],
             "points": [p.to_dict() for p in self.points],
             "weights": [c.to_dict()["weight"] for c in self.conditions],
+            "metadata": self.metadata,
         }


nimare/tests/test_io.py
@@ -64,18 +64,6 @@ def test_analysis_to_dict_invalid_sample_sizes_type(example_nimads_studyset):
         io.convert_nimads_to_dataset(studyset)


-def test_analysis_to_dict_invalid_annotations_format(example_nimads_studyset):
-    """Test _analysis_to_dict raises ValueError when annotations are in an invalid format."""
-    studyset = Studyset(example_nimads_studyset)
-    # Here we assume that the annotation is expected to be a dict
-    # Set annotation to an invalid format (e.g., a string)
-    for study in studyset.studies:
-        for analysis in study.analyses:
-            analysis.metadata["annotations"] = "invalid_format"
-    with pytest.raises(TypeError):
-        io.convert_nimads_to_dataset(studyset)
-
-
 def test_convert_sleuth_to_dataset_smoke():
     """Smoke test for Sleuth text file conversion."""
     sleuth_file = os.path.join(get_test_data_path(), "test_sleuth_file.txt")