junifer 0.0.3.dev186__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178)
  1. junifer/_version.py +14 -2
  2. junifer/api/cli.py +162 -17
  3. junifer/api/functions.py +87 -419
  4. junifer/api/parser.py +24 -0
  5. junifer/api/queue_context/__init__.py +8 -0
  6. junifer/api/queue_context/gnu_parallel_local_adapter.py +258 -0
  7. junifer/api/queue_context/htcondor_adapter.py +365 -0
  8. junifer/api/queue_context/queue_context_adapter.py +60 -0
  9. junifer/api/queue_context/tests/test_gnu_parallel_local_adapter.py +192 -0
  10. junifer/api/queue_context/tests/test_htcondor_adapter.py +257 -0
  11. junifer/api/res/afni/run_afni_docker.sh +6 -6
  12. junifer/api/res/ants/ResampleImage +3 -0
  13. junifer/api/res/ants/antsApplyTransforms +3 -0
  14. junifer/api/res/ants/antsApplyTransformsToPoints +3 -0
  15. junifer/api/res/ants/run_ants_docker.sh +39 -0
  16. junifer/api/res/fsl/applywarp +3 -0
  17. junifer/api/res/fsl/flirt +3 -0
  18. junifer/api/res/fsl/img2imgcoord +3 -0
  19. junifer/api/res/fsl/run_fsl_docker.sh +39 -0
  20. junifer/api/res/fsl/std2imgcoord +3 -0
  21. junifer/api/res/run_conda.sh +4 -4
  22. junifer/api/res/run_venv.sh +22 -0
  23. junifer/api/tests/data/partly_cloudy_agg_mean_tian.yml +16 -0
  24. junifer/api/tests/test_api_utils.py +21 -3
  25. junifer/api/tests/test_cli.py +232 -9
  26. junifer/api/tests/test_functions.py +211 -439
  27. junifer/api/tests/test_parser.py +1 -1
  28. junifer/configs/juseless/datagrabbers/aomic_id1000_vbm.py +6 -1
  29. junifer/configs/juseless/datagrabbers/camcan_vbm.py +6 -1
  30. junifer/configs/juseless/datagrabbers/ixi_vbm.py +6 -1
  31. junifer/configs/juseless/datagrabbers/tests/test_ucla.py +8 -8
  32. junifer/configs/juseless/datagrabbers/ucla.py +44 -26
  33. junifer/configs/juseless/datagrabbers/ukb_vbm.py +6 -1
  34. junifer/data/VOIs/meta/AutobiographicalMemory_VOIs.txt +23 -0
  35. junifer/data/VOIs/meta/Power2013_MNI_VOIs.tsv +264 -0
  36. junifer/data/__init__.py +4 -0
  37. junifer/data/coordinates.py +298 -31
  38. junifer/data/masks.py +360 -28
  39. junifer/data/parcellations.py +621 -188
  40. junifer/data/template_spaces.py +190 -0
  41. junifer/data/tests/test_coordinates.py +34 -3
  42. junifer/data/tests/test_data_utils.py +1 -0
  43. junifer/data/tests/test_masks.py +202 -86
  44. junifer/data/tests/test_parcellations.py +266 -55
  45. junifer/data/tests/test_template_spaces.py +104 -0
  46. junifer/data/utils.py +4 -2
  47. junifer/datagrabber/__init__.py +1 -0
  48. junifer/datagrabber/aomic/id1000.py +111 -70
  49. junifer/datagrabber/aomic/piop1.py +116 -53
  50. junifer/datagrabber/aomic/piop2.py +116 -53
  51. junifer/datagrabber/aomic/tests/test_id1000.py +27 -27
  52. junifer/datagrabber/aomic/tests/test_piop1.py +27 -27
  53. junifer/datagrabber/aomic/tests/test_piop2.py +27 -27
  54. junifer/datagrabber/base.py +62 -10
  55. junifer/datagrabber/datalad_base.py +0 -2
  56. junifer/datagrabber/dmcc13_benchmark.py +372 -0
  57. junifer/datagrabber/hcp1200/datalad_hcp1200.py +5 -0
  58. junifer/datagrabber/hcp1200/hcp1200.py +30 -13
  59. junifer/datagrabber/pattern.py +133 -27
  60. junifer/datagrabber/pattern_datalad.py +111 -13
  61. junifer/datagrabber/tests/test_base.py +57 -6
  62. junifer/datagrabber/tests/test_datagrabber_utils.py +204 -76
  63. junifer/datagrabber/tests/test_datalad_base.py +0 -6
  64. junifer/datagrabber/tests/test_dmcc13_benchmark.py +256 -0
  65. junifer/datagrabber/tests/test_multiple.py +43 -10
  66. junifer/datagrabber/tests/test_pattern.py +125 -178
  67. junifer/datagrabber/tests/test_pattern_datalad.py +44 -25
  68. junifer/datagrabber/utils.py +151 -16
  69. junifer/datareader/default.py +36 -10
  70. junifer/external/nilearn/junifer_nifti_spheres_masker.py +6 -0
  71. junifer/markers/base.py +25 -16
  72. junifer/markers/collection.py +35 -16
  73. junifer/markers/complexity/__init__.py +27 -0
  74. junifer/markers/complexity/complexity_base.py +149 -0
  75. junifer/markers/complexity/hurst_exponent.py +136 -0
  76. junifer/markers/complexity/multiscale_entropy_auc.py +140 -0
  77. junifer/markers/complexity/perm_entropy.py +132 -0
  78. junifer/markers/complexity/range_entropy.py +136 -0
  79. junifer/markers/complexity/range_entropy_auc.py +145 -0
  80. junifer/markers/complexity/sample_entropy.py +134 -0
  81. junifer/markers/complexity/tests/test_complexity_base.py +19 -0
  82. junifer/markers/complexity/tests/test_hurst_exponent.py +69 -0
  83. junifer/markers/complexity/tests/test_multiscale_entropy_auc.py +68 -0
  84. junifer/markers/complexity/tests/test_perm_entropy.py +68 -0
  85. junifer/markers/complexity/tests/test_range_entropy.py +69 -0
  86. junifer/markers/complexity/tests/test_range_entropy_auc.py +69 -0
  87. junifer/markers/complexity/tests/test_sample_entropy.py +68 -0
  88. junifer/markers/complexity/tests/test_weighted_perm_entropy.py +68 -0
  89. junifer/markers/complexity/weighted_perm_entropy.py +133 -0
  90. junifer/markers/falff/_afni_falff.py +153 -0
  91. junifer/markers/falff/_junifer_falff.py +142 -0
  92. junifer/markers/falff/falff_base.py +91 -84
  93. junifer/markers/falff/falff_parcels.py +61 -45
  94. junifer/markers/falff/falff_spheres.py +64 -48
  95. junifer/markers/falff/tests/test_falff_parcels.py +89 -121
  96. junifer/markers/falff/tests/test_falff_spheres.py +92 -127
  97. junifer/markers/functional_connectivity/crossparcellation_functional_connectivity.py +1 -0
  98. junifer/markers/functional_connectivity/edge_functional_connectivity_parcels.py +1 -0
  99. junifer/markers/functional_connectivity/functional_connectivity_base.py +1 -0
  100. junifer/markers/functional_connectivity/tests/test_crossparcellation_functional_connectivity.py +46 -44
  101. junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_parcels.py +34 -39
  102. junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_spheres.py +40 -52
  103. junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py +62 -70
  104. junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py +99 -85
  105. junifer/markers/parcel_aggregation.py +60 -38
  106. junifer/markers/reho/_afni_reho.py +192 -0
  107. junifer/markers/reho/_junifer_reho.py +281 -0
  108. junifer/markers/reho/reho_base.py +69 -34
  109. junifer/markers/reho/reho_parcels.py +26 -16
  110. junifer/markers/reho/reho_spheres.py +23 -9
  111. junifer/markers/reho/tests/test_reho_parcels.py +93 -92
  112. junifer/markers/reho/tests/test_reho_spheres.py +88 -86
  113. junifer/markers/sphere_aggregation.py +54 -9
  114. junifer/markers/temporal_snr/temporal_snr_base.py +1 -0
  115. junifer/markers/temporal_snr/tests/test_temporal_snr_parcels.py +38 -37
  116. junifer/markers/temporal_snr/tests/test_temporal_snr_spheres.py +34 -38
  117. junifer/markers/tests/test_collection.py +43 -42
  118. junifer/markers/tests/test_ets_rss.py +29 -37
  119. junifer/markers/tests/test_parcel_aggregation.py +587 -468
  120. junifer/markers/tests/test_sphere_aggregation.py +209 -157
  121. junifer/markers/utils.py +2 -40
  122. junifer/onthefly/read_transform.py +13 -6
  123. junifer/pipeline/__init__.py +1 -0
  124. junifer/pipeline/pipeline_step_mixin.py +105 -41
  125. junifer/pipeline/registry.py +17 -0
  126. junifer/pipeline/singleton.py +45 -0
  127. junifer/pipeline/tests/test_pipeline_step_mixin.py +139 -51
  128. junifer/pipeline/tests/test_update_meta_mixin.py +1 -0
  129. junifer/pipeline/tests/test_workdir_manager.py +104 -0
  130. junifer/pipeline/update_meta_mixin.py +8 -2
  131. junifer/pipeline/utils.py +154 -15
  132. junifer/pipeline/workdir_manager.py +246 -0
  133. junifer/preprocess/__init__.py +3 -0
  134. junifer/preprocess/ants/__init__.py +4 -0
  135. junifer/preprocess/ants/ants_apply_transforms_warper.py +185 -0
  136. junifer/preprocess/ants/tests/test_ants_apply_transforms_warper.py +56 -0
  137. junifer/preprocess/base.py +96 -69
  138. junifer/preprocess/bold_warper.py +265 -0
  139. junifer/preprocess/confounds/fmriprep_confound_remover.py +91 -134
  140. junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +106 -111
  141. junifer/preprocess/fsl/__init__.py +4 -0
  142. junifer/preprocess/fsl/apply_warper.py +179 -0
  143. junifer/preprocess/fsl/tests/test_apply_warper.py +45 -0
  144. junifer/preprocess/tests/test_bold_warper.py +159 -0
  145. junifer/preprocess/tests/test_preprocess_base.py +6 -6
  146. junifer/preprocess/warping/__init__.py +6 -0
  147. junifer/preprocess/warping/_ants_warper.py +167 -0
  148. junifer/preprocess/warping/_fsl_warper.py +109 -0
  149. junifer/preprocess/warping/space_warper.py +213 -0
  150. junifer/preprocess/warping/tests/test_space_warper.py +198 -0
  151. junifer/stats.py +18 -4
  152. junifer/storage/base.py +9 -1
  153. junifer/storage/hdf5.py +8 -3
  154. junifer/storage/pandas_base.py +2 -1
  155. junifer/storage/sqlite.py +1 -0
  156. junifer/storage/tests/test_hdf5.py +2 -1
  157. junifer/storage/tests/test_sqlite.py +8 -8
  158. junifer/storage/tests/test_utils.py +6 -6
  159. junifer/storage/utils.py +1 -0
  160. junifer/testing/datagrabbers.py +11 -7
  161. junifer/testing/utils.py +1 -0
  162. junifer/tests/test_stats.py +2 -0
  163. junifer/utils/__init__.py +1 -0
  164. junifer/utils/helpers.py +53 -0
  165. junifer/utils/logging.py +14 -3
  166. junifer/utils/tests/test_helpers.py +35 -0
  167. {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/METADATA +59 -28
  168. junifer-0.0.4.dist-info/RECORD +257 -0
  169. {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/WHEEL +1 -1
  170. junifer/markers/falff/falff_estimator.py +0 -334
  171. junifer/markers/falff/tests/test_falff_estimator.py +0 -238
  172. junifer/markers/reho/reho_estimator.py +0 -515
  173. junifer/markers/reho/tests/test_reho_estimator.py +0 -260
  174. junifer-0.0.3.dev186.dist-info/RECORD +0 -199
  175. {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/AUTHORS.rst +0 -0
  176. {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/LICENSE.md +0 -0
  177. {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/entry_points.txt +0 -0
  178. {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/top_level.txt +0 -0
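Most of the churn below is in junifer/markers/tests/test_parcel_aggregation.py (item 119 above): 0.0.4 renames the load_parcellation/load_mask helpers to get_parcellation/get_mask, which now resolve against the data they are applied to via a target_data argument; parcellation registration becomes space-aware; and the tests swap downloaded nilearn datasets for the bundled PartlyCloudyTestingDataGrabber. A minimal sketch of the new calling convention, assembled only from names that appear in the added test lines below (not from separate API docs):

    # Sketch of the 0.0.4 test pattern, distilled from the added lines in
    # the diff below; every name here is taken verbatim from that diff.
    from junifer.data import get_parcellation
    from junifer.datareader import DefaultDataReader
    from junifer.markers.parcel_aggregation import ParcelAggregation
    from junifer.testing.datagrabbers import PartlyCloudyTestingDataGrabber

    with PartlyCloudyTestingDataGrabber() as dg:
        # Read one element; the reader returns a dict keyed by data type.
        element_data = DefaultDataReader().fit_transform(dg["sub-01"])
        # Parcellations are now resolved against the target data, hence the
        # new target_data keyword (get_parcellation replaces load_parcellation).
        parcellation_img, labels = get_parcellation(
            parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
            target_data=element_data["BOLD"],
        )
        marker = ParcelAggregation(
            parcellation="TianxS1x3TxMNInonlinear2009cAsym",
            method="mean",
            on="BOLD",
        )
        bold_means = marker.fit_transform(element_data)["BOLD"]["data"]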
@@ -10,16 +10,17 @@ from pathlib import Path
  import nibabel as nib
  import numpy as np
  import pytest
- from nilearn import datasets
- from nilearn.image import concat_imgs, math_img, new_img_like, resample_to_img
+ from nilearn.image import math_img, new_img_like
  from nilearn.maskers import NiftiLabelsMasker, NiftiMasker
  from nilearn.masking import compute_brain_mask
  from numpy.testing import assert_array_almost_equal, assert_array_equal
  from scipy.stats import trim_mean

- from junifer.data import load_mask, load_parcellation, register_parcellation
+ from junifer.data import get_mask, get_parcellation, register_parcellation
+ from junifer.datareader import DefaultDataReader
  from junifer.markers.parcel_aggregation import ParcelAggregation
  from junifer.storage import SQLiteFeatureStorage
+ from junifer.testing.datagrabbers import PartlyCloudyTestingDataGrabber


  def test_ParcelAggregation_input_output() -> None:
@@ -36,123 +37,145 @@ def test_ParcelAggregation_input_output() -> None:

  def test_ParcelAggregation_3D() -> None:
      """Test ParcelAggregation object on 3D images."""
-     # Get the testing parcellation (for nilearn)
-     parcellation = datasets.fetch_atlas_schaefer_2018(n_rois=100)
-
-     # Get the oasis VBM data
-     oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=1)
-     vbm = oasis_dataset.gray_matter_maps[0]
-     img = nib.load(vbm)
-
-     # Mask parcellation manually
-     parcellation_img_res = resample_to_img(
-         parcellation.maps,
-         img,
-         interpolation="nearest",
-     )
-     parcellation_bin = math_img(
-         "img != 0",
-         img=parcellation_img_res,
-     )
-
-     # Create NiftiMasker
-     masker = NiftiMasker(parcellation_bin, target_affine=img.affine)
-     data = masker.fit_transform(img)
-     parcellation_values = masker.transform(parcellation_img_res)
-     parcellation_values = np.squeeze(parcellation_values).astype(int)
-
-     # Compute the mean manually
-     manual = []
-     for t_v in sorted(np.unique(parcellation_values)):
-         t_values = np.mean(data[:, parcellation_values == t_v])
-         manual.append(t_values)
-     manual = np.array(manual)[np.newaxis, :]
-
-     # Create NiftiLabelsMasker
-     nifti_masker = NiftiLabelsMasker(labels_img=parcellation.maps)
-     auto = nifti_masker.fit_transform(img)
-
-     # Check that arrays are almost equal
-     assert_array_almost_equal(auto, manual)
-
-     # Use the ParcelAggregation object
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="mean",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     jun_values3d_mean = marker.fit_transform(input)["VBM_GM"]["data"]
-
-     assert jun_values3d_mean.ndim == 2
-     assert jun_values3d_mean.shape[0] == 1
-     assert_array_equal(manual, jun_values3d_mean)
-
-     # Test using another function (std)
-     manual = []
-     for t_v in sorted(np.unique(parcellation_values)):
-         t_values = np.std(data[:, parcellation_values == t_v])
-         manual.append(t_values)
-     manual = np.array(manual)[np.newaxis, :]
-
-     # Use the ParcelAggregation object
-     marker = ParcelAggregation(parcellation="Schaefer100x7", method="std")
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     jun_values3d_std = marker.fit_transform(input)["VBM_GM"]["data"]
-
-     assert jun_values3d_std.ndim == 2
-     assert jun_values3d_std.shape[0] == 1
-     assert_array_equal(manual, jun_values3d_std)
-
-     # Test using another function with parameters
-     manual = []
-     for t_v in sorted(np.unique(parcellation_values)):
-         t_values = trim_mean(
-             data[:, parcellation_values == t_v],
-             proportiontocut=0.1,
-             axis=None,  # type: ignore
-         )
-         manual.append(t_values)
-     manual = np.array(manual)[np.newaxis, :]
-
-     # Use the ParcelAggregation object
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="trim_mean",
-         method_params={"proportiontocut": 0.1},
-     )
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     jun_values3d_tm = marker.fit_transform(input)["VBM_GM"]["data"]
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         # Create ParcelAggregation object
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             on="BOLD",
+         )
+         element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+             ..., 0:1
+         ]
+
+         # Compare with nilearn
+         # Load testing parcellation
+         testing_parcellation, _ = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )
+         # Binarize parcellation
+         testing_parcellation_bin = math_img(
+             "img != 0",
+             img=testing_parcellation,
+         )
+         # Create NiftiMasker
+         masker = NiftiMasker(
+             testing_parcellation_bin,
+             target_affine=element_data["BOLD"]["data"].affine,
+         )
+         data = masker.fit_transform(element_data["BOLD"]["data"])
+         parcellation_values = np.squeeze(
+             masker.transform(testing_parcellation)
+         ).astype(int)
+         # Compute the mean manually
+         manual = []
+         for t_v in sorted(np.unique(parcellation_values)):
+             t_values = np.mean(data[:, parcellation_values == t_v])
+             manual.append(t_values)
+         manual = np.array(manual)[np.newaxis, :]
+
+         # Create NiftiLabelsMasker
+         nifti_labels_masker = NiftiLabelsMasker(
+             labels_img=testing_parcellation
+         )
+         nifti_labels_masked_bold = nifti_labels_masker.fit_transform(
+             element_data["BOLD"]["data"].slicer[..., 0:1]
+         )

-     assert jun_values3d_tm.ndim == 2
-     assert jun_values3d_tm.shape[0] == 1
-     assert_array_equal(manual, jun_values3d_tm)
+         parcel_agg_mean_bold_data = marker.fit_transform(element_data)["BOLD"][
+             "data"
+         ]
+         # Check that arrays are almost equal
+         assert_array_equal(parcel_agg_mean_bold_data, manual)
+         assert_array_almost_equal(nifti_labels_masked_bold, manual)
+
+         # Check further
+         assert parcel_agg_mean_bold_data.ndim == 2
+         assert parcel_agg_mean_bold_data.shape[0] == 1
+         assert_array_equal(
+             nifti_labels_masked_bold.shape, parcel_agg_mean_bold_data.shape
+         )
+         assert_array_equal(nifti_labels_masked_bold, parcel_agg_mean_bold_data)
+
+         # Compute std manually
+         manual = []
+         for t_v in sorted(np.unique(parcellation_values)):
+             t_values = np.std(data[:, parcellation_values == t_v])
+             manual.append(t_values)
+         manual = np.array(manual)[np.newaxis, :]
+
+         # Create ParcelAggregation object
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="std",
+             on="BOLD",
+         )
+         parcel_agg_std_bold_data = marker.fit_transform(element_data)["BOLD"][
+             "data"
+         ]
+         assert parcel_agg_std_bold_data.ndim == 2
+         assert parcel_agg_std_bold_data.shape[0] == 1
+         assert_array_equal(parcel_agg_std_bold_data, manual)
+
+         # Test using another function with parameters
+         manual = []
+         for t_v in sorted(np.unique(parcellation_values)):
+             t_values = trim_mean(
+                 data[:, parcellation_values == t_v],
+                 proportiontocut=0.1,
+                 axis=None,  # type: ignore
+             )
+             manual.append(t_values)
+         manual = np.array(manual)[np.newaxis, :]
+
+         # Create ParcelAggregation object
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="trim_mean",
+             method_params={"proportiontocut": 0.1},
+             on="BOLD",
+         )
+         parcel_agg_trim_mean_bold_data = marker.fit_transform(element_data)[
+             "BOLD"
+         ]["data"]
+         assert parcel_agg_trim_mean_bold_data.ndim == 2
+         assert parcel_agg_trim_mean_bold_data.shape[0] == 1
+         assert_array_equal(parcel_agg_trim_mean_bold_data, manual)


  def test_ParcelAggregation_4D():
      """Test ParcelAggregation object on 4D images."""
-     # Get the testing parcellation (for nilearn)
-     parcellation = datasets.fetch_atlas_schaefer_2018(
-         n_rois=100, yeo_networks=7, resolution_mm=2
-     )
-
-     # Get the SPM auditory data:
-     subject_data = datasets.fetch_spm_auditory()
-     fmri_img = concat_imgs(subject_data.func)  # type: ignore
-
-     # Create NiftiLabelsMasker
-     nifti_masker = NiftiLabelsMasker(labels_img=parcellation.maps)
-     auto4d = nifti_masker.fit_transform(fmri_img)
-
-     # Create ParcelAggregation object
-     marker = ParcelAggregation(parcellation="Schaefer100x7", method="mean")
-     input = {"BOLD": {"data": fmri_img, "meta": {}}}
-     jun_values4d = marker.fit_transform(input)["BOLD"]["data"]
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         # Create ParcelAggregation object
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym", method="mean"
+         )
+         parcel_agg_bold_data = marker.fit_transform(element_data)["BOLD"][
+             "data"
+         ]
+
+         # Compare with nilearn
+         # Load testing parcellation
+         testing_parcellation, _ = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )
+         # Extract data
+         nifti_labels_masker = NiftiLabelsMasker(
+             labels_img=testing_parcellation
+         )
+         nifti_labels_masked_bold = nifti_labels_masker.fit_transform(
+             element_data["BOLD"]["data"]
+         )

-     assert jun_values4d.ndim == 2
-     assert_array_equal(auto4d.shape, jun_values4d.shape)
-     assert_array_equal(auto4d, jun_values4d)
+         assert parcel_agg_bold_data.ndim == 2
+         assert_array_equal(
+             nifti_labels_masked_bold.shape, parcel_agg_bold_data.shape
+         )
+         assert_array_equal(nifti_labels_masked_bold, parcel_agg_bold_data)


  def test_ParcelAggregation_storage(tmp_path: Path) -> None:
@@ -164,130 +187,148 @@ def test_ParcelAggregation_storage(tmp_path: Path) -> None:
          The path to the test directory.

      """
-     # Get the oasis VBM data
-     oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=1)
-     vbm = oasis_dataset.gray_matter_maps[0]
-     img = nib.load(vbm)
-     uri = tmp_path / "test_sphere_storage_3D.sqlite"
-
-     storage = SQLiteFeatureStorage(uri=uri, upsert="ignore")
-     meta = {
-         "element": {"subject": "sub-01", "session": "ses-01"},
-         "dependencies": {"nilearn", "nibabel"},
-     }
-     input = {"VBM_GM": {"data": img, "meta": meta}}
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7", method="mean", on="VBM_GM"
-     )
-
-     marker.fit_transform(input, storage=storage)
-
-     features = storage.list_features()
-     assert any(
-         x["name"] == "VBM_GM_ParcelAggregation" for x in features.values()
-     )
-
-     meta = {
-         "element": {"subject": "sub-01", "session": "ses-01"},
-         "dependencies": {"nilearn", "nibabel"},
-     }
-     # Get the SPM auditory data
-     subject_data = datasets.fetch_spm_auditory()
-     fmri_img = concat_imgs(subject_data.func)  # type: ignore
-     input = {"BOLD": {"data": fmri_img, "meta": meta}}
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7", method="mean", on="BOLD"
-     )
+     # Store 3D
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         storage = SQLiteFeatureStorage(
+             uri=tmp_path / "test_parcel_storage_3D.sqlite", upsert="ignore"
+         )
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             on="BOLD",
+         )
+         element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+             ..., 0:1
+         ]
+         marker.fit_transform(input=element_data, storage=storage)
+         features = storage.list_features()
+         assert any(
+             x["name"] == "BOLD_ParcelAggregation" for x in features.values()
+         )

-     marker.fit_transform(input, storage=storage)
-     features = storage.list_features()
-     assert any(
-         x["name"] == "BOLD_ParcelAggregation" for x in features.values()
-     )
+     # Store 4D
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         storage = SQLiteFeatureStorage(
+             uri=tmp_path / "test_parcel_storage_4D.sqlite", upsert="ignore"
+         )
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             on="BOLD",
+         )
+         marker.fit_transform(input=element_data, storage=storage)
+         features = storage.list_features()
+         assert any(
+             x["name"] == "BOLD_ParcelAggregation" for x in features.values()
+         )


  def test_ParcelAggregation_3D_mask() -> None:
      """Test ParcelAggregation object on 3D images with mask."""
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         # Create ParcelAggregation object
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             name="tian_mean",
+             on="BOLD",
+             masks="compute_brain_mask",
+         )
+         element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+             ..., 0:1
+         ]
+         parcel_agg_bold_data = marker.fit_transform(element_data)["BOLD"][
+             "data"
+         ]
+
+         # Compare with nilearn
+         # Load testing parcellation
+         testing_parcellation, _ = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )
+         # Load mask
+         mask_img = get_mask(
+             "compute_brain_mask", target_data=element_data["BOLD"]
+         )
+         # Extract data
+         nifti_labels_masker = NiftiLabelsMasker(
+             labels_img=testing_parcellation, mask_img=mask_img
+         )
+         nifti_labels_masked_bold = nifti_labels_masker.fit_transform(
+             element_data["BOLD"]["data"].slicer[..., 0:1]
+         )

-     # Get the testing parcellation (for nilearn)
-     parcellation = datasets.fetch_atlas_schaefer_2018(n_rois=100)
-
-     # Get one mask
-     mask_img, _ = load_mask("GM_prob0.2")
-
-     # Get the oasis VBM data
-     oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=1)
-     vbm = oasis_dataset.gray_matter_maps[0]
-     img = nib.load(vbm)
-
-     # Create NiftiLabelsMasker
-     nifti_masker = NiftiLabelsMasker(
-         labels_img=parcellation.maps, mask_img=mask_img
-     )
-     auto = nifti_masker.fit_transform(img)
-
-     # Use the ParcelAggregation object
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="mean",
-         masks="GM_prob0.2",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     jun_values3d_mean = marker.fit_transform(input)["VBM_GM"]["data"]
-
-     assert jun_values3d_mean.ndim == 2
-     assert jun_values3d_mean.shape[0] == 1
-     assert_array_almost_equal(auto, jun_values3d_mean)
+         assert parcel_agg_bold_data.ndim == 2
+         assert_array_equal(
+             nifti_labels_masked_bold.shape, parcel_agg_bold_data.shape
+         )
+         assert_array_equal(nifti_labels_masked_bold, parcel_agg_bold_data)


  def test_ParcelAggregation_3D_mask_computed() -> None:
      """Test ParcelAggregation object on 3D images with computed masks."""
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+             ..., 0:1
+         ]
+
+         # Compare with nilearn
+         # Load testing parcellation
+         testing_parcellation, _ = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )
+         # Get a mask
+         mask_img = compute_brain_mask(
+             element_data["BOLD"]["data"], threshold=0.2
+         )
+         # Create NiftiLabelsMasker
+         nifti_labels_masker = NiftiLabelsMasker(
+             labels_img=testing_parcellation, mask_img=mask_img
+         )
+         nifti_labels_masked_bold_good = nifti_labels_masker.fit_transform(
+             element_data["BOLD"]["data"]
+         )

-     # Get the testing parcellation (for nilearn)
-     parcellation = datasets.fetch_atlas_schaefer_2018(n_rois=100)
-
-     # Get the oasis VBM data
-     oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=1)
-     vbm = oasis_dataset.gray_matter_maps[0]
-     img = nib.load(vbm)
-
-     # Get one mask
-     mask_img = compute_brain_mask(img, threshold=0.2)
-
-     # Create NiftiLabelsMasker
-     nifti_masker = NiftiLabelsMasker(
-         labels_img=parcellation.maps, mask_img=mask_img
-     )
-     auto = nifti_masker.fit_transform(img)
-
-     # Get one mask
-     mask_img = compute_brain_mask(img, threshold=0.5)
-
-     # Create NiftiLabelsMasker
-     nifti_masker = NiftiLabelsMasker(
-         labels_img=parcellation.maps, mask_img=mask_img
-     )
-     auto_bad = nifti_masker.fit_transform(img)
-
-     # Use the ParcelAggregation object
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="mean",
-         masks={"compute_brain_mask": {"threshold": 0.2}},
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     jun_values3d_mean = marker.fit_transform(input)["VBM_GM"]["data"]
+         # Get another mask
+         mask_img = compute_brain_mask(
+             element_data["BOLD"]["data"], threshold=0.5
+         )
+         # Create NiftiLabelsMasker
+         nifti_labels_masker = NiftiLabelsMasker(
+             labels_img=testing_parcellation, mask_img=mask_img
+         )
+         nifti_labels_masked_bold_bad = nifti_labels_masker.fit_transform(
+             mask_img
+         )

-     assert jun_values3d_mean.ndim == 2
-     assert jun_values3d_mean.shape[0] == 1
-     assert_array_almost_equal(auto, jun_values3d_mean)
+         # Use the ParcelAggregation object
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             masks={"compute_brain_mask": {"threshold": 0.2}},
+             name="tian_mean",
+             on="BOLD",
+         )
+         parcel_agg_mean_bold_data = marker.fit_transform(element_data)["BOLD"][
+             "data"
+         ]
+
+         assert parcel_agg_mean_bold_data.ndim == 2
+         assert parcel_agg_mean_bold_data.shape[0] == 1
+         assert_array_almost_equal(
+             nifti_labels_masked_bold_good, parcel_agg_mean_bold_data
+         )

-     with pytest.raises(AssertionError):
-         assert_array_almost_equal(jun_values3d_mean, auto_bad)
+         with pytest.raises(AssertionError):
+             assert_array_almost_equal(
+                 parcel_agg_mean_bold_data, nifti_labels_masked_bold_bad
+             )


  def test_ParcelAggregation_3D_multiple_non_overlapping(tmp_path: Path) -> None:
@@ -299,81 +340,93 @@ def test_ParcelAggregation_3D_multiple_non_overlapping(tmp_path: Path) -> None:
          The path to the test directory.

      """
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+             ..., 0:1
+         ]
+
+         # Load testing parcellation
+         testing_parcellation, labels = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )

-     # Get the testing parcellation
-     parcellation, labels, _ = load_parcellation("Schaefer100x7")
+         # Create two parcellations from it
+         parcellation_data = testing_parcellation.get_fdata()
+         parcellation1_data = parcellation_data.copy()
+         parcellation1_data[parcellation1_data > 8] = 0
+         parcellation2_data = parcellation_data.copy()
+         parcellation2_data[parcellation2_data <= 8] = 0
+         parcellation2_data[parcellation2_data > 0] -= 8
+         labels1 = labels[:8]
+         labels2 = labels[8:]
+
+         parcellation1_img = new_img_like(
+             testing_parcellation, parcellation1_data
+         )
+         parcellation2_img = new_img_like(
+             testing_parcellation, parcellation2_data
+         )

-     assert parcellation is not None
+         parcellation1_path = tmp_path / "parcellation1.nii.gz"
+         parcellation2_path = tmp_path / "parcellation2.nii.gz"

-     # Get the oasis VBM data
-     oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=1)
-     vbm = oasis_dataset.gray_matter_maps[0]
-     img = nib.load(vbm)
+         nib.save(parcellation1_img, parcellation1_path)
+         nib.save(parcellation2_img, parcellation2_path)

-     # Create two parcellations from it
-     parcellation_data = parcellation.get_fdata()
-     parcellation1_data = parcellation_data.copy()
-     parcellation1_data[parcellation1_data > 50] = 0
-     parcellation2_data = parcellation_data.copy()
-     parcellation2_data[parcellation2_data <= 50] = 0
-     parcellation2_data[parcellation2_data > 0] -= 50
-     labels1 = labels[:50]
-     labels2 = labels[50:]
+         register_parcellation(
+             name="TianxS1x3TxMNInonlinear2009cAsym_low",
+             parcellation_path=parcellation1_path,
+             parcels_labels=labels1,
+             space="MNI152NLin2009cAsym",
+             overwrite=True,
+         )
+         register_parcellation(
+             name="TianxS1x3TxMNInonlinear2009cAsym_high",
+             parcellation_path=parcellation2_path,
+             parcels_labels=labels2,
+             space="MNI152NLin2009cAsym",
+             overwrite=True,
+         )

-     parcellation1_img = new_img_like(parcellation, parcellation1_data)
-     parcellation2_img = new_img_like(parcellation, parcellation2_data)
+         # Use the ParcelAggregation object on the original parcellation
+         marker_original = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             name="tian_mean",
+             on="BOLD",
+         )
+         orig_mean = marker_original.fit_transform(element_data)["BOLD"]
+
+         orig_mean_data = orig_mean["data"]
+         assert orig_mean_data.ndim == 2
+         assert orig_mean_data.shape == (1, 16)
+
+         # Use the ParcelAggregation object on the two parcellations
+         marker_split = ParcelAggregation(
+             parcellation=[
+                 "TianxS1x3TxMNInonlinear2009cAsym_low",
+                 "TianxS1x3TxMNInonlinear2009cAsym_high",
+             ],
+             method="mean",
+             name="tian_mean",
+             on="BOLD",
+         )

-     parcellation1_path = tmp_path / "parcellation1.nii.gz"
-     parcellation2_path = tmp_path / "parcellation2.nii.gz"
+         # No warnings should be raised
+         with warnings.catch_warnings():
+             warnings.simplefilter("error", category=UserWarning)
+             split_mean = marker_split.fit_transform(element_data)["BOLD"]

-     nib.save(parcellation1_img, parcellation1_path)
-     nib.save(parcellation2_img, parcellation2_path)
+         split_mean_data = split_mean["data"]

-     register_parcellation(
-         "Schaefer100x7_low", parcellation1_path, labels1, overwrite=True
-     )
-     register_parcellation(
-         "Schaefer100x7_high", parcellation2_path, labels2, overwrite=True
-     )
+         assert split_mean_data.ndim == 2
+         assert split_mean_data.shape == (1, 16)

-     # Use the ParcelAggregation object on the original parcellation
-     marker_original = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="mean",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     orig_mean = marker_original.fit_transform(input)["VBM_GM"]
-
-     orig_mean_data = orig_mean["data"]
-     assert orig_mean_data.ndim == 2
-     assert orig_mean_data.shape[0] == 1
-     assert orig_mean_data.shape[1] == 100
-     # assert_array_almost_equal(auto, jun_values3d_mean)
-
-     # Use the ParcelAggregation object on the two parcellations
-     marker_split = ParcelAggregation(
-         parcellation=["Schaefer100x7_low", "Schaefer100x7_high"],
-         method="mean",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-
-     # No warnings should be raised
-     with warnings.catch_warnings():
-         warnings.simplefilter("error", category=UserWarning)
-         split_mean = marker_split.fit_transform(input)["VBM_GM"]
-         split_mean_data = split_mean["data"]
-
-     assert split_mean_data.ndim == 2
-     assert split_mean_data.shape[0] == 1
-     assert split_mean_data.shape[1] == 100
-
-     # Data and labels should be the same
-     assert_array_equal(orig_mean_data, split_mean_data)
-     assert orig_mean["col_names"] == split_mean["col_names"]
+         # Data and labels should be the same
+         assert_array_equal(orig_mean_data, split_mean_data)
+         assert orig_mean["col_names"] == split_mean["col_names"]


  def test_ParcelAggregation_3D_multiple_overlapping(tmp_path: Path) -> None:
@@ -385,87 +438,100 @@ def test_ParcelAggregation_3D_multiple_overlapping(tmp_path: Path) -> None:
          The path to the test directory.

      """
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+             ..., 0:1
+         ]
+
+         # Load testing parcellation
+         testing_parcellation, labels = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )

-     # Get the testing parcellation
-     parcellation, labels, _ = load_parcellation("Schaefer100x7")
+         # Create two parcellations from it
+         parcellation_data = testing_parcellation.get_fdata()
+         parcellation1_data = parcellation_data.copy()
+         parcellation1_data[parcellation1_data > 8] = 0
+         parcellation2_data = parcellation_data.copy()

-     assert parcellation is not None
+         # Make the second parcellation overlap with the first
+         parcellation2_data[parcellation2_data <= 6] = 0
+         parcellation2_data[parcellation2_data > 0] -= 6
+         labels1 = [f"low_{x}" for x in labels[:8]]  # Change the labels
+         labels2 = [f"high_{x}" for x in labels[6:]]  # Change the labels

-     # Get the oasis VBM data
-     oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=1)
-     vbm = oasis_dataset.gray_matter_maps[0]
-     img = nib.load(vbm)
+         parcellation1_img = new_img_like(
+             testing_parcellation, parcellation1_data
+         )
+         parcellation2_img = new_img_like(
+             testing_parcellation, parcellation2_data
+         )

-     # Create two parcellations from it
-     parcellation_data = parcellation.get_fdata()
-     parcellation1_data = parcellation_data.copy()
-     parcellation1_data[parcellation1_data > 50] = 0
-     parcellation2_data = parcellation_data.copy()
+         parcellation1_path = tmp_path / "parcellation1.nii.gz"
+         parcellation2_path = tmp_path / "parcellation2.nii.gz"

-     # Make the second parcellation overlap with the first
-     parcellation2_data[parcellation2_data <= 45] = 0
-     parcellation2_data[parcellation2_data > 0] -= 45
-     labels1 = [f"low_{x}" for x in labels[:50]]  # Change the labels
-     labels2 = [f"high_{x}" for x in labels[45:]]  # Change the labels
+         nib.save(parcellation1_img, parcellation1_path)
+         nib.save(parcellation2_img, parcellation2_path)

-     parcellation1_img = new_img_like(parcellation, parcellation1_data)
-     parcellation2_img = new_img_like(parcellation, parcellation2_data)
+         register_parcellation(
+             name="TianxS1x3TxMNInonlinear2009cAsym_low",
+             parcellation_path=parcellation1_path,
+             parcels_labels=labels1,
+             space="MNI152NLin2009cAsym",
+             overwrite=True,
+         )
+         register_parcellation(
+             name="TianxS1x3TxMNInonlinear2009cAsym_high",
+             parcellation_path=parcellation2_path,
+             parcels_labels=labels2,
+             space="MNI152NLin2009cAsym",
+             overwrite=True,
+         )

-     parcellation1_path = tmp_path / "parcellation1.nii.gz"
-     parcellation2_path = tmp_path / "parcellation2.nii.gz"
+         # Use the ParcelAggregation object on the original parcellation
+         marker_original = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             name="tian_mean",
+             on="BOLD",
+         )
+         orig_mean = marker_original.fit_transform(element_data)["BOLD"]
+
+         orig_mean_data = orig_mean["data"]
+         assert orig_mean_data.ndim == 2
+         assert orig_mean_data.shape == (1, 16)
+
+         # Use the ParcelAggregation object on the two parcellations
+         marker_split = ParcelAggregation(
+             parcellation=[
+                 "TianxS1x3TxMNInonlinear2009cAsym_low",
+                 "TianxS1x3TxMNInonlinear2009cAsym_high",
+             ],
+             method="mean",
+             name="tian_mean",
+             on="BOLD",
+         )
+         # Warning should be raised
+         with pytest.warns(RuntimeWarning, match="overlapping voxels"):
+             split_mean = marker_split.fit_transform(element_data)["BOLD"]

-     nib.save(parcellation1_img, parcellation1_path)
-     nib.save(parcellation2_img, parcellation2_path)
+         split_mean_data = split_mean["data"]

-     register_parcellation(
-         "Schaefer100x7_low2", parcellation1_path, labels1, overwrite=True
-     )
-     register_parcellation(
-         "Schaefer100x7_high2", parcellation2_path, labels2, overwrite=True
-     )
+         assert split_mean_data.ndim == 2
+         assert split_mean_data.shape == (1, 18)
+
+         # Overlapping voxels should be NaN
+         assert np.isnan(split_mean_data[:, 8:10]).all()
+
+         non_nan = split_mean_data[~np.isnan(split_mean_data)]
+         # Data should be the same
+         assert_array_equal(orig_mean_data, non_nan[None, :])

-     # Use the ParcelAggregation object on the original parcellation
-     marker_original = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="mean",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     orig_mean = marker_original.fit_transform(input)["VBM_GM"]
-
-     orig_mean_data = orig_mean["data"]
-     assert orig_mean_data.ndim == 2
-     assert orig_mean_data.shape[0] == 1
-     assert orig_mean_data.shape[1] == 100
-     # assert_array_almost_equal(auto, jun_values3d_mean)
-
-     # Use the ParcelAggregation object on the two parcellations
-     marker_split = ParcelAggregation(
-         parcellation=["Schaefer100x7_low2", "Schaefer100x7_high2"],
-         method="mean",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     with pytest.warns(RuntimeWarning, match="overlapping voxels"):
-         split_mean = marker_split.fit_transform(input)["VBM_GM"]
-         split_mean_data = split_mean["data"]
-
-     assert split_mean_data.ndim == 2
-     assert split_mean_data.shape[0] == 1
-     assert split_mean_data.shape[1] == 105
-
-     # Overlapping voxels should be NaN
-     assert np.isnan(split_mean_data[:, 50:55]).all()
-
-     non_nan = split_mean_data[~np.isnan(split_mean_data)]
-     # Data should be the same
-     assert_array_equal(orig_mean_data, non_nan[None, :])
-
-     # Labels should be "low" for the first 50 and "high" for the second 50
-     assert all(x.startswith("low") for x in split_mean["col_names"][:50])
-     assert all(x.startswith("high") for x in split_mean["col_names"][50:])
+         # Labels should be "low" for the first 8 and "high" for the second 8
+         assert all(x.startswith("low") for x in split_mean["col_names"][:8])
+         assert all(x.startswith("high") for x in split_mean["col_names"][8:])


  def test_ParcelAggregation_3D_multiple_duplicated_labels(
@@ -479,127 +545,164 @@ def test_ParcelAggregation_3D_multiple_duplicated_labels(
          The path to the test directory.

      """
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+             ..., 0:1
+         ]
+
+         # Load testing parcellation
+         testing_parcellation, labels = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )

-     # Get the testing parcellation
-     parcellation, labels, _ = load_parcellation("Schaefer100x7")
+         # Create two parcellations from it
+         parcellation_data = testing_parcellation.get_fdata()
+         parcellation1_data = parcellation_data.copy()
+         parcellation1_data[parcellation1_data > 8] = 0
+         parcellation2_data = parcellation_data.copy()
+         parcellation2_data[parcellation2_data <= 8] = 0
+         parcellation2_data[parcellation2_data > 0] -= 8
+         labels1 = labels[:8]
+         labels2 = labels[7:-1]  # One label is duplicated
+
+         parcellation1_img = new_img_like(
+             testing_parcellation, parcellation1_data
+         )
+         parcellation2_img = new_img_like(
+             testing_parcellation, parcellation2_data
+         )

-     assert parcellation is not None
+         parcellation1_path = tmp_path / "parcellation1.nii.gz"
+         parcellation2_path = tmp_path / "parcellation2.nii.gz"

-     # Get the oasis VBM data
-     oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=1)
-     vbm = oasis_dataset.gray_matter_maps[0]
-     img = nib.load(vbm)
+         nib.save(parcellation1_img, parcellation1_path)
+         nib.save(parcellation2_img, parcellation2_path)

-     # Create two parcellations from it
-     parcellation_data = parcellation.get_fdata()
-     parcellation1_data = parcellation_data.copy()
-     parcellation1_data[parcellation1_data > 50] = 0
-     parcellation2_data = parcellation_data.copy()
-     parcellation2_data[parcellation2_data <= 50] = 0
-     parcellation2_data[parcellation2_data > 0] -= 50
-     labels1 = labels[:50]
-     labels2 = labels[49:-1]  # One label is duplicated
+         register_parcellation(
+             name="TianxS1x3TxMNInonlinear2009cAsym_low",
+             parcellation_path=parcellation1_path,
+             parcels_labels=labels1,
+             space="MNI152NLin2009cAsym",
+             overwrite=True,
+         )
+         register_parcellation(
+             name="TianxS1x3TxMNInonlinear2009cAsym_high",
+             parcellation_path=parcellation2_path,
+             parcels_labels=labels2,
+             space="MNI152NLin2009cAsym",
+             overwrite=True,
+         )

-     parcellation1_img = new_img_like(parcellation, parcellation1_data)
-     parcellation2_img = new_img_like(parcellation, parcellation2_data)
+         # Use the ParcelAggregation object on the original parcellation
+         marker_original = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             name="tian_mean",
+             on="BOLD",
+         )
+         orig_mean = marker_original.fit_transform(element_data)["BOLD"]
+
+         orig_mean_data = orig_mean["data"]
+         assert orig_mean_data.ndim == 2
+         assert orig_mean_data.shape == (1, 16)
+
+         # Use the ParcelAggregation object on the two parcellations
+         marker_split = ParcelAggregation(
+             parcellation=[
+                 "TianxS1x3TxMNInonlinear2009cAsym_low",
+                 "TianxS1x3TxMNInonlinear2009cAsym_high",
+             ],
+             method="mean",
+             name="tian_mean",
+             on="BOLD",
+         )

-     parcellation1_path = tmp_path / "parcellation1.nii.gz"
-     parcellation2_path = tmp_path / "parcellation2.nii.gz"
+         # Warning should be raised
+         with pytest.warns(RuntimeWarning, match="duplicated labels."):
+             split_mean = marker_split.fit_transform(element_data)["BOLD"]

-     nib.save(parcellation1_img, parcellation1_path)
-     nib.save(parcellation2_img, parcellation2_path)
+         split_mean_data = split_mean["data"]

-     register_parcellation(
-         "Schaefer100x7_low", parcellation1_path, labels1, overwrite=True
-     )
-     register_parcellation(
-         "Schaefer100x7_high", parcellation2_path, labels2, overwrite=True
-     )
+         assert split_mean_data.ndim == 2
+         assert split_mean_data.shape == (1, 16)

-     # Use the ParcelAggregation object on the original parcellation
-     marker_original = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="mean",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-     orig_mean = marker_original.fit_transform(input)["VBM_GM"]
-
-     orig_mean_data = orig_mean["data"]
-     assert orig_mean_data.ndim == 2
-     assert orig_mean_data.shape[0] == 1
-     assert orig_mean_data.shape[1] == 100
-     # assert_array_almost_equal(auto, jun_values3d_mean)
-
-     # Use the ParcelAggregation object on the two parcellations
-     marker_split = ParcelAggregation(
-         parcellation=["Schaefer100x7_low", "Schaefer100x7_high"],
-         method="mean",
-         name="gmd_schaefer100x7_mean",
-         on="VBM_GM",
-     )  # Test passing "on" as a keyword argument
-     input = {"VBM_GM": {"data": img, "meta": {}}}
-
-     with pytest.warns(RuntimeWarning, match="duplicated labels."):
-         split_mean = marker_split.fit_transform(input)["VBM_GM"]
-         split_mean_data = split_mean["data"]
-
-     assert split_mean_data.ndim == 2
-     assert split_mean_data.shape[0] == 1
-     assert split_mean_data.shape[1] == 100
-
-     # Data should be the same
-     assert_array_equal(orig_mean_data, split_mean_data)
-
-     # Labels should be prefixed with the parcellation name
-     col_names = [f"Schaefer100x7_low_{x}" for x in labels1]
-     col_names += [f"Schaefer100x7_high_{x}" for x in labels2]
-     assert col_names == split_mean["col_names"]
+         # Data should be the same
+         assert_array_equal(orig_mean_data, split_mean_data)
+
+         # Labels should be prefixed with the parcellation name
+         col_names = [
+             f"TianxS1x3TxMNInonlinear2009cAsym_low_{x}" for x in labels1
+         ]
+         col_names += [
+             f"TianxS1x3TxMNInonlinear2009cAsym_high_{x}" for x in labels2
+         ]
+         assert col_names == split_mean["col_names"]


  def test_ParcelAggregation_4D_agg_time():
      """Test ParcelAggregation object on 4D images, aggregating time."""
-     # Get the testing parcellation (for nilearn)
-     parcellation = datasets.fetch_atlas_schaefer_2018(
-         n_rois=100, yeo_networks=7, resolution_mm=2
-     )
-
-     # Get the SPM auditory data:
-     subject_data = datasets.fetch_spm_auditory()
-     fmri_img = concat_imgs(subject_data.func)  # type: ignore
-
-     # Create NiftiLabelsMasker
-     nifti_masker = NiftiLabelsMasker(labels_img=parcellation.maps)
-     auto4d = nifti_masker.fit_transform(fmri_img)
-     auto_mean = auto4d.mean(axis=0)
-
-     # Create ParcelAggregation object
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7", method="mean", time_method="mean"
-     )
-     input = {"BOLD": {"data": fmri_img, "meta": {}}}
-     jun_values4d = marker.fit_transform(input)["BOLD"]["data"]
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         # Create ParcelAggregation object
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             time_method="mean",
+             on="BOLD",
+         )
+         parcel_agg_bold_data = marker.fit_transform(element_data)["BOLD"][
+             "data"
+         ]
+
+         # Compare with nilearn
+         # Loading testing parcellation
+         testing_parcellation, _ = get_parcellation(
+             parcellation=["TianxS1x3TxMNInonlinear2009cAsym"],
+             target_data=element_data["BOLD"],
+         )
+         # Extract data
+         nifti_labels_masker = NiftiLabelsMasker(
+             labels_img=testing_parcellation
+         )
+         nifti_labels_masked_bold = nifti_labels_masker.fit_transform(
+             element_data["BOLD"]["data"]
+         )
+         nifti_labels_masked_bold_mean = nifti_labels_masked_bold.mean(axis=0)

-     assert jun_values4d.ndim == 1
-     assert_array_equal(auto_mean.shape, jun_values4d.shape)
-     assert_array_almost_equal(auto_mean, jun_values4d, decimal=2)
+         assert parcel_agg_bold_data.ndim == 1
+         assert_array_equal(
+             nifti_labels_masked_bold_mean.shape, parcel_agg_bold_data.shape
+         )
+         assert_array_almost_equal(
+             nifti_labels_masked_bold_mean, parcel_agg_bold_data, decimal=2
+         )

-     auto_pick_0 = auto4d[:1, :]
-     marker = ParcelAggregation(
-         parcellation="Schaefer100x7",
-         method="mean",
-         time_method="select",
-         time_method_params={"pick": [0]},
-     )
+         # Test picking first time point
+         nifti_labels_masked_bold_pick_0 = nifti_labels_masked_bold[:1, :]
+         marker = ParcelAggregation(
+             parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+             method="mean",
+             time_method="select",
+             time_method_params={"pick": [0]},
+             on="BOLD",
+         )
+         parcel_agg_bold_data = marker.fit_transform(element_data)["BOLD"][
+             "data"
+         ]

-     input = {"BOLD": {"data": fmri_img, "meta": {}}}
-     jun_values4d = marker.fit_transform(input)["BOLD"]["data"]
+         assert parcel_agg_bold_data.ndim == 2
+         assert_array_equal(
+             nifti_labels_masked_bold_pick_0.shape, parcel_agg_bold_data.shape
+         )
+         assert_array_equal(
+             nifti_labels_masked_bold_pick_0, parcel_agg_bold_data
+         )

-     assert jun_values4d.ndim == 2
-     assert_array_equal(auto_pick_0.shape, jun_values4d.shape)
-     assert_array_equal(auto_pick_0, jun_values4d)

+ def test_ParcelAggregation_errors() -> None:
+     """Test errors for ParcelAggregation."""
      with pytest.raises(ValueError, match="can only be used with BOLD data"):
          ParcelAggregation(
              parcellation="Schaefer100x7",
@@ -619,6 +722,22 @@ def test_ParcelAggregation_4D_agg_time():
              on="VBM_GM",
          )

-     with pytest.warns(RuntimeWarning, match="No time dimension to aggregate"):
-         input = {"BOLD": {"data": fmri_img.slicer[..., 0:1], "meta": {}}}
-         marker.fit_transform(input)
+
+ def test_ParcelAggregation_warning() -> None:
+     """Test warning for ParcelAggregation."""
+     with PartlyCloudyTestingDataGrabber() as dg:
+         element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+         with pytest.warns(
+             RuntimeWarning, match="No time dimension to aggregate"
+         ):
+             marker = ParcelAggregation(
+                 parcellation="TianxS1x3TxMNInonlinear2009cAsym",
+                 method="mean",
+                 time_method="select",
+                 time_method_params={"pick": [0]},
+                 on="BOLD",
+             )
+             element_data["BOLD"]["data"] = element_data["BOLD"]["data"].slicer[
+                 ..., 0:1
+             ]
+             marker.fit_transform(element_data)
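For the storage path, test_ParcelAggregation_storage above reduces to the following round trip. This is again a sketch built only from the added test lines, with an arbitrary output directory standing in for pytest's tmp_path fixture:

    from pathlib import Path

    from junifer.datareader import DefaultDataReader
    from junifer.markers.parcel_aggregation import ParcelAggregation
    from junifer.storage import SQLiteFeatureStorage
    from junifer.testing.datagrabbers import PartlyCloudyTestingDataGrabber

    out_dir = Path("/tmp")  # arbitrary writable directory, not from the diff

    with PartlyCloudyTestingDataGrabber() as dg:
        element_data = DefaultDataReader().fit_transform(dg["sub-01"])
        storage = SQLiteFeatureStorage(
            uri=out_dir / "parcel_agg.sqlite", upsert="ignore"
        )
        marker = ParcelAggregation(
            parcellation="TianxS1x3TxMNInonlinear2009cAsym",
            method="mean",
            on="BOLD",
        )
        # Passing storage= writes the aggregated feature to SQLite instead
        # of only returning it.
        marker.fit_transform(input=element_data, storage=storage)
        # The stored feature is listed as BOLD_ParcelAggregation.
        assert any(
            x["name"] == "BOLD_ParcelAggregation"
            for x in storage.list_features().values()
        )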