junifer 0.0.3.dev188__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff shows the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
Files changed (178)
  1. junifer/_version.py +14 -2
  2. junifer/api/cli.py +162 -17
  3. junifer/api/functions.py +87 -419
  4. junifer/api/parser.py +24 -0
  5. junifer/api/queue_context/__init__.py +8 -0
  6. junifer/api/queue_context/gnu_parallel_local_adapter.py +258 -0
  7. junifer/api/queue_context/htcondor_adapter.py +365 -0
  8. junifer/api/queue_context/queue_context_adapter.py +60 -0
  9. junifer/api/queue_context/tests/test_gnu_parallel_local_adapter.py +192 -0
  10. junifer/api/queue_context/tests/test_htcondor_adapter.py +257 -0
  11. junifer/api/res/afni/run_afni_docker.sh +6 -6
  12. junifer/api/res/ants/ResampleImage +3 -0
  13. junifer/api/res/ants/antsApplyTransforms +3 -0
  14. junifer/api/res/ants/antsApplyTransformsToPoints +3 -0
  15. junifer/api/res/ants/run_ants_docker.sh +39 -0
  16. junifer/api/res/fsl/applywarp +3 -0
  17. junifer/api/res/fsl/flirt +3 -0
  18. junifer/api/res/fsl/img2imgcoord +3 -0
  19. junifer/api/res/fsl/run_fsl_docker.sh +39 -0
  20. junifer/api/res/fsl/std2imgcoord +3 -0
  21. junifer/api/res/run_conda.sh +4 -4
  22. junifer/api/res/run_venv.sh +22 -0
  23. junifer/api/tests/data/partly_cloudy_agg_mean_tian.yml +16 -0
  24. junifer/api/tests/test_api_utils.py +21 -3
  25. junifer/api/tests/test_cli.py +232 -9
  26. junifer/api/tests/test_functions.py +211 -439
  27. junifer/api/tests/test_parser.py +1 -1
  28. junifer/configs/juseless/datagrabbers/aomic_id1000_vbm.py +6 -1
  29. junifer/configs/juseless/datagrabbers/camcan_vbm.py +6 -1
  30. junifer/configs/juseless/datagrabbers/ixi_vbm.py +6 -1
  31. junifer/configs/juseless/datagrabbers/tests/test_ucla.py +8 -8
  32. junifer/configs/juseless/datagrabbers/ucla.py +44 -26
  33. junifer/configs/juseless/datagrabbers/ukb_vbm.py +6 -1
  34. junifer/data/VOIs/meta/AutobiographicalMemory_VOIs.txt +23 -0
  35. junifer/data/VOIs/meta/Power2013_MNI_VOIs.tsv +264 -0
  36. junifer/data/__init__.py +4 -0
  37. junifer/data/coordinates.py +298 -31
  38. junifer/data/masks.py +360 -28
  39. junifer/data/parcellations.py +621 -188
  40. junifer/data/template_spaces.py +190 -0
  41. junifer/data/tests/test_coordinates.py +34 -3
  42. junifer/data/tests/test_data_utils.py +1 -0
  43. junifer/data/tests/test_masks.py +202 -86
  44. junifer/data/tests/test_parcellations.py +266 -55
  45. junifer/data/tests/test_template_spaces.py +104 -0
  46. junifer/data/utils.py +4 -2
  47. junifer/datagrabber/__init__.py +1 -0
  48. junifer/datagrabber/aomic/id1000.py +111 -70
  49. junifer/datagrabber/aomic/piop1.py +116 -53
  50. junifer/datagrabber/aomic/piop2.py +116 -53
  51. junifer/datagrabber/aomic/tests/test_id1000.py +27 -27
  52. junifer/datagrabber/aomic/tests/test_piop1.py +27 -27
  53. junifer/datagrabber/aomic/tests/test_piop2.py +27 -27
  54. junifer/datagrabber/base.py +62 -10
  55. junifer/datagrabber/datalad_base.py +0 -2
  56. junifer/datagrabber/dmcc13_benchmark.py +372 -0
  57. junifer/datagrabber/hcp1200/datalad_hcp1200.py +5 -0
  58. junifer/datagrabber/hcp1200/hcp1200.py +30 -13
  59. junifer/datagrabber/pattern.py +133 -27
  60. junifer/datagrabber/pattern_datalad.py +111 -13
  61. junifer/datagrabber/tests/test_base.py +57 -6
  62. junifer/datagrabber/tests/test_datagrabber_utils.py +204 -76
  63. junifer/datagrabber/tests/test_datalad_base.py +0 -6
  64. junifer/datagrabber/tests/test_dmcc13_benchmark.py +256 -0
  65. junifer/datagrabber/tests/test_multiple.py +43 -10
  66. junifer/datagrabber/tests/test_pattern.py +125 -178
  67. junifer/datagrabber/tests/test_pattern_datalad.py +44 -25
  68. junifer/datagrabber/utils.py +151 -16
  69. junifer/datareader/default.py +36 -10
  70. junifer/external/nilearn/junifer_nifti_spheres_masker.py +6 -0
  71. junifer/markers/base.py +25 -16
  72. junifer/markers/collection.py +35 -16
  73. junifer/markers/complexity/__init__.py +27 -0
  74. junifer/markers/complexity/complexity_base.py +149 -0
  75. junifer/markers/complexity/hurst_exponent.py +136 -0
  76. junifer/markers/complexity/multiscale_entropy_auc.py +140 -0
  77. junifer/markers/complexity/perm_entropy.py +132 -0
  78. junifer/markers/complexity/range_entropy.py +136 -0
  79. junifer/markers/complexity/range_entropy_auc.py +145 -0
  80. junifer/markers/complexity/sample_entropy.py +134 -0
  81. junifer/markers/complexity/tests/test_complexity_base.py +19 -0
  82. junifer/markers/complexity/tests/test_hurst_exponent.py +69 -0
  83. junifer/markers/complexity/tests/test_multiscale_entropy_auc.py +68 -0
  84. junifer/markers/complexity/tests/test_perm_entropy.py +68 -0
  85. junifer/markers/complexity/tests/test_range_entropy.py +69 -0
  86. junifer/markers/complexity/tests/test_range_entropy_auc.py +69 -0
  87. junifer/markers/complexity/tests/test_sample_entropy.py +68 -0
  88. junifer/markers/complexity/tests/test_weighted_perm_entropy.py +68 -0
  89. junifer/markers/complexity/weighted_perm_entropy.py +133 -0
  90. junifer/markers/falff/_afni_falff.py +153 -0
  91. junifer/markers/falff/_junifer_falff.py +142 -0
  92. junifer/markers/falff/falff_base.py +91 -84
  93. junifer/markers/falff/falff_parcels.py +61 -45
  94. junifer/markers/falff/falff_spheres.py +64 -48
  95. junifer/markers/falff/tests/test_falff_parcels.py +89 -121
  96. junifer/markers/falff/tests/test_falff_spheres.py +92 -127
  97. junifer/markers/functional_connectivity/crossparcellation_functional_connectivity.py +1 -0
  98. junifer/markers/functional_connectivity/edge_functional_connectivity_parcels.py +1 -0
  99. junifer/markers/functional_connectivity/functional_connectivity_base.py +1 -0
  100. junifer/markers/functional_connectivity/tests/test_crossparcellation_functional_connectivity.py +46 -44
  101. junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_parcels.py +34 -39
  102. junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_spheres.py +40 -52
  103. junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py +62 -70
  104. junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py +99 -85
  105. junifer/markers/parcel_aggregation.py +60 -38
  106. junifer/markers/reho/_afni_reho.py +192 -0
  107. junifer/markers/reho/_junifer_reho.py +281 -0
  108. junifer/markers/reho/reho_base.py +69 -34
  109. junifer/markers/reho/reho_parcels.py +26 -16
  110. junifer/markers/reho/reho_spheres.py +23 -9
  111. junifer/markers/reho/tests/test_reho_parcels.py +93 -92
  112. junifer/markers/reho/tests/test_reho_spheres.py +88 -86
  113. junifer/markers/sphere_aggregation.py +54 -9
  114. junifer/markers/temporal_snr/temporal_snr_base.py +1 -0
  115. junifer/markers/temporal_snr/tests/test_temporal_snr_parcels.py +38 -37
  116. junifer/markers/temporal_snr/tests/test_temporal_snr_spheres.py +34 -38
  117. junifer/markers/tests/test_collection.py +43 -42
  118. junifer/markers/tests/test_ets_rss.py +29 -37
  119. junifer/markers/tests/test_parcel_aggregation.py +587 -468
  120. junifer/markers/tests/test_sphere_aggregation.py +209 -157
  121. junifer/markers/utils.py +2 -40
  122. junifer/onthefly/read_transform.py +13 -6
  123. junifer/pipeline/__init__.py +1 -0
  124. junifer/pipeline/pipeline_step_mixin.py +105 -41
  125. junifer/pipeline/registry.py +17 -0
  126. junifer/pipeline/singleton.py +45 -0
  127. junifer/pipeline/tests/test_pipeline_step_mixin.py +139 -51
  128. junifer/pipeline/tests/test_update_meta_mixin.py +1 -0
  129. junifer/pipeline/tests/test_workdir_manager.py +104 -0
  130. junifer/pipeline/update_meta_mixin.py +8 -2
  131. junifer/pipeline/utils.py +154 -15
  132. junifer/pipeline/workdir_manager.py +246 -0
  133. junifer/preprocess/__init__.py +3 -0
  134. junifer/preprocess/ants/__init__.py +4 -0
  135. junifer/preprocess/ants/ants_apply_transforms_warper.py +185 -0
  136. junifer/preprocess/ants/tests/test_ants_apply_transforms_warper.py +56 -0
  137. junifer/preprocess/base.py +96 -69
  138. junifer/preprocess/bold_warper.py +265 -0
  139. junifer/preprocess/confounds/fmriprep_confound_remover.py +91 -134
  140. junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +106 -111
  141. junifer/preprocess/fsl/__init__.py +4 -0
  142. junifer/preprocess/fsl/apply_warper.py +179 -0
  143. junifer/preprocess/fsl/tests/test_apply_warper.py +45 -0
  144. junifer/preprocess/tests/test_bold_warper.py +159 -0
  145. junifer/preprocess/tests/test_preprocess_base.py +6 -6
  146. junifer/preprocess/warping/__init__.py +6 -0
  147. junifer/preprocess/warping/_ants_warper.py +167 -0
  148. junifer/preprocess/warping/_fsl_warper.py +109 -0
  149. junifer/preprocess/warping/space_warper.py +213 -0
  150. junifer/preprocess/warping/tests/test_space_warper.py +198 -0
  151. junifer/stats.py +18 -4
  152. junifer/storage/base.py +9 -1
  153. junifer/storage/hdf5.py +8 -3
  154. junifer/storage/pandas_base.py +2 -1
  155. junifer/storage/sqlite.py +1 -0
  156. junifer/storage/tests/test_hdf5.py +2 -1
  157. junifer/storage/tests/test_sqlite.py +8 -8
  158. junifer/storage/tests/test_utils.py +6 -6
  159. junifer/storage/utils.py +1 -0
  160. junifer/testing/datagrabbers.py +11 -7
  161. junifer/testing/utils.py +1 -0
  162. junifer/tests/test_stats.py +2 -0
  163. junifer/utils/__init__.py +1 -0
  164. junifer/utils/helpers.py +53 -0
  165. junifer/utils/logging.py +14 -3
  166. junifer/utils/tests/test_helpers.py +35 -0
  167. {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/METADATA +59 -28
  168. junifer-0.0.4.dist-info/RECORD +257 -0
  169. {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/WHEEL +1 -1
  170. junifer/markers/falff/falff_estimator.py +0 -334
  171. junifer/markers/falff/tests/test_falff_estimator.py +0 -238
  172. junifer/markers/reho/reho_estimator.py +0 -515
  173. junifer/markers/reho/tests/test_reho_estimator.py +0 -260
  174. junifer-0.0.3.dev188.dist-info/RECORD +0 -199
  175. {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/AUTHORS.rst +0 -0
  176. {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/LICENSE.md +0 -0
  177. {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/entry_points.txt +0 -0
  178. {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/top_level.txt +0 -0
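The largest diffs below (the fALFF test modules) all track the same API change: the markers' old use_afni flag is gone, an implementation is now selected with a using parameter ("junifer" or "afni"), and tests register a working directory through the new WorkDirManager from junifer.pipeline. The following Python sketch restates that usage pattern; it is assembled only from the test code shown further down, and the scratch path is a hypothetical stand-in for the tmp_path fixture the tests use.

from pathlib import Path

from junifer.datareader import DefaultDataReader
from junifer.markers.falff import ALFFParcels
from junifer.pipeline import WorkDirManager
from junifer.testing.datagrabbers import PartlyCloudyTestingDataGrabber

# Hypothetical scratch directory; the tests below use pytest's tmp_path
WorkDirManager().workdir = Path("/tmp/junifer-work")

with PartlyCloudyTestingDataGrabber() as dg:
    # Read the image data for one subject of the testing dataset
    element_data = DefaultDataReader().fit_transform(dg["sub-01"])
    # "using" selects the implementation and replaces the old use_afni flag
    marker = ALFFParcels(
        parcellation="TianxS1x3TxMNInonlinear2009cAsym",
        fractional=False,
        using="junifer",
    )
    output = marker.fit_transform(element_data)
    # The updated tests expect a (1, 16) array for this parcellation
    print(output["BOLD"]["data"].shape)

Swapping in using="afni" selects the AFNI-backed implementation, which the comparison tests below check against the junifer one via a Pearson correlation.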
junifer/markers/falff/tests/test_falff_parcels.py
@@ -1,158 +1,126 @@
-"""Provide test for parcel-aggregated (f)ALFF."""
+"""Provide tests for ALFFParcels."""
 
 # Authors: Federico Raimondo <f.raimondo@fz-juelich.de>
 #          Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL
 
+import logging
 from pathlib import Path
 
 import pytest
-from numpy.testing import assert_array_equal
-from scipy.stats import pearsonr
+import scipy as sp
 
 from junifer.datareader import DefaultDataReader
 from junifer.markers.falff import ALFFParcels
+from junifer.pipeline import WorkDirManager
 from junifer.pipeline.utils import _check_afni
 from junifer.storage import SQLiteFeatureStorage
 from junifer.testing.datagrabbers import PartlyCloudyTestingDataGrabber
-from junifer.utils import logger
 
 
-_PARCELLATION = "Schaefer100x7"
+PARCELLATION = "TianxS1x3TxMNInonlinear2009cAsym"
 
 
-def test_ALFFParcels_python() -> None:
-    """Test ALFFParcels using python."""
-    # Get the SPM auditory data:
-
-    with PartlyCloudyTestingDataGrabber() as dg:
-        input = dg["sub-01"]
-
-    input = DefaultDataReader().fit_transform(input)
-    # Create ParcelAggregation object
-    marker = ALFFParcels(
-        parcellation=_PARCELLATION,
-        method="mean",
-        use_afni=False,
-        fractional=False,
-    )
-    python_values = marker.fit_transform(input)["BOLD"]["data"]
-
-    assert marker.use_afni is False
-    assert python_values.ndim == 2
-    assert python_values.shape == (1, 100)
+def test_ALFFParcels(caplog: pytest.LogCaptureFixture, tmp_path: Path) -> None:
+    """Test ALFFParcels.
 
+    Parameters
+    ----------
+    caplog : pytest.LogCaptureFixture
+        The pytest.LogCaptureFixture object.
+    tmp_path : pathlib.Path
+        The path to the test directory.
 
-@pytest.mark.skipif(
-    _check_afni() is False, reason="requires afni to be in PATH"
-)
-def test_ALFFParcels_afni() -> None:
-    """Test ALFFParcels using afni."""
-    # Get the SPM auditory data:
-    with PartlyCloudyTestingDataGrabber() as dg:
-        input = dg["sub-01"]
-
-    input = DefaultDataReader().fit_transform(input)
-    # Create ParcelAggregation object
-    marker = ALFFParcels(
-        parcellation=_PARCELLATION,
-        method="mean",
-        use_afni=True,
-        fractional=False,
-    )
-    assert marker.use_afni is True
-    afni_values = marker.fit_transform(input)["BOLD"]["data"]
-
-    assert afni_values.ndim == 2
-    assert afni_values.shape == (1, 100)
-
-    # Again, should be blazing fast
-    marker = ALFFParcels(
-        parcellation=_PARCELLATION, method="mean", fractional=False
-    )
-    assert marker.use_afni is None
-    afni_values2 = marker.fit_transform(input)["BOLD"]["data"]
-    assert marker.use_afni is True
-    assert_array_equal(afni_values, afni_values2)
+    """
+    with caplog.at_level(logging.DEBUG):
+        with PartlyCloudyTestingDataGrabber() as dg:
+            element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+            # Update workdir to current test's tmp_path
+            WorkDirManager().workdir = tmp_path
+
+            # Initialize marker
+            marker = ALFFParcels(
+                parcellation=PARCELLATION,
+                fractional=False,
+                using="junifer",
+            )
+            # Fit transform marker on data
+            output = marker.fit_transform(element_data)
+
+            assert "Creating cache" in caplog.text
+
+            # Get BOLD output
+            assert "BOLD" in output
+            output_bold = output["BOLD"]
+            # Assert BOLD output keys
+            assert "data" in output_bold
+            assert "col_names" in output_bold
+
+            output_bold_data = output_bold["data"]
+            # Assert BOLD output data dimension
+            assert output_bold_data.ndim == 2
+            assert output_bold_data.shape == (1, 16)
+
+            # Reset log capture
+            caplog.clear()
+            # Initialize storage
+            storage = SQLiteFeatureStorage(tmp_path / "falff_parcels.sqlite")
+            # Fit transform marker on data with storage
+            marker.fit_transform(
+                input=element_data,
+                storage=storage,
+            )
+            # Cache working correctly
+            assert "Creating cache" not in caplog.text
 
 
 @pytest.mark.skipif(
-    _check_afni() is False, reason="requires afni to be in PATH"
+    _check_afni() is False, reason="requires AFNI to be in PATH"
 )
 @pytest.mark.parametrize(
     "fractional", [True, False], ids=["fractional", "non-fractional"]
 )
-def test_ALFFParcels_python_vs_afni(
-    fractional: bool,
-) -> None:
-    """Test ALFFParcels using python.
+def test_ALFFParcels_comparison(tmp_path: Path, fractional: bool) -> None:
+    """Test ALFFParcels implementation comparison.
 
     Parameters
     ----------
+    tmp_path : pathlib.Path
+        The path to the test directory.
    fractional : bool
        Whether to compute fractional ALFF or not.
 
     """
-
-    with PartlyCloudyTestingDataGrabber() as dg:
-        input = dg["sub-01"]
-
-    input = DefaultDataReader().fit_transform(input)
-    # Create ParcelAggregation object
-    marker_python = ALFFParcels(
-        parcellation=_PARCELLATION,
-        method="mean",
-        use_afni=False,
-        fractional=fractional,
-    )
-    python_values = marker_python.fit_transform(input)["BOLD"]["data"]
-
-    assert marker_python.use_afni is False
-    assert python_values.ndim == 2
-    assert python_values.shape == (1, 100)
-
-    marker_afni = ALFFParcels(
-        parcellation=_PARCELLATION,
-        method="mean",
-        use_afni=True,
-        fractional=fractional,
-    )
-    afni_values = marker_afni.fit_transform(input)["BOLD"]["data"]
-
-    assert marker_afni.use_afni is True
-    assert afni_values.ndim == 2
-    assert afni_values.shape == (1, 100)
-
-    r, p = pearsonr(python_values[0], afni_values[0])
-    logger.info(f"Correlation between python and afni: {r} (p={p})")
-    assert r > 0.99
-
-
-def test_ALFFParcels_storage(
-    tmp_path: Path,
-) -> None:
-    """Test ALFFParcels storage.
-
-    Parameters
-    ----------
-    tmp_path : pathlib.Path
-        The path to the test directory.
-    """
     with PartlyCloudyTestingDataGrabber() as dg:
-        # Use first subject
-        input = dg["sub-01"]
-        input = DefaultDataReader().fit_transform(input)
-        # Create ParcelAggregation object
-        marker = ALFFParcels(
-            parcellation=_PARCELLATION,
-            method="mean",
-            use_afni=False,
-            fractional=True,
+        element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+        # Update workdir to current test's tmp_path
+        WorkDirManager().workdir = tmp_path
+
+        # Initialize marker
+        junifer_marker = ALFFParcels(
+            parcellation=PARCELLATION,
+            fractional=fractional,
+            using="junifer",
        )
-        storage = SQLiteFeatureStorage(tmp_path / "alff_parcels.sqlite")
-
-        # Fit transform marker on data with storage
-        marker.fit_transform(
-            input=input,
-            storage=storage,
+        # Fit transform marker on data
+        junifer_output = junifer_marker.fit_transform(element_data)
+        # Get BOLD output
+        junifer_output_bold = junifer_output["BOLD"]
+
+        # Initialize marker
+        afni_marker = ALFFParcels(
+            parcellation=PARCELLATION,
+            fractional=fractional,
+            using="afni",
+        )
+        # Fit transform marker on data
+        afni_output = afni_marker.fit_transform(element_data)
+        # Get BOLD output
+        afni_output_bold = afni_output["BOLD"]
+
+        # Check for Pearson correlation coefficient
+        r, _ = sp.stats.pearsonr(
+            junifer_output_bold["data"][0],
+            afni_output_bold["data"][0],
        )
+        assert r > 0.97
junifer/markers/falff/tests/test_falff_spheres.py
@@ -1,164 +1,129 @@
-"""Provide test for sphere-aggregated (f)ALFF."""
+"""Provide tests for ALFFSpheres."""
 
 # Authors: Federico Raimondo <f.raimondo@fz-juelich.de>
 #          Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL
 
+import logging
 from pathlib import Path
 
 import pytest
-from numpy.testing import assert_array_equal
-from scipy.stats import pearsonr
+import scipy as sp
 
 from junifer.datareader import DefaultDataReader
 from junifer.markers.falff import ALFFSpheres
+from junifer.pipeline import WorkDirManager
 from junifer.pipeline.utils import _check_afni
 from junifer.storage import SQLiteFeatureStorage
 from junifer.testing.datagrabbers import PartlyCloudyTestingDataGrabber
-from junifer.utils import logger
 
 
-_COORDINATES = "DMNBuckner"
+COORDINATES = "DMNBuckner"
 
 
-def test_ALFFSpheres_python() -> None:
-    """Test ALFFSpheres using python."""
-    # Get the SPM auditory data:
-
-    with PartlyCloudyTestingDataGrabber() as dg:
-        input = dg["sub-01"]
-
-    input = DefaultDataReader().fit_transform(input)
-    # Create ParcelAggregation object
-    marker = ALFFSpheres(
-        coords=_COORDINATES,
-        radius=5,
-        method="mean",
-        use_afni=False,
-        fractional=False,
-    )
-    python_values = marker.fit_transform(input)["BOLD"]["data"]
-
-    assert marker.use_afni is False
-    assert python_values.ndim == 2
-    assert python_values.shape == (1, 6)
+def test_ALFFSpheres(caplog: pytest.LogCaptureFixture, tmp_path: Path) -> None:
+    """Test ALFFSpheres.
 
+    Parameters
+    ----------
+    caplog : pytest.LogCaptureFixture
+        The pytest.LogCaptureFixture object.
+    tmp_path : pathlib.Path
+        The path to the test directory.
 
-@pytest.mark.skipif(
-    _check_afni() is False, reason="requires afni to be in PATH"
-)
-def test_ALFFSpheres_afni() -> None:
-    """Test ALFFSpheres using afni."""
-    # Get the SPM auditory data:
-    with PartlyCloudyTestingDataGrabber() as dg:
-        input = dg["sub-01"]
-
-    input = DefaultDataReader().fit_transform(input)
-    # Create ParcelAggregation object
-    marker = ALFFSpheres(
-        coords=_COORDINATES,
-        radius=5,
-        method="mean",
-        use_afni=True,
-        fractional=False,
-    )
-    assert marker.use_afni is True
-    afni_values = marker.fit_transform(input)["BOLD"]["data"]
-
-    assert afni_values.ndim == 2
-    assert afni_values.shape == (1, 6)
-
-    # Again, should be blazing fast
-    marker = ALFFSpheres(
-        coords=_COORDINATES,
-        radius=5,
-        method="mean",
-        fractional=False,
-    )
-    assert marker.use_afni is None
-    afni_values2 = marker.fit_transform(input)["BOLD"]["data"]
-    assert marker.use_afni is True
-    assert_array_equal(afni_values, afni_values2)
+    """
+    with caplog.at_level(logging.DEBUG):
+        with PartlyCloudyTestingDataGrabber() as dg:
+            element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+            # Update workdir to current test's tmp_path
+            WorkDirManager().workdir = tmp_path
+
+            # Initialize marker
+            marker = ALFFSpheres(
+                coords=COORDINATES,
+                fractional=False,
+                using="junifer",
+                radius=5.0,
+            )
+            # Fit transform marker on data
+            output = marker.fit_transform(element_data)
+
+            assert "Creating cache" in caplog.text
+
+            # Get BOLD output
+            assert "BOLD" in output
+            output_bold = output["BOLD"]
+            # Assert BOLD output keys
+            assert "data" in output_bold
+            assert "col_names" in output_bold
+
+            output_bold_data = output_bold["data"]
+            # Assert BOLD output data dimension
+            assert output_bold_data.ndim == 2
+            assert output_bold_data.shape == (1, 6)
+
+            # Reset log capture
+            caplog.clear()
+            # Initialize storage
+            storage = SQLiteFeatureStorage(tmp_path / "falff_spheres.sqlite")
+            # Fit transform marker on data with storage
+            marker.fit_transform(
+                input=element_data,
+                storage=storage,
+            )
+            # Cache working correctly
+            assert "Creating cache" not in caplog.text
 
 
 @pytest.mark.skipif(
-    _check_afni() is False, reason="requires afni to be in PATH"
+    _check_afni() is False, reason="requires AFNI to be in PATH"
 )
 @pytest.mark.parametrize(
     "fractional", [True, False], ids=["fractional", "non-fractional"]
 )
-def test_ALFFSpheres_python_vs_afni(
-    fractional: bool,
-) -> None:
-    """Test ALFFSpheres python vs afni results.
+def test_ALFFSpheres_comparison(tmp_path: Path, fractional: bool) -> None:
+    """Test ALFFSpheres implementation comparison.
 
     Parameters
     ----------
+    tmp_path : pathlib.Path
+        The path to the test directory.
    fractional : bool
        Whether to compute fractional ALFF or not.
-    """
-    with PartlyCloudyTestingDataGrabber() as dg:
-        input = dg["sub-01"]
-
-    input = DefaultDataReader().fit_transform(input)
-    # Create ParcelAggregation object
-    marker_python = ALFFSpheres(
-        coords=_COORDINATES,
-        radius=5,
-        method="mean",
-        use_afni=False,
-        fractional=fractional,
-    )
-    python_values = marker_python.fit_transform(input)["BOLD"]["data"]
-
-    assert marker_python.use_afni is False
-    assert python_values.ndim == 2
-    assert python_values.shape == (1, 6)
-
-    marker_afni = ALFFSpheres(
-        coords=_COORDINATES,
-        radius=5,
-        method="mean",
-        use_afni=True,
-        fractional=fractional,
-    )
-    afni_values = marker_afni.fit_transform(input)["BOLD"]["data"]
-
-    assert marker_afni.use_afni is True
-    assert afni_values.ndim == 2
-    assert afni_values.shape == (1, 6)
-
-    r, p = pearsonr(python_values[0], afni_values[0])
-    logger.info(f"Correlation between python and afni: {r} (p={p})")
-    assert r > 0.99
-
-
-def test_ALFFSpheres_storage(
-    tmp_path: Path,
-) -> None:
-    """Test ALFFSpheres storage.
 
-    Parameters
-    ----------
-    tmp_path : pathlib.Path
-        The path to the test directory.
     """
     with PartlyCloudyTestingDataGrabber() as dg:
-        # Use first subject
-        input = dg["sub-01"]
-        input = DefaultDataReader().fit_transform(input)
-        # Create ParcelAggregation object
-        marker = ALFFSpheres(
-            coords=_COORDINATES,
-            radius=5,
-            method="mean",
-            use_afni=False,
-            fractional=True,
+        element_data = DefaultDataReader().fit_transform(dg["sub-01"])
+        # Update workdir to current test's tmp_path
+        WorkDirManager().workdir = tmp_path
+
+        # Initialize marker
+        junifer_marker = ALFFSpheres(
+            coords=COORDINATES,
+            fractional=fractional,
+            using="junifer",
+            radius=5.0,
        )
-        storage = SQLiteFeatureStorage(tmp_path / "alff_parcels.sqlite")
-
-        # Fit transform marker on data with storage
-        marker.fit_transform(
-            input=input,
-            storage=storage,
+        # Fit transform marker on data
+        junifer_output = junifer_marker.fit_transform(element_data)
+        # Get BOLD output
+        junifer_output_bold = junifer_output["BOLD"]
+
+        # Initialize marker
+        afni_marker = ALFFSpheres(
+            coords=COORDINATES,
+            fractional=fractional,
+            using="afni",
+            radius=5.0,
+        )
+        # Fit transform marker on data
+        afni_output = afni_marker.fit_transform(element_data)
+        # Get BOLD output
+        afni_output_bold = afni_output["BOLD"]
+
+        # Check for Pearson correlation coefficient
+        r, _ = sp.stats.pearsonr(
+            junifer_output_bold["data"][0],
+            afni_output_bold["data"][0],
        )
+        assert r > 0.99
junifer/markers/functional_connectivity/crossparcellation_functional_connectivity.py
@@ -37,6 +37,7 @@ class CrossParcellationFC(BaseMarker):
     name : str, optional
         The name of the marker. If None, will use the class name
         (default None).
+
     """
 
     _DEPENDENCIES: ClassVar[Set[str]] = {"nilearn"}
junifer/markers/functional_connectivity/edge_functional_connectivity_parcels.py
@@ -97,6 +97,7 @@ class EdgeCentricFCParcels(FunctionalConnectivityBase):
 
             * ``data`` : the actual computed values as a numpy.ndarray
             * ``col_names`` : the column labels for the computed values as list
+
         """
         parcel_aggregation = ParcelAggregation(
             parcellation=self.parcellation,
junifer/markers/functional_connectivity/functional_connectivity_base.py
@@ -84,6 +84,7 @@ class FunctionalConnectivityBase(BaseMarker):
         -------
         list of str
             The list of data types that can be used as input for this marker.
+
         """
         return ["BOLD"]
 
junifer/markers/functional_connectivity/tests/test_crossparcellation_functional_connectivity.py
@@ -2,14 +2,17 @@
 
 # Authors: Leonard Sasse <l.sasse@fz-juelich.de>
 #          Kaustubh R. Patil <k.patil@fz-juelich.de>
+#          Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL
 
 from pathlib import Path
 
 import pytest
-from nilearn import image
 
+from junifer.datareader import DefaultDataReader
 from junifer.markers.functional_connectivity import CrossParcellationFC
+from junifer.pipeline import WorkDirManager
+from junifer.pipeline.utils import _check_ants
 from junifer.storage import SQLiteFeatureStorage
 from junifer.testing.datagrabbers import SPMAuditoryTestingDataGrabber
 
@@ -18,31 +21,53 @@ parcellation_one = "Schaefer100x17"
 parcellation_two = "Schaefer200x17"
 
 
-def test_compute() -> None:
-    """Test CrossParcellationFC compute()."""
+def test_init() -> None:
+    """Test CrossParcellationFC init()."""
+    with pytest.raises(ValueError, match="must be different"):
+        CrossParcellationFC(
+            parcellation_one="a",
+            parcellation_two="a",
+            correlation_method="pearson",
+        )
 
-    with SPMAuditoryTestingDataGrabber() as dg:
-        out = dg["sub001"]
-        niimg = image.load_img(str(out["BOLD"]["path"].absolute()))
-        input_dict = {
-            "BOLD": {
-                "data": niimg,
-                "path": out["BOLD"]["path"],
-                "meta": {"element": "sub001"},
-            }
-        }
 
+def test_get_output_type() -> None:
+    """Test CrossParcellationFC get_output_type()."""
+    crossparcellation = CrossParcellationFC(
+        parcellation_one=parcellation_one, parcellation_two=parcellation_two
+    )
+    assert "matrix" == crossparcellation.get_output_type("BOLD")
+
+
+@pytest.mark.skipif(
+    _check_ants() is False, reason="requires ANTs to be in PATH"
+)
+def test_compute(tmp_path: Path) -> None:
+    """Test CrossParcellationFC compute().
+
+    Parameters
+    ----------
+    tmp_path : pathlib.Path
+        The path to the test directory.
+
+    """
+    with SPMAuditoryTestingDataGrabber() as dg:
+        element_data = DefaultDataReader().fit_transform(dg["sub001"])
+        WorkDirManager().workdir = tmp_path
         crossparcellation = CrossParcellationFC(
             parcellation_one=parcellation_one,
             parcellation_two=parcellation_two,
             correlation_method="spearman",
         )
-        out = crossparcellation.compute(input_dict["BOLD"])
+        out = crossparcellation.compute(element_data["BOLD"])
         assert out["data"].shape == (200, 100)
         assert len(out["col_names"]) == 100
         assert len(out["row_names"]) == 200
 
 
+@pytest.mark.skipif(
+    _check_ants() is False, reason="requires ANTs to be in PATH"
+)
 def test_store(tmp_path: Path) -> None:
     """Test CrossParcellationFC store().
 
@@ -52,43 +77,20 @@ def test_store(tmp_path: Path) -> None:
         The path to the test directory.
 
     """
-
     with SPMAuditoryTestingDataGrabber() as dg:
-        input_dict = dg["sub001"]
-        niimg = image.load_img(str(input_dict["BOLD"]["path"].absolute()))
-
-        input_dict["BOLD"]["data"] = niimg
-
+        element_data = DefaultDataReader().fit_transform(dg["sub001"])
+        WorkDirManager().workdir = tmp_path
         crossparcellation = CrossParcellationFC(
             parcellation_one=parcellation_one,
             parcellation_two=parcellation_two,
             correlation_method="spearman",
         )
-        uri = tmp_path / "test_crossparcellation.sqlite"
-        storage = SQLiteFeatureStorage(uri=uri, upsert="ignore")
-        crossparcellation.fit_transform(input_dict, storage=storage)
+        storage = SQLiteFeatureStorage(
+            uri=tmp_path / "test_crossparcellation.sqlite", upsert="ignore"
+        )
+        # Fit transform marker on data with storage
+        crossparcellation.fit_transform(input=element_data, storage=storage)
         features = storage.list_features()
         assert any(
             x["name"] == "BOLD_CrossParcellationFC" for x in features.values()
         )
-
-
-def test_get_output_type() -> None:
-    """Test CrossParcellationFC get_output_type()."""
-
-    crossparcellation = CrossParcellationFC(
-        parcellation_one=parcellation_one, parcellation_two=parcellation_two
-    )
-    input_ = "BOLD"
-    output = crossparcellation.get_output_type(input_)
-    assert output == "matrix"
-
-
-def test_init_() -> None:
-    """Test CrossParcellationFC init()."""
-    with pytest.raises(ValueError, match="must be different"):
-        CrossParcellationFC(
-            parcellation_one="a",
-            parcellation_two="a",
-            correlation_method="pearson",
-        )