junifer 0.0.5.dev180__py3-none-any.whl → 0.0.5.dev202__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. junifer/_version.py +2 -2
  2. junifer/data/masks/ukb/UKB_15K_GM_template.nii.gz +0 -0
  3. junifer/data/masks.py +36 -0
  4. junifer/data/tests/test_masks.py +17 -0
  5. junifer/datagrabber/tests/test_datalad_base.py +4 -4
  6. junifer/datagrabber/tests/test_pattern_datalad.py +4 -4
  7. junifer/markers/base.py +49 -23
  8. junifer/markers/brainprint.py +56 -265
  9. junifer/markers/complexity/complexity_base.py +23 -43
  10. junifer/markers/complexity/tests/test_hurst_exponent.py +4 -3
  11. junifer/markers/complexity/tests/test_multiscale_entropy_auc.py +4 -3
  12. junifer/markers/complexity/tests/test_perm_entropy.py +4 -3
  13. junifer/markers/complexity/tests/test_range_entropy.py +4 -3
  14. junifer/markers/complexity/tests/test_range_entropy_auc.py +4 -3
  15. junifer/markers/complexity/tests/test_sample_entropy.py +4 -3
  16. junifer/markers/complexity/tests/test_weighted_perm_entropy.py +4 -3
  17. junifer/markers/ets_rss.py +24 -42
  18. junifer/markers/falff/falff_base.py +17 -46
  19. junifer/markers/falff/falff_parcels.py +53 -27
  20. junifer/markers/falff/falff_spheres.py +57 -29
  21. junifer/markers/falff/tests/test_falff_parcels.py +39 -23
  22. junifer/markers/falff/tests/test_falff_spheres.py +39 -23
  23. junifer/markers/functional_connectivity/crossparcellation_functional_connectivity.py +32 -48
  24. junifer/markers/functional_connectivity/edge_functional_connectivity_parcels.py +16 -10
  25. junifer/markers/functional_connectivity/edge_functional_connectivity_spheres.py +13 -9
  26. junifer/markers/functional_connectivity/functional_connectivity_base.py +26 -40
  27. junifer/markers/functional_connectivity/functional_connectivity_parcels.py +6 -6
  28. junifer/markers/functional_connectivity/functional_connectivity_spheres.py +6 -6
  29. junifer/markers/functional_connectivity/tests/test_crossparcellation_functional_connectivity.py +8 -4
  30. junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_parcels.py +6 -3
  31. junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_spheres.py +6 -3
  32. junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py +6 -3
  33. junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py +10 -5
  34. junifer/markers/parcel_aggregation.py +40 -59
  35. junifer/markers/reho/reho_base.py +6 -27
  36. junifer/markers/reho/reho_parcels.py +23 -15
  37. junifer/markers/reho/reho_spheres.py +22 -16
  38. junifer/markers/reho/tests/test_reho_parcels.py +8 -3
  39. junifer/markers/reho/tests/test_reho_spheres.py +8 -3
  40. junifer/markers/sphere_aggregation.py +40 -59
  41. junifer/markers/temporal_snr/temporal_snr_base.py +20 -32
  42. junifer/markers/temporal_snr/temporal_snr_parcels.py +6 -6
  43. junifer/markers/temporal_snr/temporal_snr_spheres.py +6 -6
  44. junifer/markers/temporal_snr/tests/test_temporal_snr_parcels.py +6 -3
  45. junifer/markers/temporal_snr/tests/test_temporal_snr_spheres.py +6 -3
  46. junifer/markers/tests/test_brainprint.py +23 -12
  47. junifer/markers/tests/test_collection.py +9 -8
  48. junifer/markers/tests/test_ets_rss.py +15 -9
  49. junifer/markers/tests/test_markers_base.py +17 -18
  50. junifer/markers/tests/test_parcel_aggregation.py +93 -32
  51. junifer/markers/tests/test_sphere_aggregation.py +72 -19
  52. junifer/pipeline/pipeline_step_mixin.py +11 -1
  53. junifer/pipeline/tests/test_registry.py +1 -1
  54. {junifer-0.0.5.dev180.dist-info → junifer-0.0.5.dev202.dist-info}/METADATA +1 -1
  55. {junifer-0.0.5.dev180.dist-info → junifer-0.0.5.dev202.dist-info}/RECORD +60 -59
  56. {junifer-0.0.5.dev180.dist-info → junifer-0.0.5.dev202.dist-info}/WHEEL +1 -1
  57. {junifer-0.0.5.dev180.dist-info → junifer-0.0.5.dev202.dist-info}/AUTHORS.rst +0 -0
  58. {junifer-0.0.5.dev180.dist-info → junifer-0.0.5.dev202.dist-info}/LICENSE.md +0 -0
  59. {junifer-0.0.5.dev180.dist-info → junifer-0.0.5.dev202.dist-info}/entry_points.txt +0 -0
  60. {junifer-0.0.5.dev180.dist-info → junifer-0.0.5.dev202.dist-info}/top_level.txt +0 -0
@@ -53,6 +53,12 @@ class ComplexityBase(BaseMarker):
 
     _DEPENDENCIES: ClassVar[Set[str]] = {"nilearn", "neurokit2"}
 
+    _MARKER_INOUT_MAPPINGS: ClassVar[Dict[str, Dict[str, str]]] = {
+        "BOLD": {
+            "complexity": "vector",
+        },
+    }
+
     def __init__(
         self,
         parcellation: Union[str, List[str]],
@@ -78,33 +84,6 @@ class ComplexityBase(BaseMarker):
             klass=NotImplementedError,
         )
 
-    def get_valid_inputs(self) -> List[str]:
-        """Get valid data types for input.
-
-        Returns
-        -------
-        list of str
-            The list of data types that can be used as input for this marker.
-
-        """
-        return ["BOLD"]
-
-    def get_output_type(self, input_type: str) -> str:
-        """Get output type.
-
-        Parameters
-        ----------
-        input_type : str
-            The data type input to the marker.
-
-        Returns
-        -------
-        str
-            The storage type output by the marker.
-
-        """
-        return "vector"
-
     def compute(
         self,
         input: Dict[str, Any],
@@ -124,29 +103,30 @@ class ComplexityBase(BaseMarker):
         Returns
         -------
         dict
-            The computed result as dictionary. The following keys will be
-            included in the dictionary:
+            The computed result as dictionary. This will be either returned
+            to the user or stored in the storage by calling the store method
+            with this as a parameter. The dictionary has the following keys:
+
+            * ``complexity`` : dictionary with the following keys:
 
-            * ``data`` : ROI-wise complexity measures as ``numpy.ndarray``
-            * ``col_names`` : ROI labels for the complexity measures as list
+              - ``data`` : ROI-wise complexity measures as ``numpy.ndarray``
+              - ``col_names`` : ROI labels as list of str
 
         """
-        # Initialize a ParcelAggregation
+        # Extract the 2D time series using ParcelAggregation
         parcel_aggregation = ParcelAggregation(
             parcellation=self.parcellation,
             method=self.agg_method,
             method_params=self.agg_method_params,
             masks=self.masks,
             on="BOLD",
-        )
-        # Extract the 2D time series using parcel aggregation
-        parcel_aggregation_map = parcel_aggregation.compute(
-            input=input, extra_input=extra_input
-        )
-
+        ).compute(input=input, extra_input=extra_input)
         # Compute complexity measure
-        parcel_aggregation_map["data"] = self.compute_complexity(
-            parcel_aggregation_map["data"]
-        )
-
-        return parcel_aggregation_map
+        return {
+            "complexity": {
+                "data": self.compute_complexity(
+                    parcel_aggregation["aggregation"]["data"]
+                ),
+                "col_names": parcel_aggregation["aggregation"]["col_names"],
+            }
+        }
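
Taken together, the complexity_base.py hunks replace the per-marker
get_valid_inputs() / get_output_type() methods with a declarative
_MARKER_INOUT_MAPPINGS class attribute and nest the computed values under a
named feature key. A minimal sketch of how calling code would read the result
under the new layout (the marker choice, parcellation name, and element_data
are illustrative placeholders, not taken from this diff):

    from junifer.markers.complexity import RangeEntropy

    marker = RangeEntropy(parcellation="Schaefer100x17")
    # element_data would come from a junifer datagrabber / datareader
    feature_map = marker.fit_transform(element_data)
    # Old layout: feature_map["BOLD"]["data"]
    # New layout: one extra level, keyed by the feature name
    data = feature_map["BOLD"]["complexity"]["data"]         # numpy.ndarray
    labels = feature_map["BOLD"]["complexity"]["col_names"]  # list of str

The storage type is now resolved from the mapping, which is why the test
hunks below call get_output_type(input_type="BOLD", output_feature="complexity").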
@@ -40,13 +40,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test HurstExponent get_output_type()."""
-    marker = HurstExponent(parcellation=PARCELLATION)
-    assert marker.get_output_type("BOLD") == "vector"
+    assert "vector" == HurstExponent(
+        parcellation=PARCELLATION
+    ).get_output_type(input_type="BOLD", output_feature="complexity")
 
 
 @pytest.mark.skipif(
@@ -39,13 +39,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test MultiscaleEntropyAUC get_output_type()."""
-    marker = MultiscaleEntropyAUC(parcellation=PARCELLATION)
-    assert marker.get_output_type("BOLD") == "vector"
+    assert "vector" == MultiscaleEntropyAUC(
+        parcellation=PARCELLATION
+    ).get_output_type(input_type="BOLD", output_feature="complexity")
 
 
 @pytest.mark.skipif(
@@ -39,13 +39,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test PermEntropy get_output_type()."""
-    marker = PermEntropy(parcellation=PARCELLATION)
-    assert marker.get_output_type("BOLD") == "vector"
+    assert "vector" == PermEntropy(parcellation=PARCELLATION).get_output_type(
+        input_type="BOLD", output_feature="complexity"
+    )
 
 
 @pytest.mark.skipif(
@@ -40,13 +40,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test RangeEntropy get_output_type()."""
-    marker = RangeEntropy(parcellation=PARCELLATION)
-    assert marker.get_output_type("BOLD") == "vector"
+    assert "vector" == RangeEntropy(parcellation=PARCELLATION).get_output_type(
+        input_type="BOLD", output_feature="complexity"
+    )
 
 
 @pytest.mark.skipif(
@@ -40,13 +40,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test RangeEntropyAUC get_output_type()."""
-    marker = RangeEntropyAUC(parcellation=PARCELLATION)
-    assert marker.get_output_type("BOLD") == "vector"
+    assert "vector" == RangeEntropyAUC(
+        parcellation=PARCELLATION
+    ).get_output_type(input_type="BOLD", output_feature="complexity")
 
 
 @pytest.mark.skipif(
@@ -39,13 +39,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test SampleEntropy get_output_type()."""
-    marker = SampleEntropy(parcellation=PARCELLATION)
-    assert marker.get_output_type("BOLD") == "vector"
+    assert "vector" == SampleEntropy(
+        parcellation=PARCELLATION
+    ).get_output_type(input_type="BOLD", output_feature="complexity")
 
 
 @pytest.mark.skipif(
@@ -39,13 +39,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test WeightedPermEntropy get_output_type()."""
-    marker = WeightedPermEntropy(parcellation=PARCELLATION)
-    assert marker.get_output_type("BOLD") == "vector"
+    assert "vector" == WeightedPermEntropy(
+        parcellation=PARCELLATION
+    ).get_output_type(input_type="BOLD", output_feature="complexity")
 
 
 @pytest.mark.skipif(
@@ -47,6 +47,12 @@ class RSSETSMarker(BaseMarker):
 
     _DEPENDENCIES: ClassVar[Set[str]] = {"nilearn"}
 
+    _MARKER_INOUT_MAPPINGS: ClassVar[Dict[str, Dict[str, str]]] = {
+        "BOLD": {
+            "rss_ets": "timeseries",
+        },
+    }
+
     def __init__(
         self,
         parcellation: Union[str, List[str]],
@@ -61,33 +67,6 @@ class RSSETSMarker(BaseMarker):
         self.masks = masks
         super().__init__(name=name)
 
-    def get_valid_inputs(self) -> List[str]:
-        """Get valid data types for input.
-
-        Returns
-        -------
-        list of str
-            The list of data types that can be used as input for this marker.
-
-        """
-        return ["BOLD"]
-
-    def get_output_type(self, input_type: str) -> str:
-        """Get output type.
-
-        Parameters
-        ----------
-        input_type : str
-            The data type input to the marker.
-
-        Returns
-        -------
-        str
-            The storage type output by the marker.
-
-        """
-        return "timeseries"
-
     def compute(
         self,
         input: Dict[str, Any],
@@ -109,8 +88,9 @@ class RSSETSMarker(BaseMarker):
         Returns
         -------
         dict
-            The computed result as dictionary. The dictionary has the following
-            keys:
+            The computed result as dictionary. This will be either returned
+            to the user or stored in the storage by calling the store method
+            with this as a parameter. The dictionary has the following keys:
 
             * ``data`` : the actual computed values as a numpy.ndarray
             * ``col_names`` : the column labels for the computed values as list
@@ -124,20 +104,22 @@ class RSSETSMarker(BaseMarker):
 
         """
         logger.debug("Calculating root sum of squares of edgewise timeseries.")
-        # Initialize a ParcelAggregation
-        parcel_aggregation = ParcelAggregation(
+        # Perform aggregation
+        aggregation = ParcelAggregation(
             parcellation=self.parcellation,
             method=self.agg_method,
             method_params=self.agg_method_params,
             masks=self.masks,
-        )
-        # Compute the parcel aggregation
-        out = parcel_aggregation.compute(input=input, extra_input=extra_input)
-        edge_ts, _ = _ets(out["data"])
-        # Compute the RSS
-        out["data"] = np.sum(edge_ts**2, 1) ** 0.5
-        # Make it 2D
-        out["data"] = out["data"][:, np.newaxis]
-        # Set correct column label
-        out["col_names"] = ["root_sum_of_squares_ets"]
-        return out
+        ).compute(input=input, extra_input=extra_input)
+        # Compute edgewise timeseries
+        edge_ts, _ = _ets(aggregation["aggregation"]["data"])
+        # Compute the RSS of edgewise timeseries
+        rss = np.sum(edge_ts**2, 1) ** 0.5
+
+        return {
+            "rss_ets": {
+                # Make it 2D
+                "data": rss[:, np.newaxis],
+                "col_names": ["root_sum_of_squares_ets"],
+            }
+        }
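
For context on the values stored under "rss_ets": the marker builds edgewise
timeseries from the ROI-aggregated BOLD signal and takes the root sum of
squares over edges at each timepoint, i.e. the np.sum(edge_ts**2, 1) ** 0.5
expression above. A self-contained numpy sketch, assuming the usual
edge-timeseries construction (pairwise products of z-scored ROI signals);
junifer's actual _ets helper may differ in details such as returning edge
labels:

    import numpy as np

    def edge_timeseries(bold: np.ndarray) -> np.ndarray:
        # bold: (n_timepoints, n_rois); z-score each ROI, multiply ROI pairs
        z = (bold - bold.mean(axis=0)) / bold.std(axis=0)
        i, j = np.triu_indices(bold.shape[1], k=1)
        return z[:, i] * z[:, j]  # (n_timepoints, n_edges)

    bold = np.random.default_rng(0).standard_normal((200, 10))
    edge_ts = edge_timeseries(bold)
    rss = np.sum(edge_ts**2, 1) ** 0.5  # RSS over edges, per timepoint
    out = rss[:, np.newaxis]            # 2D column, as the marker stores it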
@@ -37,8 +37,6 @@ class ALFFBase(BaseMarker):
 
     Parameters
     ----------
-    fractional : bool
-        Whether to compute fractional ALFF.
     highpass : positive float
         Highpass cutoff frequency.
     lowpass : positive float
@@ -85,9 +83,15 @@ class ALFFBase(BaseMarker):
         },
     ]
 
+    _MARKER_INOUT_MAPPINGS: ClassVar[Dict[str, Dict[str, str]]] = {
+        "BOLD": {
+            "alff": "vector",
+            "falff": "vector",
+        },
+    }
+
     def __init__(
         self,
-        fractional: bool,
         highpass: float,
         lowpass: float,
         using: str,
@@ -110,45 +114,12 @@ class ALFFBase(BaseMarker):
         )
         self.using = using
         self.tr = tr
-        self.fractional = fractional
-
-        # Create a name based on the class name if none is provided
-        if name is None:
-            suffix = "_fractional" if fractional else ""
-            name = f"{self.__class__.__name__}{suffix}"
         super().__init__(on="BOLD", name=name)
 
-    def get_valid_inputs(self) -> List[str]:
-        """Get valid data types for input.
-
-        Returns
-        -------
-        list of str
-            The list of data types that can be used as input for this marker.
-
-        """
-        return ["BOLD"]
-
-    def get_output_type(self, input_type: str) -> str:
-        """Get output type.
-
-        Parameters
-        ----------
-        input_type : str
-            The data type input to the marker.
-
-        Returns
-        -------
-        str
-            The storage type output by the marker.
-
-        """
-        return "vector"
-
     def _compute(
         self,
         input_data: Dict[str, Any],
-    ) -> Tuple["Nifti1Image", Path]:
+    ) -> Tuple["Nifti1Image", "Nifti1Image", Path, Path]:
         """Compute ALFF and fALFF.
 
         Parameters
@@ -161,9 +132,13 @@ class ALFFBase(BaseMarker):
         Returns
         -------
         Niimg-like object
-            The ALFF / fALFF as NIfTI.
+            The ALFF as NIfTI.
+        Niimg-like object
+            The fALFF as NIfTI.
+        pathlib.Path
+            The path to the ALFF as NIfTI.
         pathlib.Path
-            The path to the ALFF / fALFF as NIfTI.
+            The path to the fALFF as NIfTI.
 
         """
         logger.debug("Calculating ALFF and fALFF")
@@ -186,11 +161,7 @@ class ALFFBase(BaseMarker):
         # parcellation / coordinates to native space, else the
         # path should be passed for use later if required.
         # TODO(synchon): will be taken care in #292
-        if input_data["space"] == "native" and self.fractional:
-            return falff, input_data["path"]
-        elif input_data["space"] == "native" and not self.fractional:
-            return alff, input_data["path"]
-        elif input_data["space"] != "native" and self.fractional:
-            return falff, falff_path
+        if input_data["space"] == "native":
+            return alff, falff, input_data["path"], input_data["path"]
         else:
-            return alff, alff_path
+            return alff, falff, alff_path, falff_path
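
With the fractional flag gone, _compute always returns both maps as a 4-tuple,
and the native-space special case collapses to a single branch: in native
space both outputs reuse input_data["path"], otherwise each gets its own file
path. A hedged sketch of the new internal contract, as it would look inside an
ALFFBase subclass (names follow the diff; input_data is a junifer BOLD data
dictionary):

    # Returns a 4-tuple instead of the previous (image, path) pair
    alff_img, falff_img, alff_path, falff_path = self._compute(
        input_data=input_data
    )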
@@ -26,8 +26,6 @@ class ALFFParcels(ALFFBase):
     parcellation : str or list of str
         The name(s) of the parcellation(s). Check valid options by calling
         :func:`.list_parcellations`.
-    fractional : bool
-        Whether to compute fractional ALFF.
     using : {"junifer", "afni"}
         Implementation to use for computing ALFF:
 
@@ -73,7 +71,6 @@ class ALFFParcels(ALFFBase):
     def __init__(
         self,
         parcellation: Union[str, List[str]],
-        fractional: bool,
         using: str,
         highpass: float = 0.01,
         lowpass: float = 0.1,
@@ -85,7 +82,6 @@ class ALFFParcels(ALFFBase):
     ) -> None:
         # Superclass init first to validate `using` parameter
         super().__init__(
-            fractional=fractional,
             highpass=highpass,
             lowpass=lowpass,
             using=using,
@@ -114,33 +110,63 @@ class ALFFParcels(ALFFBase):
         Returns
         -------
         dict
-            The computed result as dictionary. The dictionary has the following
-            keys:
+            The computed result as dictionary. This will be either returned
+            to the user or stored in the storage by calling the store method
+            with this as a parameter. The dictionary has the following keys:
 
-            * ``data`` : the actual computed values as a numpy.ndarray
-            * ``col_names`` : the column labels for the computed values as list
+            * ``alff`` : dictionary with the following keys:
+
+              - ``data`` : ROI values as ``numpy.ndarray``
+              - ``col_names`` : ROI labels as list of str
+
+            * ``falff`` : dictionary with the following keys:
+
+              - ``data`` : ROI values as ``numpy.ndarray``
+              - ``col_names`` : ROI labels as list of str
 
         """
         logger.info("Calculating ALFF / fALFF for parcels")
 
-        # Compute ALFF / fALFF
-        output_data, output_file_path = self._compute(input_data=input)
-
-        # Initialize parcel aggregation
-        parcel_aggregation = ParcelAggregation(
-            parcellation=self.parcellation,
-            method=self.agg_method,
-            method_params=self.agg_method_params,
-            masks=self.masks,
-            on="BOLD",
-        )
-        # Perform aggregation on ALFF / fALFF
-        parcel_aggregation_input = dict(input.items())
-        parcel_aggregation_input["data"] = output_data
-        parcel_aggregation_input["path"] = output_file_path
-        output = parcel_aggregation.compute(
-            input=parcel_aggregation_input,
-            extra_input=extra_input,
+        # Compute ALFF + fALFF
+        alff_output, falff_output, alff_output_path, falff_output_path = (
+            self._compute(input_data=input)
         )
 
-        return output
+        # Perform aggregation on ALFF + fALFF
+        aggregation_alff_input = dict(input.items())
+        aggregation_falff_input = dict(input.items())
+        aggregation_alff_input["data"] = alff_output
+        aggregation_falff_input["data"] = falff_output
+        aggregation_alff_input["path"] = alff_output_path
+        aggregation_falff_input["path"] = falff_output_path
+
+        return {
+            "alff": {
+                **ParcelAggregation(
+                    parcellation=self.parcellation,
+                    method=self.agg_method,
+                    method_params=self.agg_method_params,
+                    masks=self.masks,
+                    on="BOLD",
+                ).compute(
+                    input=aggregation_alff_input,
+                    extra_input=extra_input,
+                )[
+                    "aggregation"
+                ],
+            },
+            "falff": {
+                **ParcelAggregation(
+                    parcellation=self.parcellation,
+                    method=self.agg_method,
+                    method_params=self.agg_method_params,
+                    masks=self.masks,
+                    on="BOLD",
+                ).compute(
+                    input=aggregation_falff_input,
+                    extra_input=extra_input,
+                )[
+                    "aggregation"
+                ],
+            },
+        }
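
The net effect for ALFFParcels users: a single fit_transform call now yields
both features, keyed "alff" and "falff", instead of two marker instances
distinguished by the removed fractional flag. A sketch (parcellation name and
element_data are placeholders):

    marker = ALFFParcels(parcellation="Schaefer100x17", using="junifer")
    feature_map = marker.fit_transform(element_data)
    alff = feature_map["BOLD"]["alff"]["data"]    # ROI-wise ALFF values
    falff = feature_map["BOLD"]["falff"]["data"]  # ROI-wise fALFF values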
@@ -26,8 +26,6 @@ class ALFFSpheres(ALFFBase):
     coords : str
         The name of the coordinates list to use. See
         :func:`.list_coordinates` for options.
-    fractional : bool
-        Whether to compute fractional ALFF.
     using : {"junifer", "afni"}
         Implementation to use for computing ALFF:
 
@@ -80,7 +78,6 @@ class ALFFSpheres(ALFFBase):
     def __init__(
         self,
         coords: str,
-        fractional: bool,
         using: str,
         radius: Optional[float] = None,
         allow_overlap: bool = False,
@@ -94,7 +91,6 @@ class ALFFSpheres(ALFFBase):
     ) -> None:
         # Superclass init first to validate `using` parameter
         super().__init__(
-            fractional=fractional,
             highpass=highpass,
             lowpass=lowpass,
             using=using,
@@ -125,35 +121,67 @@ class ALFFSpheres(ALFFBase):
         Returns
         -------
         dict
-            The computed result as dictionary. The dictionary has the following
-            keys:
+            The computed result as dictionary. This will be either returned
+            to the user or stored in the storage by calling the store method
+            with this as a parameter. The dictionary has the following keys:
 
-            * ``data`` : the actual computed values as a numpy.ndarray
-            * ``col_names`` : the column labels for the computed values as list
+            * ``alff`` : dictionary with the following keys:
+
+              - ``data`` : ROI values as ``numpy.ndarray``
+              - ``col_names`` : ROI labels as list of str
+
+            * ``falff`` : dictionary with the following keys:
+
+              - ``data`` : ROI values as ``numpy.ndarray``
+              - ``col_names`` : ROI labels as list of str
 
         """
         logger.info("Calculating ALFF / fALFF for spheres")
 
-        # Compute ALFF / fALFF
-        output_data, output_file_path = self._compute(input_data=input)
-
-        # Initialize sphere aggregation
-        sphere_aggregation = SphereAggregation(
-            coords=self.coords,
-            radius=self.radius,
-            allow_overlap=self.allow_overlap,
-            method=self.agg_method,
-            method_params=self.agg_method_params,
-            masks=self.masks,
-            on="BOLD",
-        )
-        # Perform aggregation on ALFF / fALFF
-        sphere_aggregation_input = dict(input.items())
-        sphere_aggregation_input["data"] = output_data
-        sphere_aggregation_input["path"] = output_file_path
-        output = sphere_aggregation.compute(
-            input=sphere_aggregation_input,
-            extra_input=extra_input,
+        # Compute ALFF + fALFF
+        alff_output, falff_output, alff_output_path, falff_output_path = (
+            self._compute(input_data=input)
         )
 
-        return output
+        # Perform aggregation on ALFF / fALFF
+        aggregation_alff_input = dict(input.items())
+        aggregation_falff_input = dict(input.items())
+        aggregation_alff_input["data"] = alff_output
+        aggregation_falff_input["data"] = falff_output
+        aggregation_alff_input["path"] = alff_output_path
+        aggregation_falff_input["path"] = falff_output_path
+
+        return {
+            "alff": {
+                **SphereAggregation(
+                    coords=self.coords,
+                    radius=self.radius,
+                    allow_overlap=self.allow_overlap,
+                    method=self.agg_method,
+                    method_params=self.agg_method_params,
+                    masks=self.masks,
+                    on="BOLD",
+                ).compute(
+                    input=aggregation_alff_input,
+                    extra_input=extra_input,
+                )[
+                    "aggregation"
+                ],
+            },
+            "falff": {
+                **SphereAggregation(
+                    coords=self.coords,
+                    radius=self.radius,
+                    allow_overlap=self.allow_overlap,
+                    method=self.agg_method,
+                    method_params=self.agg_method_params,
+                    masks=self.masks,
+                    on="BOLD",
+                ).compute(
+                    input=aggregation_falff_input,
+                    extra_input=extra_input,
+                )[
+                    "aggregation"
+                ],
+            },
+        }
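
ALFFSpheres mirrors the parcels variant, swapping ParcelAggregation for
SphereAggregation; a corresponding sketch (coordinates name and radius are
placeholders):

    marker = ALFFSpheres(coords="DMNBuckner", radius=5.0, using="junifer")
    feature_map = marker.fit_transform(element_data)
    falff_labels = feature_map["BOLD"]["falff"]["col_names"]  # sphere labels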