junifer 0.0.6.dev538__py3-none-any.whl → 0.0.7.dev43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. junifer/_version.py +2 -2
  2. junifer/api/decorators.py +0 -1
  3. junifer/api/functions.py +1 -2
  4. junifer/api/queue_context/gnu_parallel_local_adapter.py +4 -7
  5. junifer/api/queue_context/htcondor_adapter.py +6 -10
  6. junifer/cli/tests/test_parser.py +66 -0
  7. junifer/data/_dispatch.py +5 -5
  8. junifer/data/coordinates/_ants_coordinates_warper.py +1 -1
  9. junifer/data/coordinates/_coordinates.py +1 -1
  10. junifer/data/coordinates/_fsl_coordinates_warper.py +1 -1
  11. junifer/data/coordinates/tests/test_coordinates.py +38 -16
  12. junifer/data/masks/_ants_mask_warper.py +1 -1
  13. junifer/data/masks/_fsl_mask_warper.py +1 -1
  14. junifer/data/masks/tests/test_masks.py +66 -33
  15. junifer/data/parcellations/_ants_parcellation_warper.py +1 -1
  16. junifer/data/parcellations/_fsl_parcellation_warper.py +1 -1
  17. junifer/data/parcellations/_parcellations.py +7 -7
  18. junifer/data/parcellations/tests/test_parcellations.py +162 -76
  19. junifer/data/tests/test_data_utils.py +0 -1
  20. junifer/data/utils.py +1 -1
  21. junifer/datagrabber/aomic/id1000.py +6 -0
  22. junifer/datagrabber/aomic/piop1.py +4 -3
  23. junifer/datagrabber/aomic/piop2.py +4 -3
  24. junifer/datagrabber/pattern_datalad.py +0 -1
  25. junifer/datagrabber/pattern_validation_mixin.py +0 -1
  26. junifer/datagrabber/tests/test_dmcc13_benchmark.py +4 -8
  27. junifer/external/nilearn/junifer_connectivity_measure.py +32 -3
  28. junifer/external/nilearn/tests/test_junifer_connectivity_measure.py +45 -1
  29. junifer/markers/base.py +3 -3
  30. junifer/markers/brainprint.py +5 -5
  31. junifer/markers/complexity/multiscale_entropy_auc.py +3 -3
  32. junifer/markers/complexity/range_entropy.py +3 -3
  33. junifer/markers/complexity/sample_entropy.py +3 -3
  34. junifer/markers/falff/falff_parcels.py +2 -6
  35. junifer/markers/falff/falff_spheres.py +2 -6
  36. junifer/markers/functional_connectivity/functional_connectivity_base.py +7 -5
  37. junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py +2 -1
  38. junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py +2 -1
  39. junifer/markers/reho/_afni_reho.py +1 -1
  40. junifer/markers/reho/reho_base.py +0 -1
  41. junifer/markers/reho/reho_parcels.py +0 -1
  42. junifer/markers/reho/reho_spheres.py +0 -1
  43. junifer/markers/temporal_snr/temporal_snr_base.py +0 -1
  44. junifer/markers/tests/test_markers_base.py +0 -1
  45. junifer/onthefly/_brainprint.py +3 -3
  46. junifer/onthefly/read_transform.py +1 -2
  47. junifer/onthefly/tests/test_read_transform.py +0 -1
  48. junifer/pipeline/tests/test_marker_collection.py +2 -1
  49. junifer/pipeline/workdir_manager.py +1 -2
  50. junifer/preprocess/confounds/fmriprep_confound_remover.py +1 -1
  51. junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +0 -1
  52. junifer/preprocess/smoothing/tests/test_smoothing.py +0 -1
  53. junifer/preprocess/warping/_ants_warper.py +2 -3
  54. junifer/preprocess/warping/_fsl_warper.py +1 -1
  55. junifer/preprocess/warping/space_warper.py +4 -2
  56. junifer/storage/pandas_base.py +3 -1
  57. junifer/storage/sqlite.py +3 -8
  58. junifer/storage/tests/test_pandas_base.py +6 -3
  59. junifer/storage/tests/test_storage_base.py +2 -1
  60. junifer/utils/logging.py +38 -128
  61. junifer/utils/tests/test_logging.py +12 -4
  62. {junifer-0.0.6.dev538.dist-info → junifer-0.0.7.dev43.dist-info}/METADATA +3 -2
  63. {junifer-0.0.6.dev538.dist-info → junifer-0.0.7.dev43.dist-info}/RECORD +68 -91
  64. {junifer-0.0.6.dev538.dist-info → junifer-0.0.7.dev43.dist-info}/WHEEL +1 -1
  65. junifer/data/coordinates/VOIs/meta/AutobiographicalMemory_VOIs.txt +0 -23
  66. junifer/data/coordinates/VOIs/meta/CogAC_VOIs.txt +0 -19
  67. junifer/data/coordinates/VOIs/meta/CogAR_VOIs.txt +0 -8
  68. junifer/data/coordinates/VOIs/meta/DMNBuckner_VOIs.txt +0 -6
  69. junifer/data/coordinates/VOIs/meta/Dosenbach2010_MNI_VOIs.txt +0 -160
  70. junifer/data/coordinates/VOIs/meta/Empathy_VOIs.txt +0 -22
  71. junifer/data/coordinates/VOIs/meta/Motor_VOIs.txt +0 -10
  72. junifer/data/coordinates/VOIs/meta/MultiTask_VOIs.txt +0 -9
  73. junifer/data/coordinates/VOIs/meta/PhysioStress_VOIs.txt +0 -18
  74. junifer/data/coordinates/VOIs/meta/Power2011_MNI_VOIs.txt +0 -264
  75. junifer/data/coordinates/VOIs/meta/Power2013_MNI_VOIs.tsv +0 -264
  76. junifer/data/coordinates/VOIs/meta/Rew_VOIs.txt +0 -25
  77. junifer/data/coordinates/VOIs/meta/Somatosensory_VOIs.txt +0 -10
  78. junifer/data/coordinates/VOIs/meta/ToM_VOIs.txt +0 -15
  79. junifer/data/coordinates/VOIs/meta/VigAtt_VOIs.txt +0 -16
  80. junifer/data/coordinates/VOIs/meta/WM_VOIs.txt +0 -23
  81. junifer/data/coordinates/VOIs/meta/eMDN_VOIs.txt +0 -17
  82. junifer/data/coordinates/VOIs/meta/eSAD_VOIs.txt +0 -12
  83. junifer/data/coordinates/VOIs/meta/extDMN_VOIs.txt +0 -16
  84. junifer/data/masks/ukb/UKB_15K_GM_template.nii.gz +0 -0
  85. junifer/data/masks/vickery-patil/CAT12_IXI555_MNI152_TMP_GS_GMprob0.2_clean.nii.gz +0 -0
  86. junifer/data/masks/vickery-patil/CAT12_IXI555_MNI152_TMP_GS_GMprob0.2_clean_3mm.nii.gz +0 -0
  87. junifer/data/masks/vickery-patil/GMprob0.2_cortex_3mm_NA_rm.nii.gz +0 -0
  88. {junifer-0.0.6.dev538.dist-info → junifer-0.0.7.dev43.dist-info}/entry_points.txt +0 -0
  89. {junifer-0.0.6.dev538.dist-info → junifer-0.0.7.dev43.dist-info}/licenses/AUTHORS.rst +0 -0
  90. {junifer-0.0.6.dev538.dist-info → junifer-0.0.7.dev43.dist-info}/licenses/LICENSE.md +0 -0
  91. {junifer-0.0.6.dev538.dist-info → junifer-0.0.7.dev43.dist-info}/top_level.txt +0 -0
junifer/datagrabber/tests/test_dmcc13_benchmark.py CHANGED
@@ -264,7 +264,7 @@ def test_DMCC13Benchmark_invalid_sessions():
     """Test DMCC13Benchmark DataGrabber invalid sessions."""
     with pytest.raises(
         ValueError,
-        match=("phonyses is not a valid session in " "the DMCC dataset"),
+        match=("phonyses is not a valid session in the DMCC dataset"),
     ):
         DMCC13Benchmark(sessions="phonyses")

@@ -273,9 +273,7 @@ def test_DMCC13Benchmark_invalid_tasks():
     """Test DMCC13Benchmark DataGrabber invalid tasks."""
     with pytest.raises(
         ValueError,
-        match=(
-            "thisisnotarealtask is not a valid task in " "the DMCC dataset"
-        ),
+        match=("thisisnotarealtask is not a valid task in the DMCC dataset"),
     ):
         DMCC13Benchmark(tasks="thisisnotarealtask")

@@ -284,9 +282,7 @@ def test_DMCC13Benchmark_phase_encodings():
     """Test DMCC13Benchmark DataGrabber invalid phase encodings."""
     with pytest.raises(
         ValueError,
-        match=(
-            "moonphase is not a valid phase encoding in " "the DMCC dataset"
-        ),
+        match=("moonphase is not a valid phase encoding in the DMCC dataset"),
     ):
         DMCC13Benchmark(phase_encodings="moonphase")

@@ -295,6 +291,6 @@ def test_DMCC13Benchmark_runs():
     """Test DMCC13Benchmark DataGrabber invalid runs."""
     with pytest.raises(
         ValueError,
-        match=("cerebralrun is not a valid run in " "the DMCC dataset"),
+        match=("cerebralrun is not a valid run in the DMCC dataset"),
     ):
         DMCC13Benchmark(runs="cerebralrun")
junifer/external/nilearn/junifer_connectivity_measure.py CHANGED
@@ -3,6 +3,8 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

+import sys
+from itertools import product
 from typing import Callable, Optional

 import numpy as np

@@ -322,11 +324,13 @@ class JuniferConnectivityMeasure(ConnectivityMeasure):
         The covariance estimator
         (default ``EmpiricalCovariance(store_precision=False)``).
     kind : {"covariance", "correlation", "spearman correlation", \
-            "partial correlation", "tangent", "precision"}, optional
+            "partial correlation", "xi correlation", "tangent", \
+            "precision"}, optional
         The matrix kind. The default value uses Pearson's correlation.
         If ``"spearman correlation"`` is used, the data will be ranked before
-        estimating the covariance. For the use of ``"tangent"`` see [1]_
-        (default "correlation").
+        estimating the covariance. For ``"xi correlation"``, the coefficient
+        is not symmetric and should be interpreted as a measure of dependence
+        [2]_ . For the use of ``"tangent"`` see [1]_ (default "correlation").
     vectorize : bool, optional
         If True, connectivity matrices are reshaped into 1D arrays and only
         their flattened lower triangular parts are returned (default False).

@@ -372,6 +376,12 @@ class JuniferConnectivityMeasure(ConnectivityMeasure):
        Springer.
        doi:10/cn2h9c.

+    .. [2] Chatterjee, S.
+       A new coefficient of correlation.
+       Journal of the American Statistical Association 116.536 (2021):
+       2009-2022.
+       doi:10.1080/01621459.2020.1758115.
+
     """

     def __init__(

@@ -420,6 +430,25 @@ class JuniferConnectivityMeasure(ConnectivityMeasure):
                 covariances_std.append(self.cov_estimator_.fit(x).covariance_)

             connectivities = [cov_to_corr(cov) for cov in covariances_std]
+        elif self.kind == "xi correlation":
+            if sys.version_info < (3, 10):  # pragma: no cover
+                raise_error(
+                    klass=RuntimeError,
+                    msg=(
+                        "scipy.stats.chatterjeexi is available from "
+                        "scipy 1.15.0 and that requires Python 3.10 and above."
+                    ),
+                )
+            connectivities = []
+            for x in X:
+                n_rois = x.shape[1]
+                connectivity = np.ones((n_rois, n_rois))
+                for i, j in product(range(n_rois), range(n_rois)):
+                    if i != j:
+                        connectivity[i, j] = stats.chatterjeexi(
+                            x[:, i], x[:, j], y_continuous=True
+                        ).statistic
+                connectivities.append(connectivity)
         else:
             covariances = [self.cov_estimator_.fit(x).covariance_ for x in X]
             if self.kind in ("covariance", "tangent"):
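
The new "xi correlation" branch above is a direct pairwise application of scipy.stats.chatterjeexi (added in SciPy 1.15.0, which in turn requires Python 3.10+). A minimal standalone sketch of the same loop, on made-up toy data, might look like this:

# Sketch of the pairwise xi-correlation loop added above; assumes
# SciPy >= 1.15.0. The toy `timeseries` array is illustrative only.
from itertools import product

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
timeseries = rng.random((100, 4))  # 100 time points x 4 ROIs

n_rois = timeseries.shape[1]
connectivity = np.ones((n_rois, n_rois))
for i, j in product(range(n_rois), range(n_rois)):
    if i != j:
        # xi is directional: entry [i, j] generally differs from [j, i]
        connectivity[i, j] = stats.chatterjeexi(
            timeseries[:, i], timeseries[:, j], y_continuous=True
        ).statistic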
junifer/external/nilearn/tests/test_junifer_connectivity_measure.py CHANGED
@@ -4,6 +4,7 @@
 # License: AGPL

 import copy
+import sys
 import warnings
 from math import cosh, exp, log, sinh, sqrt
 from typing import TYPE_CHECKING, Optional, Union

@@ -12,7 +13,11 @@ import numpy as np
 import pytest
 from nilearn.connectome.connectivity_matrices import sym_matrix_to_vec
 from nilearn.tests.test_signal import generate_signals
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import (
+    assert_allclose,
+    assert_array_almost_equal,
+    assert_array_equal,
+)
 from pandas import DataFrame
 from scipy import linalg
 from sklearn.covariance import EmpiricalCovariance, LedoitWolf

@@ -1088,3 +1093,42 @@ def test_connectivity_measure_standardize(
     ).fit_transform(signals)
     for m in record:
         assert match not in m.message
+
+
+@pytest.mark.skipif(
+    sys.version_info > (3, 9),
+    reason="will have correct scipy version so no error",
+)
+def test_xi_correlation_error() -> None:
+    """Check xi correlation according to paper."""
+    with pytest.raises(RuntimeError, match="scipy.stats.chatterjeexi"):
+        JuniferConnectivityMeasure(kind="xi correlation").fit_transform(
+            np.zeros((2, 2))
+        )
+
+
+@pytest.mark.skipif(
+    sys.version_info < (3, 10),
+    reason=(
+        "needs scipy 1.15.0 and above which in turn requires "
+        "python 3.10 and above"
+    ),
+)
+def test_xi_correlation() -> None:
+    """Check xi correlation according to paper."""
+    rng = np.random.default_rng(25982435982346983)
+    x = rng.random(size=10)
+    y = rng.random(size=10)
+    arr = np.column_stack((x, y))
+    expected = np.array(
+        [
+            [
+                [1.0, -0.3030303],
+                [-0.18181818, 1.0],
+            ]
+        ]
+    )
+    got = JuniferConnectivityMeasure(kind="xi correlation").fit_transform(
+        [arr]
+    )
+    assert_allclose(expected, got)
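
The expected matrix in test_xi_correlation is deliberately asymmetric; its two off-diagonal entries come from the two call orders. A quick sketch reproducing them directly with SciPy, under the same seed and version assumptions as the test above:

# Hedged check, assuming SciPy >= 1.15.0: the off-diagonal entries of
# `expected` are xi(x, y) and xi(y, x), which differ in general.
import numpy as np
from scipy import stats

rng = np.random.default_rng(25982435982346983)
x = rng.random(size=10)
y = rng.random(size=10)

xi_xy = stats.chatterjeexi(x, y, y_continuous=True).statistic  # expected[0][0][1]
xi_yx = stats.chatterjeexi(y, x, y_continuous=True).statistic  # expected[0][1][0]
assert not np.isclose(xi_xy, xi_yx)  # xi is not symmetric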
junifer/markers/base.py CHANGED
@@ -229,9 +229,9 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
                 # feature data is not manipulated, only meta
                 self.update_meta(feature_data_copy, "marker")
                 # Update marker feature's metadata name
-                feature_data_copy["meta"]["marker"][
-                    "name"
-                ] += f"_{feature_name}"
+                feature_data_copy["meta"]["marker"]["name"] += (
+                    f"_{feature_name}"
+                )

         if storage is not None:
             logger.info(f"Storing in {storage}")
junifer/markers/brainprint.py CHANGED
@@ -116,7 +116,7 @@ class BrainPrint(BaseMarker):
         aseg_path: Path,
         norm_path: Path,
         indices: list,
-    ) -> Path:
+    ) -> Path:  # pragma: no cover
         """Generate a surface from the aseg and label files.

         Parameters

@@ -191,7 +191,7 @@ class BrainPrint(BaseMarker):
         self,
         aseg_path: Path,
         norm_path: Path,
-    ) -> dict[str, Path]:
+    ) -> dict[str, Path]:  # pragma: no cover
         """Create surfaces from FreeSurfer aseg labels.

         Parameters

@@ -266,7 +266,7 @@ class BrainPrint(BaseMarker):
         rh_white_path: Path,
         lh_pial_path: Path,
         rh_pial_path: Path,
-    ) -> dict[str, Path]:
+    ) -> dict[str, Path]:  # pragma: no cover
         """Create cortical surfaces from FreeSurfer labels.

         Parameters

@@ -308,7 +308,7 @@ class BrainPrint(BaseMarker):
     def _fix_nan(
         self,
         input_data: list[Union[float, str, npt.ArrayLike]],
-    ) -> np.ndarray:
+    ) -> np.ndarray:  # pragma: no cover
         """Convert BrainPrint output with string NaN to ``numpy.nan``.

         Parameters

@@ -330,7 +330,7 @@ class BrainPrint(BaseMarker):
         self,
         input: dict[str, Any],
         extra_input: Optional[dict] = None,
-    ) -> dict:
+    ) -> dict:  # pragma: no cover
         """Compute.

         Parameters
junifer/markers/complexity/multiscale_entropy_auc.py CHANGED
@@ -114,9 +114,9 @@ class MultiscaleEntropyAUC(ComplexityBase):

     assert isinstance(emb_dim, int), "Embedding dimension must be integer."
     assert isinstance(scale, int), "Scale must be integer."
-    assert isinstance(
-        tol, float
-    ), "Tolerance must be a positive float number."
+    assert isinstance(tol, float), (
+        "Tolerance must be a positive float number."
+    )

     _, n_roi = extracted_bold_values.shape
     MSEn_auc_roi = np.zeros((n_roi, 1))
junifer/markers/complexity/range_entropy.py CHANGED
@@ -114,9 +114,9 @@ class RangeEntropy(ComplexityBase):

     assert isinstance(emb_dim, int), "Embedding dimension must be integer."
     assert isinstance(delay, int), "Delay must be integer."
-    assert isinstance(
-        tolerance, float
-    ), "Tolerance must be a float number between 0 and 1."
+    assert isinstance(tolerance, float), (
+        "Tolerance must be a float number between 0 and 1."
+    )

     _, n_roi = extracted_bold_values.shape
     range_en_roi = np.zeros((n_roi, 1))
junifer/markers/complexity/sample_entropy.py CHANGED
@@ -115,9 +115,9 @@ class SampleEntropy(ComplexityBase):

     assert isinstance(emb_dim, int), "Embedding dimension must be integer."
     assert isinstance(delay, int), "Delay must be integer."
-    assert isinstance(
-        tol, float
-    ), "Tolerance must be a positive float number."
+    assert isinstance(tol, float), (
+        "Tolerance must be a positive float number."
+    )

     _, n_roi = extracted_bold_values.shape
     samp_en_roi = np.zeros((n_roi, 1))
junifer/markers/falff/falff_parcels.py CHANGED
@@ -151,9 +151,7 @@ class ALFFParcels(ALFFBase):
                 ).compute(
                     input=aggregation_alff_input,
                     extra_input=extra_input,
-                )[
-                    "aggregation"
-                ],
+                )["aggregation"],
             },
             "falff": {
                 **ParcelAggregation(

@@ -165,8 +163,6 @@ class ALFFParcels(ALFFBase):
                 ).compute(
                     input=aggregation_falff_input,
                     extra_input=extra_input,
-                )[
-                    "aggregation"
-                ],
+                )["aggregation"],
             },
         }
junifer/markers/falff/falff_spheres.py CHANGED
@@ -164,9 +164,7 @@ class ALFFSpheres(ALFFBase):
                 ).compute(
                     input=aggregation_alff_input,
                     extra_input=extra_input,
-                )[
-                    "aggregation"
-                ],
+                )["aggregation"],
             },
             "falff": {
                 **SphereAggregation(

@@ -180,8 +178,6 @@ class ALFFSpheres(ALFFBase):
                 ).compute(
                     input=aggregation_falff_input,
                     extra_input=extra_input,
-                )[
-                    "aggregation"
-                ],
+                )["aggregation"],
             },
         }
junifer/markers/functional_connectivity/functional_connectivity_base.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 from abc import abstractmethod
 from typing import Any, ClassVar, Optional, Union

@@ -143,14 +142,17 @@ class FunctionalConnectivityBase(BaseMarker):
             },
         )
         # Create dictionary for output
+        labels = aggregation["aggregation"]["col_names"]
         return {
             "functional_connectivity": {
                 "data": connectivity.fit_transform(
                     [aggregation["aggregation"]["data"]]
                 )[0],
-                # Create column names
-                "row_names": aggregation["aggregation"]["col_names"],
-                "col_names": aggregation["aggregation"]["col_names"],
-                "matrix_kind": "tril",
+                "row_names": labels,
+                "col_names": labels,
+                # xi correlation coefficient is not symmetric
+                "matrix_kind": (
+                    "full" if self.conn_method == "xi correlation" else "tril"
+                ),
             },
         }
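
The matrix_kind switch matters for storage: a symmetric connectivity matrix can be reduced to its lower triangle ("tril"), but an asymmetric xi matrix would lose information that way, hence "full". A small illustration, reusing the expected values from the xi test above:

# Why an asymmetric xi matrix must be stored as "full", not "tril".
import numpy as np

xi = np.array([[1.0, -0.3030303], [-0.18181818, 1.0]])
assert not np.allclose(xi, xi.T)  # asymmetric
lower = np.tril(xi)  # keeping only the lower triangle drops xi[0, 1]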
junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py CHANGED
@@ -91,7 +91,8 @@ def test_FunctionalConnectivityParcels(
     )
     # Compute the connectivity measure
     connectivity_measure = ConnectivityMeasure(
-        cov_estimator=cov_estimator, kind="correlation"  # type: ignore
+        cov_estimator=cov_estimator,
+        kind="correlation",  # type: ignore
     ).fit_transform([extracted_timeseries])[0]

     # Check that FC are almost equal
junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py CHANGED
@@ -92,7 +92,8 @@ def test_FunctionalConnectivitySpheres(
     )
     # Compute the connectivity measure
    connectivity_measure = ConnectivityMeasure(
-        cov_estimator=cov_estimator, kind="correlation"  # type: ignore
+        cov_estimator=cov_estimator,
+        kind="correlation",  # type: ignore
     ).fit_transform([extracted_timeseries])[0]

     # Check that FC are almost equal
junifer/markers/reho/_afni_reho.py CHANGED
@@ -41,7 +41,7 @@ class AFNIReHo(metaclass=Singleton):
         },
     ]

-    def __del__(self) -> None:
+    def __del__(self) -> None:  # pragma: no cover
         """Terminate the class."""
         # Clear the computation cache
         logger.debug("Clearing cache for ReHo computation via AFNI")
junifer/markers/reho/reho_base.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
junifer/markers/reho/reho_parcels.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 from typing import Any, Optional, Union

 import numpy as np
junifer/markers/reho/reho_spheres.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 from typing import Any, Optional, Union

 import numpy as np
junifer/markers/temporal_snr/temporal_snr_base.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Leonard Sasse <l.sasse@fz-juelich.de>
 # License: AGPL

-
 from abc import abstractmethod
 from typing import Any, ClassVar, Optional, Union

junifer/markers/tests/test_markers_base.py CHANGED
@@ -20,7 +20,6 @@ def test_base_marker_subclassing() -> None:

     # Create concrete class
     class MyBaseMarker(BaseMarker):
-
         _MARKER_INOUT_MAPPINGS = {  # noqa: RUF012
             "BOLD": {
                 "feat_1": "timeseries",
junifer/onthefly/_brainprint.py CHANGED
@@ -19,7 +19,7 @@ def normalize(
     storage: StorageLike,
     features: dict[str, dict[str, Optional[str]]],
     kind: str,
-) -> pd.DataFrame:
+) -> pd.DataFrame:  # pragma: no cover
     """Read stored brainprint data and normalize either surfaces or volumes.

     Parameters

@@ -79,7 +79,7 @@ def normalize(
         )
     else:
         raise_error(
-            "Invalid value for `kind`, should be one of: " f"{valid_kind}"
+            f"Invalid value for `kind`, should be one of: {valid_kind}"
         )

     return normalized_df

@@ -89,7 +89,7 @@ def reweight(
     storage: StorageLike,
     feature_name: Optional[str] = None,
     feature_md5: Optional[str] = None,
-) -> pd.DataFrame:
+) -> pd.DataFrame:  # pragma: no cover
     """Read stored brainprint data and reweight eigenvalues.

     Parameters
junifer/onthefly/read_transform.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 from typing import Optional

 import pandas as pd

@@ -85,7 +84,7 @@ def read_transform(
     # Check bctpy import
     try:
         import bct
-    except ImportError as err:
+    except ImportError as err:  # pragma: no cover
         raise_error(msg=str(err), klass=ImportError)

     # Warning about function usage
junifer/onthefly/tests/test_read_transform.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 import logging
 from pathlib import Path

junifer/pipeline/tests/test_marker_collection.py CHANGED
@@ -186,7 +186,8 @@ def test_marker_collection_storage(tmp_path: Path) -> None:
     assert out is None

     mc2 = MarkerCollection(
-        markers=markers, datareader=DefaultDataReader()  # type: ignore
+        markers=markers,
+        datareader=DefaultDataReader(),  # type: ignore
     )
     mc2.validate(dg)
     assert mc2._storage is None
junifer/pipeline/workdir_manager.py CHANGED
@@ -197,8 +197,7 @@ class WorkDirManager(metaclass=Singleton):
             return
         if self._elementdir is not None:
             logger.debug(
-                "Deleting element directory at "
-                f"{self._elementdir.resolve()!s}"
+                f"Deleting element directory at {self._elementdir.resolve()!s}"
             )
             shutil.rmtree(self._elementdir, ignore_errors=True)
             self._elementdir = None
junifer/preprocess/confounds/fmriprep_confound_remover.py CHANGED
@@ -572,7 +572,7 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
         if bold_img.get_fdata().shape[3] != len(confound_df):
             raise_error(
                 "Image time series and confounds have different length!\n"
-                f"\tImage time series: { bold_img.get_fdata().shape[3]}\n"
+                f"\tImage time series: {bold_img.get_fdata().shape[3]}\n"
                 f"\tConfounds: {len(confound_df)}"
             )

junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py CHANGED
@@ -5,7 +5,6 @@
 #          Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 import numpy as np
 import pandas as pd
 import pytest
junifer/preprocess/smoothing/tests/test_smoothing.py CHANGED
@@ -3,7 +3,6 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL

-
 import pytest

 from junifer.datareader import DefaultDataReader
junifer/preprocess/warping/_ants_warper.py CHANGED
@@ -72,7 +72,7 @@ class ANTsWarper:
         )

         # Native space warping
-        if reference == "T1w":
+        if reference == "T1w":  # pragma: no cover
             logger.debug("Using ANTs for space warping")

             # Get the min of the voxel sizes from input and use it as the

@@ -237,8 +237,7 @@ class ANTsWarper:
             if input.get("mask") is not None:
                 # Create a tempfile for warped mask output
                 apply_transforms_mask_out_path = element_tempdir / (
-                    f"warped_mask_from_{input['space']}_to_"
-                    f"{reference}.nii.gz"
+                    f"warped_mask_from_{input['space']}_to_{reference}.nii.gz"
                 )
                 # Set antsApplyTransforms command
                 apply_transforms_mask_cmd = [
junifer/preprocess/warping/_fsl_warper.py CHANGED
@@ -40,7 +40,7 @@ class FSLWarper:
         self,
         input: dict[str, Any],
         extra_input: dict[str, Any],
-    ) -> dict[str, Any]:
+    ) -> dict[str, Any]:  # pragma: no cover
         """Preprocess using FSL.

         Parameters
junifer/preprocess/warping/space_warper.py CHANGED
@@ -77,7 +77,7 @@ class SpaceWarper(BasePreprocessor):
         self.reference = reference
         # Set required data types based on reference and
         # initialize superclass
-        if self.reference == "T1w":
+        if self.reference == "T1w":  # pragma: no cover
             required_data_types = [self.reference, "Warp"]
             # Listify on
             if not isinstance(on, list):

@@ -170,7 +170,9 @@ class SpaceWarper(BasePreprocessor):
         """
         logger.info(f"Warping to {self.reference} space using SpaceWarper")
         # Transform to native space
-        if self.using in ["fsl", "ants", "auto"] and self.reference == "T1w":
+        if (
+            self.using in ["fsl", "ants", "auto"] and self.reference == "T1w"
+        ):  # pragma: no cover
             # Check for extra inputs
             if extra_input is None:
                 raise_error(
junifer/storage/pandas_base.py CHANGED
@@ -187,7 +187,9 @@ class PandasBaseFeatureStorage(BaseFeatureStorage):
         )
         # Prepare new dataframe
         df = pd.DataFrame(
-            data=data, columns=col_names, index=idx  # type: ignore
+            data=data,
+            columns=col_names,
+            index=idx,  # type: ignore
         )
         # Store dataframe
         self.store_df(meta_md5=meta_md5, element=element, df=df)
junifer/storage/sqlite.py CHANGED
@@ -229,9 +229,7 @@ class SQLiteFeatureStorage(PandasBaseFeatureStorage):
         # Format index names for retrieved data
         meta_df.index = meta_df.index.str.replace(r"meta_", "")
         # Convert dataframe to dictionary
-        out: dict[str, dict[str, str]] = meta_df.to_dict(
-            orient="index"
-        )  # type: ignore
+        out: dict[str, dict[str, str]] = meta_df.to_dict(orient="index")  # type: ignore
         # Format output
         for md5, t_meta in out.items():
             for k, v in t_meta.items():

@@ -536,8 +534,7 @@ class SQLiteFeatureStorage(PandasBaseFeatureStorage):
                 klass=IOError,
             )
         logger.info(
-            "Collecting data from "
-            f"{self.uri.parent}/*{self.uri.name}"  # type: ignore
+            f"Collecting data from {self.uri.parent}/*{self.uri.name}"  # type: ignore
         )
         # Create new instance
         out_storage = SQLiteFeatureStorage(uri=self.uri, upsert="ignore")

@@ -596,9 +593,7 @@ def _generate_update_statements(table, index_col, rows_to_update):
     for i, (_, keys) in enumerate(pk_indb.iterrows()):
         stmt = (
             table.update()
-            .where(
-                and_(col == keys[j] for j, col in enumerate(pk_cols))
-            )  # type: ignore
+            .where(and_(col == keys[j] for j, col in enumerate(pk_cols)))  # type: ignore
             .values(new_records[i])
         )
         stmts.append(stmt)
junifer/storage/tests/test_pandas_base.py CHANGED
@@ -37,7 +37,8 @@ def test_element_to_index() -> None:
     assert index.levels[1].name == "idx"  # type: ignore
     # Check second index level values
     assert all(
-        x == i for i, x in enumerate(index.levels[1].values)  # type: ignore
+        x == i
+        for i, x in enumerate(index.levels[1].values)  # type: ignore
     )
     # Check second index level values shape
     assert index.levels[1].values.shape == (10,)  # type: ignore

@@ -69,7 +70,8 @@ def test_element_to_index() -> None:
     assert index.levels[1].name == "scan"  # type: ignore
     # Check second index level values
     assert all(
-        x == i for i, x in enumerate(index.levels[1].values)  # type: ignore
+        x == i
+        for i, x in enumerate(index.levels[1].values)  # type: ignore
     )
     # Check second index level values shape
     assert index.levels[1].values.shape == (7,)  # type: ignore

@@ -97,7 +99,8 @@ def test_element_to_index() -> None:
     assert index.levels[2].name == "idx"  # type: ignore
     # Check third index level values
     assert all(
-        x == i for i, x in enumerate(index.levels[2].values)  # type: ignore
+        x == i
+        for i, x in enumerate(index.levels[2].values)  # type: ignore
     )
     # Check third index level values shape
     assert index.levels[2].values.shape == (10,)  # type: ignore