junifer 0.0.3.dev186__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- junifer/_version.py +14 -2
- junifer/api/cli.py +162 -17
- junifer/api/functions.py +87 -419
- junifer/api/parser.py +24 -0
- junifer/api/queue_context/__init__.py +8 -0
- junifer/api/queue_context/gnu_parallel_local_adapter.py +258 -0
- junifer/api/queue_context/htcondor_adapter.py +365 -0
- junifer/api/queue_context/queue_context_adapter.py +60 -0
- junifer/api/queue_context/tests/test_gnu_parallel_local_adapter.py +192 -0
- junifer/api/queue_context/tests/test_htcondor_adapter.py +257 -0
- junifer/api/res/afni/run_afni_docker.sh +6 -6
- junifer/api/res/ants/ResampleImage +3 -0
- junifer/api/res/ants/antsApplyTransforms +3 -0
- junifer/api/res/ants/antsApplyTransformsToPoints +3 -0
- junifer/api/res/ants/run_ants_docker.sh +39 -0
- junifer/api/res/fsl/applywarp +3 -0
- junifer/api/res/fsl/flirt +3 -0
- junifer/api/res/fsl/img2imgcoord +3 -0
- junifer/api/res/fsl/run_fsl_docker.sh +39 -0
- junifer/api/res/fsl/std2imgcoord +3 -0
- junifer/api/res/run_conda.sh +4 -4
- junifer/api/res/run_venv.sh +22 -0
- junifer/api/tests/data/partly_cloudy_agg_mean_tian.yml +16 -0
- junifer/api/tests/test_api_utils.py +21 -3
- junifer/api/tests/test_cli.py +232 -9
- junifer/api/tests/test_functions.py +211 -439
- junifer/api/tests/test_parser.py +1 -1
- junifer/configs/juseless/datagrabbers/aomic_id1000_vbm.py +6 -1
- junifer/configs/juseless/datagrabbers/camcan_vbm.py +6 -1
- junifer/configs/juseless/datagrabbers/ixi_vbm.py +6 -1
- junifer/configs/juseless/datagrabbers/tests/test_ucla.py +8 -8
- junifer/configs/juseless/datagrabbers/ucla.py +44 -26
- junifer/configs/juseless/datagrabbers/ukb_vbm.py +6 -1
- junifer/data/VOIs/meta/AutobiographicalMemory_VOIs.txt +23 -0
- junifer/data/VOIs/meta/Power2013_MNI_VOIs.tsv +264 -0
- junifer/data/__init__.py +4 -0
- junifer/data/coordinates.py +298 -31
- junifer/data/masks.py +360 -28
- junifer/data/parcellations.py +621 -188
- junifer/data/template_spaces.py +190 -0
- junifer/data/tests/test_coordinates.py +34 -3
- junifer/data/tests/test_data_utils.py +1 -0
- junifer/data/tests/test_masks.py +202 -86
- junifer/data/tests/test_parcellations.py +266 -55
- junifer/data/tests/test_template_spaces.py +104 -0
- junifer/data/utils.py +4 -2
- junifer/datagrabber/__init__.py +1 -0
- junifer/datagrabber/aomic/id1000.py +111 -70
- junifer/datagrabber/aomic/piop1.py +116 -53
- junifer/datagrabber/aomic/piop2.py +116 -53
- junifer/datagrabber/aomic/tests/test_id1000.py +27 -27
- junifer/datagrabber/aomic/tests/test_piop1.py +27 -27
- junifer/datagrabber/aomic/tests/test_piop2.py +27 -27
- junifer/datagrabber/base.py +62 -10
- junifer/datagrabber/datalad_base.py +0 -2
- junifer/datagrabber/dmcc13_benchmark.py +372 -0
- junifer/datagrabber/hcp1200/datalad_hcp1200.py +5 -0
- junifer/datagrabber/hcp1200/hcp1200.py +30 -13
- junifer/datagrabber/pattern.py +133 -27
- junifer/datagrabber/pattern_datalad.py +111 -13
- junifer/datagrabber/tests/test_base.py +57 -6
- junifer/datagrabber/tests/test_datagrabber_utils.py +204 -76
- junifer/datagrabber/tests/test_datalad_base.py +0 -6
- junifer/datagrabber/tests/test_dmcc13_benchmark.py +256 -0
- junifer/datagrabber/tests/test_multiple.py +43 -10
- junifer/datagrabber/tests/test_pattern.py +125 -178
- junifer/datagrabber/tests/test_pattern_datalad.py +44 -25
- junifer/datagrabber/utils.py +151 -16
- junifer/datareader/default.py +36 -10
- junifer/external/nilearn/junifer_nifti_spheres_masker.py +6 -0
- junifer/markers/base.py +25 -16
- junifer/markers/collection.py +35 -16
- junifer/markers/complexity/__init__.py +27 -0
- junifer/markers/complexity/complexity_base.py +149 -0
- junifer/markers/complexity/hurst_exponent.py +136 -0
- junifer/markers/complexity/multiscale_entropy_auc.py +140 -0
- junifer/markers/complexity/perm_entropy.py +132 -0
- junifer/markers/complexity/range_entropy.py +136 -0
- junifer/markers/complexity/range_entropy_auc.py +145 -0
- junifer/markers/complexity/sample_entropy.py +134 -0
- junifer/markers/complexity/tests/test_complexity_base.py +19 -0
- junifer/markers/complexity/tests/test_hurst_exponent.py +69 -0
- junifer/markers/complexity/tests/test_multiscale_entropy_auc.py +68 -0
- junifer/markers/complexity/tests/test_perm_entropy.py +68 -0
- junifer/markers/complexity/tests/test_range_entropy.py +69 -0
- junifer/markers/complexity/tests/test_range_entropy_auc.py +69 -0
- junifer/markers/complexity/tests/test_sample_entropy.py +68 -0
- junifer/markers/complexity/tests/test_weighted_perm_entropy.py +68 -0
- junifer/markers/complexity/weighted_perm_entropy.py +133 -0
- junifer/markers/falff/_afni_falff.py +153 -0
- junifer/markers/falff/_junifer_falff.py +142 -0
- junifer/markers/falff/falff_base.py +91 -84
- junifer/markers/falff/falff_parcels.py +61 -45
- junifer/markers/falff/falff_spheres.py +64 -48
- junifer/markers/falff/tests/test_falff_parcels.py +89 -121
- junifer/markers/falff/tests/test_falff_spheres.py +92 -127
- junifer/markers/functional_connectivity/crossparcellation_functional_connectivity.py +1 -0
- junifer/markers/functional_connectivity/edge_functional_connectivity_parcels.py +1 -0
- junifer/markers/functional_connectivity/functional_connectivity_base.py +1 -0
- junifer/markers/functional_connectivity/tests/test_crossparcellation_functional_connectivity.py +46 -44
- junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_parcels.py +34 -39
- junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_spheres.py +40 -52
- junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py +62 -70
- junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py +99 -85
- junifer/markers/parcel_aggregation.py +60 -38
- junifer/markers/reho/_afni_reho.py +192 -0
- junifer/markers/reho/_junifer_reho.py +281 -0
- junifer/markers/reho/reho_base.py +69 -34
- junifer/markers/reho/reho_parcels.py +26 -16
- junifer/markers/reho/reho_spheres.py +23 -9
- junifer/markers/reho/tests/test_reho_parcels.py +93 -92
- junifer/markers/reho/tests/test_reho_spheres.py +88 -86
- junifer/markers/sphere_aggregation.py +54 -9
- junifer/markers/temporal_snr/temporal_snr_base.py +1 -0
- junifer/markers/temporal_snr/tests/test_temporal_snr_parcels.py +38 -37
- junifer/markers/temporal_snr/tests/test_temporal_snr_spheres.py +34 -38
- junifer/markers/tests/test_collection.py +43 -42
- junifer/markers/tests/test_ets_rss.py +29 -37
- junifer/markers/tests/test_parcel_aggregation.py +587 -468
- junifer/markers/tests/test_sphere_aggregation.py +209 -157
- junifer/markers/utils.py +2 -40
- junifer/onthefly/read_transform.py +13 -6
- junifer/pipeline/__init__.py +1 -0
- junifer/pipeline/pipeline_step_mixin.py +105 -41
- junifer/pipeline/registry.py +17 -0
- junifer/pipeline/singleton.py +45 -0
- junifer/pipeline/tests/test_pipeline_step_mixin.py +139 -51
- junifer/pipeline/tests/test_update_meta_mixin.py +1 -0
- junifer/pipeline/tests/test_workdir_manager.py +104 -0
- junifer/pipeline/update_meta_mixin.py +8 -2
- junifer/pipeline/utils.py +154 -15
- junifer/pipeline/workdir_manager.py +246 -0
- junifer/preprocess/__init__.py +3 -0
- junifer/preprocess/ants/__init__.py +4 -0
- junifer/preprocess/ants/ants_apply_transforms_warper.py +185 -0
- junifer/preprocess/ants/tests/test_ants_apply_transforms_warper.py +56 -0
- junifer/preprocess/base.py +96 -69
- junifer/preprocess/bold_warper.py +265 -0
- junifer/preprocess/confounds/fmriprep_confound_remover.py +91 -134
- junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +106 -111
- junifer/preprocess/fsl/__init__.py +4 -0
- junifer/preprocess/fsl/apply_warper.py +179 -0
- junifer/preprocess/fsl/tests/test_apply_warper.py +45 -0
- junifer/preprocess/tests/test_bold_warper.py +159 -0
- junifer/preprocess/tests/test_preprocess_base.py +6 -6
- junifer/preprocess/warping/__init__.py +6 -0
- junifer/preprocess/warping/_ants_warper.py +167 -0
- junifer/preprocess/warping/_fsl_warper.py +109 -0
- junifer/preprocess/warping/space_warper.py +213 -0
- junifer/preprocess/warping/tests/test_space_warper.py +198 -0
- junifer/stats.py +18 -4
- junifer/storage/base.py +9 -1
- junifer/storage/hdf5.py +8 -3
- junifer/storage/pandas_base.py +2 -1
- junifer/storage/sqlite.py +1 -0
- junifer/storage/tests/test_hdf5.py +2 -1
- junifer/storage/tests/test_sqlite.py +8 -8
- junifer/storage/tests/test_utils.py +6 -6
- junifer/storage/utils.py +1 -0
- junifer/testing/datagrabbers.py +11 -7
- junifer/testing/utils.py +1 -0
- junifer/tests/test_stats.py +2 -0
- junifer/utils/__init__.py +1 -0
- junifer/utils/helpers.py +53 -0
- junifer/utils/logging.py +14 -3
- junifer/utils/tests/test_helpers.py +35 -0
- {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/METADATA +59 -28
- junifer-0.0.4.dist-info/RECORD +257 -0
- {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/WHEEL +1 -1
- junifer/markers/falff/falff_estimator.py +0 -334
- junifer/markers/falff/tests/test_falff_estimator.py +0 -238
- junifer/markers/reho/reho_estimator.py +0 -515
- junifer/markers/reho/tests/test_reho_estimator.py +0 -260
- junifer-0.0.3.dev186.dist-info/RECORD +0 -199
- {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/AUTHORS.rst +0 -0
- {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/LICENSE.md +0 -0
- {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/entry_points.txt +0 -0
- {junifer-0.0.3.dev186.dist-info → junifer-0.0.4.dist-info}/top_level.txt +0 -0
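The most consequential change this diff surfaces is the rework of the queueing workflow: `junifer/api/functions.py` shrinks by moving scheduler logic into the new `junifer/api/queue_context` adapters (GNU Parallel local and HTCondor) and gains a `reset` function. As a hedged sketch of how the rewritten tests below drive `queue` with the new adapter kinds — the config keys mirror the test file, while the workdir path and job name here are hypothetical:

```python
from junifer.api.functions import queue

# Config mirroring the queue() call in the updated test_functions.py below;
# the workdir path and jobname are illustrative placeholders.
config = {
    "with": "junifer.testing.registry",
    "workdir": "/tmp/junifer_work",  # hypothetical path
    "datagrabber": {"kind": "PartlyCloudyTestingDataGrabber"},
    "markers": [
        {
            "name": "tian-s1-3T_mean",
            "kind": "ParcelAggregation",
            "parcellation": "TianxS1x3TxMNInonlinear2009cAsym",
            "method": "mean",
        },
    ],
    "storage": {"kind": "SQLiteFeatureStorage", "uri": "out.sqlite"},
    "env": {"kind": "conda", "name": "junifer"},
    "mem": "8G",
}

# "GNUParallelLocal" and "HTCondor" are the queue kinds backed by the new
# queue_context adapters listed above.
queue(config=config, kind="GNUParallelLocal", jobname="example_job")
```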
junifer/api/tests/test_functions.py

@@ -6,15 +6,14 @@
 # License: AGPL

 import logging
-import stat
 from pathlib import Path
-from typing import List, Tuple, Union
+from typing import Dict, List, Tuple, Union

 import pytest
 from ruamel.yaml import YAML

 import junifer.testing.registry  # noqa: F401
-from junifer.api.functions import collect, queue, run
+from junifer.api.functions import collect, queue, reset, run
 from junifer.datagrabber.base import BaseDataGrabber
 from junifer.pipeline.registry import build

@@ -25,195 +24,217 @@ yaml.default_flow_style = False
 yaml.allow_unicode = True
 yaml.indent(mapping=2, sequence=4, offset=2)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
-
-
-
+
+@pytest.fixture
+def datagrabber() -> Dict[str, str]:
+    """Return a datagrabber as a dictionary."""
+    return {
+        "kind": "PartlyCloudyTestingDataGrabber",
+    }
+
+
+@pytest.fixture
+def markers() -> List[Dict[str, str]]:
+    """Return markers as a list of dictionary."""
+    return [
+        {
+            "name": "tian-s1-3T_mean",
+            "kind": "ParcelAggregation",
+            "parcellation": "TianxS1x3TxMNInonlinear2009cAsym",
+            "method": "mean",
+        },
+        {
+            "name": "tian-s1-3T_std",
+            "kind": "ParcelAggregation",
+            "parcellation": "TianxS1x3TxMNInonlinear2009cAsym",
+            "method": "std",
+        },
+    ]
+
+
+@pytest.fixture
+def storage() -> Dict[str, str]:
+    """Return a storage as a dictionary."""
+    return {
+        "kind": "SQLiteFeatureStorage",
+    }
+
+
+def test_run_single_element(
+    tmp_path: Path,
+    datagrabber: Dict[str, str],
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
+) -> None:
     """Test run function with single element.

     Parameters
     ----------
     tmp_path : pathlib.Path
         The path to the test directory.
+    datagrabber : dict
+        Testing datagrabber as dictionary.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.

     """
-    #
-
-    workdir.mkdir()
-    # Create output directory
-    outdir = tmp_path / "out"
-    outdir.mkdir()
-    # Create storage
-    uri = outdir / "test.sqlite"
-    storage["uri"] = uri  # type: ignore
+    # Set storage
+    storage["uri"] = str((tmp_path / "out.sqlite").resolve())
     # Run operations
     run(
-        workdir=
+        workdir=tmp_path,
         datagrabber=datagrabber,
         markers=markers,
         storage=storage,
         elements=["sub-01"],
     )
     # Check files
-    files = list(
+    files = list(tmp_path.glob("*.sqlite"))
     assert len(files) == 1


-def test_run_single_element_with_preprocessing(
+def test_run_single_element_with_preprocessing(
+    tmp_path: Path,
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
+) -> None:
     """Test run function with single element and pre-processing.

     Parameters
     ----------
     tmp_path : pathlib.Path
         The path to the test directory.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.

     """
-    #
-
-    workdir.mkdir()
-    # Create output directory
-    outdir = tmp_path / "out"
-    outdir.mkdir()
-    # Create storage
-    uri = outdir / "test.sqlite"
-    storage["uri"] = uri  # type: ignore
+    # Set storage
+    storage["uri"] = str((tmp_path / "out.sqlite").resolve())
     # Run operations
     run(
-        workdir=
+        workdir=tmp_path,
         datagrabber={
             "kind": "PartlyCloudyTestingDataGrabber",
             "reduce_confounds": False,
         },
-        markers=
+        markers=markers,
+        storage=storage,
+        preprocessors=[
             {
-                "
-                "kind": "FunctionalConnectivityParcels",
-                "parcellation": "Schaefer100x17",
-                "agg_method": "mean",
+                "kind": "fMRIPrepConfoundRemover",
             }
         ],
-
-        preprocessor={
-            "kind": "fMRIPrepConfoundRemover",
-        },
-        elements=["sub-001"],
+        elements=["sub-01"],
     )
     # Check files
-    files = list(
+    files = list(tmp_path.glob("*.sqlite"))
     assert len(files) == 1


-def
-
+def test_run_multi_element_multi_output(
+    tmp_path: Path,
+    datagrabber: Dict[str, str],
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
+) -> None:
+    """Test run function with multi element and multi output.

     Parameters
     ----------
     tmp_path : pathlib.Path
         The path to the test directory.
+    datagrabber : dict
+        Testing datagrabber as dictionary.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.

     """
-    #
-
-    workdir.mkdir()
-    # Create output directory
-    outdir = tmp_path / "out"
-    outdir.mkdir()
-    # Create storage
-    uri = outdir / "test.sqlite"
-    storage["uri"] = uri  # type: ignore
+    # Set storage
+    storage["uri"] = str((tmp_path / "out.sqlite").resolve())
     storage["single_output"] = False  # type: ignore
     # Run operations
     run(
-        workdir=
+        workdir=tmp_path,
         datagrabber=datagrabber,
         markers=markers,
         storage=storage,
         elements=["sub-01", "sub-03"],
     )
     # Check files
-    files = list(
+    files = list(tmp_path.glob("*.sqlite"))
     assert len(files) == 2


-def test_run_multi_element_single_output(
-
+def test_run_multi_element_single_output(
+    tmp_path: Path,
+    datagrabber: Dict[str, str],
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
+) -> None:
+    """Test run function with multi element and single output.

     Parameters
     ----------
     tmp_path : pathlib.Path
         The path to the test directory.
+    datagrabber : dict
+        Testing datagrabber as dictionary.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.

     """
-    #
-
-    workdir.mkdir()
-    # Create output directory
-    outdir = tmp_path / "out"
-    outdir.mkdir()
-    # Create storage
-    uri = outdir / "test.sqlite"
-    storage["uri"] = uri  # type: ignore
+    # Set storage
+    storage["uri"] = str((tmp_path / "out.sqlite").resolve())
     storage["single_output"] = True  # type: ignore
     # Run operations
     run(
-        workdir=
+        workdir=tmp_path,
         datagrabber=datagrabber,
         markers=markers,
         storage=storage,
         elements=["sub-01", "sub-03"],
     )
     # Check files
-    files = list(
+    files = list(tmp_path.glob("*.sqlite"))
     assert len(files) == 1
-    assert files[0].name == "
+    assert files[0].name == "out.sqlite"


-def test_run_and_collect(
+def test_run_and_collect(
+    tmp_path: Path,
+    datagrabber: Dict[str, str],
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
+) -> None:
     """Test run and collect functions.

     Parameters
     ----------
     tmp_path : pathlib.Path
         The path to the test directory.
+    datagrabber : dict
+        Testing datagrabber as dictionary.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.

     """
-    #
-
-
-    # Create output directory
-    outdir = tmp_path / "out"
-    outdir.mkdir()
-    # Create storage
-    uri = outdir / "test.sqlite"
-    storage["uri"] = uri  # type: ignore
+    # Set storage
+    uri = tmp_path / "out.sqlite"
+    storage["uri"] = str(uri.resolve())
     storage["single_output"] = False  # type: ignore
     # Run operations
     run(
-        workdir=
+        workdir=tmp_path,
         datagrabber=datagrabber,
         markers=markers,
         storage=storage,
@@ -224,7 +245,7 @@ def test_run_and_collect(tmp_path: Path) -> None:
     )
     elements = dg.get_elements()  # type: ignore
     # This should create 10 files
-    files = list(
+    files = list(tmp_path.glob("*.sqlite"))
     assert len(files) == len(elements)
     # But the test.sqlite file should not exist
     assert not uri.exists()
@@ -238,6 +259,9 @@ def test_queue_correct_yaml_config(
     tmp_path: Path,
     monkeypatch: pytest.MonkeyPatch,
     caplog: pytest.LogCaptureFixture,
+    datagrabber: Dict[str, str],
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
 ) -> None:
     """Test proper YAML config generation for queueing.

@@ -249,6 +273,12 @@ def test_queue_correct_yaml_config(
         The pytest.MonkeyPatch object.
     caplog : pytest.LogCaptureFixture
         The pytest.LogCaptureFixture object.
+    datagrabber : dict
+        Testing datagrabber as dictionary.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.

     """
     with monkeypatch.context() as m:
@@ -260,7 +290,7 @@ def test_queue_correct_yaml_config(
                 "workdir": str(tmp_path.resolve()),
                 "datagrabber": datagrabber,
                 "markers": markers,
-                "storage":
+                "storage": storage,
                 "env": {
                     "kind": "conda",
                     "name": "junifer",
@@ -270,7 +300,7 @@ def test_queue_correct_yaml_config(
             kind="HTCondor",
             jobname="yaml_config_gen_check",
         )
-        assert "Creating job
+        assert "Creating job directory at" in caplog.text
         assert "Writing YAML config to" in caplog.text
         assert "Queue done" in caplog.text

@@ -308,7 +338,7 @@ def test_queue_invalid_job_queue(
         The pytest.MonkeyPatch object.

     """
-    with pytest.raises(ValueError, match="
+    with pytest.raises(ValueError, match="Invalid value for `kind`"):
         with monkeypatch.context() as m:
             m.chdir(tmp_path)
             queue(
@@ -478,6 +508,7 @@ def test_queue_without_elements(
     tmp_path: Path,
     monkeypatch: pytest.MonkeyPatch,
     caplog: pytest.LogCaptureFixture,
+    datagrabber: Dict[str, str],
 ) -> None:
     """Test queue without elements.

@@ -489,6 +520,8 @@ def test_queue_without_elements(
         The pytest.MonkeyPatch object.
     caplog : pytest.LogCaptureFixture
         The pytest.LogCaptureFixture object.
+    datagrabber : dict
+        Testing datagrabber as dictionary.

     """
     with monkeypatch.context() as m:
@@ -496,181 +529,63 @@ def test_queue_without_elements(
         with caplog.at_level(logging.INFO):
             queue(
                 config={"datagrabber": datagrabber},
-                kind="SLURM",
-            )
-        assert "Queue done" in caplog.text
-
-
-def test_queue_condor_invalid_python_env(
-    tmp_path: Path,
-    monkeypatch: pytest.MonkeyPatch,
-) -> None:
-    """Test invalid Python environment check for HTCondor.
-
-    Parameters
-    ----------
-    tmp_path : pathlib.Path
-        The path to the test directory.
-    monkeypatch : pytest.MonkeyPatch
-        The pytest.MonkeyPatch object.
-
-    """
-    with pytest.raises(ValueError, match="Unknown env kind"):
-        with monkeypatch.context() as m:
-            m.chdir(tmp_path)
-            queue(
-                config={"elements": "sub-001"},
                 kind="HTCondor",
-                env={"kind": "galaxy"},
             )
+        assert "Queue done" in caplog.text


-def
-    tmp_path: Path,
-    monkeypatch: pytest.MonkeyPatch,
-    caplog: pytest.LogCaptureFixture,
-) -> None:
-    """Test conda Python environment check for HTCondor.
-
-    Parameters
-    ----------
-    tmp_path : pathlib.Path
-        The path to the test directory.
-    monkeypatch : pytest.MonkeyPatch
-        The pytest.MonkeyPatch object.
-    caplog : pytest.LogCaptureFixture
-        The pytest.LogCaptureFixture object.
-
-    """
-    with monkeypatch.context() as m:
-        m.chdir(tmp_path)
-        with caplog.at_level(logging.INFO):
-            queue(
-                config={"elements": "sub-001"},
-                kind="HTCondor",
-                jobname="conda_env_check",
-                env={"kind": "conda", "name": "conda-env"},
-            )
-        assert "Copying" in caplog.text
-        fname = (
-            tmp_path / "junifer_jobs" / "conda_env_check" / "run_conda.sh"
-        )
-        assert fname.is_file()
-        assert stat.S_IMODE(fname.stat().st_mode) & stat.S_IEXEC != 0
-
-
-def test_queue_condor_conda_pre_run_python(
+def test_reset_run(
     tmp_path: Path,
-
-
+    datagrabber: Dict[str, str],
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
 ) -> None:
-    """Test
+    """Test reset function for run.

     Parameters
     ----------
     tmp_path : pathlib.Path
         The path to the test directory.
-
-
-
-
+    datagrabber : dict
+        Testing datagrabber as dictionary.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.

     """
-
-
-
-
-
-
-
-
-
-
-
-
-        fname = (
-            tmp_path / "junifer_jobs" / "conda_env_check" / "run_conda.sh"
-        )
-        assert fname.is_file()
-        assert stat.S_IMODE(fname.stat().st_mode) & stat.S_IEXEC != 0
-
-        fname = (
-            tmp_path / "junifer_jobs" / "conda_env_check" / "pre_run.sh"
-        )
-        assert fname.is_file()
-        assert stat.S_IMODE(fname.stat().st_mode) & stat.S_IEXEC != 0
-
-
-def test_queue_condor_venv_python(
-    tmp_path: Path,
-    monkeypatch: pytest.MonkeyPatch,
-    caplog: pytest.LogCaptureFixture,
-) -> None:
-    """Test venv Python environment check for HTCondor.
-
-    Parameters
-    ----------
-    tmp_path : pathlib.Path
-        The path to the test directory.
-    monkeypatch : pytest.MonkeyPatch
-        The pytest.MonkeyPatch object.
-    caplog : pytest.LogCaptureFixture
-        The pytest.LogCaptureFixture object.
+    # Create storage
+    storage["uri"] = tmp_path / "test_reset_run.sqlite"  # type: ignore
+    # Run operation to generate files
+    run(
+        workdir=tmp_path,
+        datagrabber=datagrabber,
+        markers=markers,
+        storage=storage,
+        elements=["sub-01"],
+    )
+    # Reset operation
+    reset(config={"storage": storage})

-    ""
-    with monkeypatch.context() as m:
-        m.chdir(tmp_path)
-        with caplog.at_level(logging.INFO):
-            queue(
-                config={"elements": "sub-001"},
-                kind="HTCondor",
-                env={"kind": "venv", "name": "venv-env"},
-            )
-        # TODO: needs implementation for testing
+    assert not Path(storage["uri"]).exists()


 @pytest.mark.parametrize(
-    "
-
-
-
-
-            "4G",
-            4,
-            "4G",
-            "yes",
-        ),
-        (
-            ["sub-001"],
-            {"kind": "conda", "name": "conda-env"},
-            "4G",
-            4,
-            "4G",
-            "on_success_only",
-        ),
-        (
-            ["sub-001"],
-            {"kind": "venv", "name": "venv-env"},
-            "8G",
-            8,
-            "8G",
-            "no",
-        ),
-        (["sub-001"], {"kind": "local"}, "12G", 12, "12G", "yes"),
-    ],
+    "job_name",
+    (
+        "job",
+        None,
+    ),
 )
-def
+def test_reset_queue(
     tmp_path: Path,
     monkeypatch: pytest.MonkeyPatch,
-
-
-
-
-    cpus: int,
-    disk: str,
-    collect: str,
+    datagrabber: Dict[str, str],
+    markers: List[Dict[str, str]],
+    storage: Dict[str, str],
+    job_name: str,
 ) -> None:
-    """Test
+    """Test reset function for queue.

     Parameters
     ----------
@@ -678,190 +593,47 @@ def test_queue_condor_assets_generation(
         The path to the test directory.
     monkeypatch : pytest.MonkeyPatch
         The pytest.MonkeyPatch object.
-
-
-
-
-
-
-
-        The parametrized
-    cpus : int
-        The parametrized CPUs.
-    disk : str
-        The parametrized disk size.
-    collect : str
-        The parametrized collect option.
+    datagrabber : dict
+        Testing datagrabber as dictionary.
+    markers : list of dict
+        Testing markers as list of dictionary.
+    storage : dict
+        Testing storage as dictionary.
+    job_name : str
+        The parametrized job name.

     """
-    jobname = "condor_assets_gen_check"
-    with monkeypatch.context() as m:
-        m.chdir(tmp_path)
-        with caplog.at_level(logging.INFO):
-            queue(
-                config={"elements": elements},
-                kind="HTCondor",
-                jobname=jobname,
-                env=env,
-                mem=mem,
-                cpus=cpus,
-                disk=disk,
-                collect=collect,
-            )
-
-        # Check log directory creation
-        assert Path(tmp_path / "junifer_jobs" / jobname / "logs").is_dir()
-
-        run_submit_file_path = Path(
-            tmp_path / "junifer_jobs" / jobname / f"run_{jobname}.submit"
-        )
-        # Check junifer run submit file
-        assert run_submit_file_path.is_file()
-        # Read run submit file to check if resources are correct
-        with open(run_submit_file_path) as f:
-            for line in f.read().splitlines():
-                if "request_cpus" in line:
-                    assert int(line.split("=")[1].strip()) == cpus
-                if "request_memory" in line:
-                    assert line.split("=")[1].strip() == mem
-                if "request_disk" in line:
-                    assert line.split("=")[1].strip() == disk
-
-        # Check junifer collect submit file
-        assert Path(
-            tmp_path
-            / "junifer_jobs"
-            / jobname
-            / f"collect_{jobname}.submit"
-        ).is_file()
-
-        dag_file_path = Path(
-            tmp_path / "junifer_jobs" / jobname / f"{jobname}.dag"
-        )
-        # Check junifer dag file
-        assert dag_file_path.is_file()
-        # Read dag file to check if collect job is found
-        element_count = 0
-        has_collect_job = False
-        has_final_collect_job = False
-        with open(dag_file_path) as f:
-            for line in f.read().splitlines():
-                if "JOB" in line:
-                    element_count += 1
-                if "collect" in line:
-                    has_collect_job = True
-                if "FINAL" in line:
-                    has_final_collect_job = True
-
-        if collect == "yes":
-            assert len(elements) == element_count
-            assert has_collect_job is True
-            assert has_final_collect_job is True
-        elif collect == "on_success_only":
-            assert len(elements) == element_count - 1
-            assert has_collect_job is True
-            assert has_final_collect_job is False
-        else:
-            assert len(elements) == element_count
-            assert has_collect_job is False
-
-        if has_final_collect_job is True:
-            pre_collect_fname = Path(
-                tmp_path / "junifer_jobs" / jobname / "collect_pre.sh"
-            )
-            assert pre_collect_fname.exists()
-            assert (
-                stat.S_IMODE(pre_collect_fname.stat().st_mode)
-                & stat.S_IEXEC
-                != 0
-            )
-
-        # Check submit log
-        assert (
-            "HTCondor job files created, to submit the job, run"
-            in caplog.text
-        )
-
-
-def test_queue_condor_extra_preamble(
-    tmp_path: Path,
-    monkeypatch: pytest.MonkeyPatch,
-) -> None:
-    """Test HTCondor extra preamble addition.
-
-    Parameters
-    ----------
-    tmp_path : pathlib.Path
-        The path to the test directory.
-    monkeypatch : pytest.MonkeyPatch
-        The pytest.MonkeyPatch object.
-
-    """
-    jobname = "condor_extra_preamble_check"
-    extra_preamble = "FOO = BAR"
     with monkeypatch.context() as m:
         m.chdir(tmp_path)
+        # Create storage
+        storage["uri"] = "test_reset_queue.sqlite"
+        # Set job name
+        if job_name is None:
+            job_name = "junifer_job"
+        # Queue operation to generate files
        queue(
-            config={
-
-
-
-
-
-
-
-
+            config={
+                "with": "junifer.testing.registry",
+                "workdir": str(tmp_path.resolve()),
+                "datagrabber": datagrabber,
+                "markers": markers,
+                "storage": storage,
+                "env": {
+                    "kind": "conda",
+                    "name": "junifer",
+                },
+                "mem": "8G",
+            },
+            kind="GNUParallelLocal",
+            jobname=job_name,
        )
-
-
-
-
-
-
-        collect_submit_file_path = Path(
-            tmp_path / "junifer_jobs" / jobname / f"collect_{jobname}.submit"
+        # Reset operation
+        reset(
+            config={
+                "storage": storage,
+                "queue": {"kind": "GNUParallelLocal", "jobname": job_name},
+            }
         )
-        with open(collect_submit_file_path) as f:
-            for line in f.read().splitlines():
-                if "FOO" in line:
-                    assert line.strip() == extra_preamble

-
-
-    tmp_path: Path,
-    monkeypatch: pytest.MonkeyPatch,
-    caplog: pytest.LogCaptureFixture,
-) -> None:
-    """Test HTCondor job submission failure.
-
-    Parameters
-    ----------
-    tmp_path : pathlib.Path
-        The path to the test directory.
-    monkeypatch : pytest.MonkeyPatch
-        The pytest.MonkeyPatch object.
-    caplog : pytest.LogCaptureFixture
-        The pytest.LogCaptureFixture object.
-
-    """
-    with pytest.raises(
-        FileNotFoundError,
-        match="No such file or directory: 'condor_submit_dag'",
-    ):
-        with monkeypatch.context() as m:
-            m.chdir(tmp_path)
-            with caplog.at_level(logging.INFO):
-                queue(
-                    config={"elements": ["sub-001"]},
-                    kind="HTCondor",
-                    jobname="condor_job_submission_fail",
-                    submit=True,
-                )
-        # Check submit log
-        assert "Submitting HTCondor job" in caplog.text
-
-
-@pytest.mark.skip(reason="SLURM not installed on system.")
-def test_queue_slurm() -> None:
-    """Test job queueing in SLURM."""
-    pass
+        assert not Path(storage["uri"]).exists()
+        assert not (tmp_path / "junifer_jobs" / job_name).exists()