brkraw 0.5.2__py3-none-any.whl → 0.5.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brkraw/__init__.py +1 -1
- brkraw/api/__init__.py +122 -0
- brkraw/api/types.py +39 -0
- brkraw/apps/loader/__init__.py +3 -6
- brkraw/apps/loader/core.py +128 -132
- brkraw/apps/loader/formatter.py +0 -2
- brkraw/apps/loader/helper.py +334 -114
- brkraw/apps/loader/info/scan.py +2 -2
- brkraw/apps/loader/info/transform.py +0 -1
- brkraw/apps/loader/types.py +56 -59
- brkraw/cli/commands/addon.py +1 -1
- brkraw/cli/commands/cache.py +82 -0
- brkraw/cli/commands/config.py +2 -2
- brkraw/cli/commands/convert.py +61 -38
- brkraw/cli/commands/hook.py +1 -3
- brkraw/cli/commands/info.py +1 -1
- brkraw/cli/commands/init.py +1 -1
- brkraw/cli/commands/params.py +1 -1
- brkraw/cli/commands/prune.py +2 -2
- brkraw/cli/commands/session.py +1 -11
- brkraw/cli/main.py +51 -1
- brkraw/cli/utils.py +1 -1
- brkraw/core/cache.py +87 -0
- brkraw/core/config.py +18 -2
- brkraw/core/fs.py +26 -9
- brkraw/core/zip.py +46 -32
- brkraw/dataclasses/__init__.py +3 -2
- brkraw/dataclasses/study.py +73 -23
- brkraw/resolver/datatype.py +10 -2
- brkraw/resolver/image.py +140 -21
- brkraw/resolver/nifti.py +4 -12
- brkraw/schema/niftiheader.yaml +0 -2
- brkraw/specs/meta/validator.py +0 -1
- brkraw/specs/rules/logic.py +1 -3
- {brkraw-0.5.2.dist-info → brkraw-0.5.5.dist-info}/METADATA +8 -9
- {brkraw-0.5.2.dist-info → brkraw-0.5.5.dist-info}/RECORD +39 -35
- {brkraw-0.5.2.dist-info → brkraw-0.5.5.dist-info}/entry_points.txt +1 -0
- {brkraw-0.5.2.dist-info → brkraw-0.5.5.dist-info}/WHEEL +0 -0
- {brkraw-0.5.2.dist-info → brkraw-0.5.5.dist-info}/licenses/LICENSE +0 -0
brkraw/dataclasses/study.py
CHANGED
@@ -1,21 +1,56 @@
+
 from __future__ import annotations
+
+import logging
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict,
+from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Union
 
 from ..core.fs import DatasetFS
 from .node import DatasetNode
 from .scan import Scan
 
-
-
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class LazyScan:
+    """Lightweight lazy Scan proxy.
+
+    This defers `Scan.from_fs(...)` until the scan is actually accessed.
+    It implements attribute forwarding so it can be used where a Scan is expected.
+    """
+
+    fs: DatasetFS
+    scan_id: int
+    scan_root: str
+    _scan: Optional[Scan] = field(default=None, init=False, repr=False)
+
+    def materialize(self) -> Scan:
+        if self._scan is None:
+            logger.debug(
+                "Materializing Scan.from_fs for scan_id=%s scan_root=%s",
+                self.scan_id,
+                self.scan_root,
+            )
+            self._scan = Scan.from_fs(self.fs, self.scan_id, self.scan_root)
+        return self._scan
+
+    def __getattr__(self, name: str):
+        # Delegate unknown attributes to the underlying Scan.
+        return getattr(self.materialize(), name)
+
+    def __repr__(self) -> str:
+        if self._scan is None:
+            return f"LazyScan(id={self.scan_id} root='{self.scan_root}')"
+        return repr(self._scan)
 
 
 @dataclass
 class Study(DatasetNode):
     fs: DatasetFS
     relroot: str = ""
-    scans: Dict[int, Scan] = field(default_factory=dict)
+    scans: Dict[int, "LazyScan"] = field(default_factory=dict)
    _cache: Dict[str, object] = field(default_factory=dict, init=False, repr=False)
 
     @classmethod
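The hunk above is the core of the 0.5.5 lazy-loading change: `__getattr__` is only invoked when normal attribute lookup fails, so the proxy's own fields resolve cheaply while any forwarded Scan attribute triggers a one-time `Scan.from_fs` call. A minimal, self-contained sketch of the same deferral pattern (the `Expensive`/`LazyProxy` names are illustrative, not brkraw APIs):

```python
from dataclasses import dataclass, field
from typing import Optional


class Expensive:
    """Stands in for Scan: costly to construct, cheap to use afterwards."""

    def __init__(self, key: int) -> None:
        print(f"constructing Expensive({key})")  # runs once, on first real access
        self.key = key

    def describe(self) -> str:
        return f"Expensive(key={self.key})"


@dataclass
class LazyProxy:
    key: int
    _obj: Optional[Expensive] = field(default=None, init=False, repr=False)

    def materialize(self) -> Expensive:
        if self._obj is None:
            self._obj = Expensive(self.key)
        return self._obj

    def __getattr__(self, name: str):
        # Called only when normal lookup fails, i.e. for attributes that
        # live on the wrapped object rather than on the proxy itself.
        return getattr(self.materialize(), name)


proxy = LazyProxy(3)     # nothing constructed yet
print(proxy.describe())  # first access materializes Expensive(3)
print(proxy.describe())  # second access reuses the cached instance
```

One caveat of this pattern, which applies to LazyScan as well: `isinstance(proxy, Expensive)` is False, which is why the resolver changes further down fall back to duck-typing on `materialize()`.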
@@ -42,17 +77,25 @@ class Study(DatasetNode):
 
     @classmethod
     def discover(cls, fs: DatasetFS) -> List["Study"]:
-        """Bottom-up discovery using reco markers (2dseq + visu_pars).
-
-
+        """Bottom-up discovery using reco markers (2dseq + visu_pars).
+
+        Notes:
+            Discovery is I/O bound on large studies or slow filesystems.
+            We minimize filesystem calls by:
+            - disabling per-directory sorting in fs.walk
+            - avoiding per-directory set() allocations
+            - caching scan-level existence checks (method/acqp)
+        """
+        studies: Dict[str, "Study"] = {}
+        scan_ok_cache: Dict[str, bool] = {}
+
+        for dirpath, _, filenames in fs.walk(sort_entries=False):
             rel = fs.strip_anchor(dirpath)
-            names = set(filenames)
-            if "2dseq" in names and "visu_pars" in names:
-                reco_dirs.append(rel)
 
-
-
-
+            if "2dseq" not in filenames or "visu_pars" not in filenames:
+                continue
+
+            parts = [p for p in rel.split("/") if p]
             if "pdata" not in parts:
                 continue
             pdata_idx = parts.index("pdata")
@@ -70,12 +113,18 @@ class Study(DatasetNode):
 
             scan_root = "/".join(parts[:pdata_idx])
             study_root = "/".join(parts[:pdata_idx - 1])
-
-
-
-
-                and fs.exists(f"{
-
+
+            # Validate scan-level markers once per scan_root.
+            ok = scan_ok_cache.get(scan_root)
+            if ok is None:
+                ok = fs.exists(f"{scan_root}/method") and fs.exists(f"{scan_root}/acqp")
+                scan_ok_cache[scan_root] = ok
+            if not ok:
+                continue
+
+            # Validate reco file. In most PV layouts, `reco` lives in the same pdata/<reco_id> dir.
+            # Prefer checking the listing we already have, fall back to exists() for safety.
+            if "reco" not in filenames and not fs.exists(f"{rel}/reco"):
                 continue
 
             study = studies.get(study_root)
@@ -84,16 +133,17 @@ class Study(DatasetNode):
                 studies[study_root] = study
 
            if scan_id not in study.scans:
-                study.scans[scan_id] = Scan.from_fs(fs, scan_id, scan_root)
+                # Defer Scan.from_fs(...) until the scan is actually accessed.
+                study.scans[scan_id] = LazyScan(fs=fs, scan_id=scan_id, scan_root=scan_root)
 
         return [studies[k] for k in sorted(studies.keys())]
 
     @property
-    def avail(self) -> Mapping[int, Scan]:
+    def avail(self) -> Mapping[int, "LazyScan"]:
         return {k: self.scans[k] for k in sorted(self.scans)}
 
-    def get_scan(self, scan_id: int) -> Scan:
-        return self.scans[scan_id]
+    def get_scan(self, scan_id: int) -> "Scan":
+        return self.scans[scan_id].materialize()
 
     @property
     def has_subject(self) -> bool:
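The discover() rewrite above replaces per-reco `fs.exists()` probes with a per-scan memo: however many `pdata/<reco_id>` directories a scan has, its `method`/`acqp` markers are checked once. A small sketch of that memoization shape, with a hypothetical `exists()` standing in for DatasetFS (the print shows how often the filesystem is actually touched):

```python
from typing import Dict

# Hypothetical stand-in for DatasetFS.exists(); the real call hits the
# disk (or a zip archive) every time, which is what discover() avoids.
_PRESENT = {"study/3/method", "study/3/acqp"}


def exists(path: str) -> bool:
    print(f"exists({path!r})")
    return path in _PRESENT


scan_ok_cache: Dict[str, bool] = {}


def scan_ok(scan_root: str) -> bool:
    ok = scan_ok_cache.get(scan_root)
    if ok is None:  # only the first reco under this scan_root pays the cost
        ok = exists(f"{scan_root}/method") and exists(f"{scan_root}/acqp")
        scan_ok_cache[scan_root] = ok
    return ok


# Three reco dirs under one scan: only the first triggers exists() calls.
for reco_dir in ("study/3/pdata/1", "study/3/pdata/2", "study/3/pdata/3"):
    print(reco_dir, "->", scan_ok("study/3"))
```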
brkraw/resolver/datatype.py
CHANGED
@@ -11,7 +11,7 @@ from __future__ import annotations
 from typing import Union, Optional, TypedDict, cast
 import numpy as np
 from .helpers import get_file
-from ..dataclasses import Scan, Reco
+from ..dataclasses import Scan, Reco, LazyScan
 
 
 WORDTYPE = {
@@ -42,8 +42,16 @@ def _get_dtype(byte_order: str, word_type: str) -> np.dtype:
     return np.dtype(f"{BYTEORDER[byte_order]}{WORDTYPE[word_type]}")
 
 
-def resolve(obj: Union["Scan", "Reco"]) -> Optional[ResolvedDatatype]:
+def resolve(obj: Union["LazyScan", "Scan", "Reco"]) -> Optional[ResolvedDatatype]:
     """Return dtype/slope/offset metadata for a Scan or Reco."""
+    # Accept LazyScan-like proxies by materializing them.
+    if not isinstance(obj, (Scan, Reco)) and hasattr(obj, "materialize"):
+        try:
+            obj = obj.materialize()
+        except Exception as e:
+            raise TypeError(
+                f"resolve() failed to materialize proxy object {type(obj)!r}: {e}"
+            ) from e
     if isinstance(obj, Scan):
         try:
             p = get_file(obj, 'acqp')
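Note that the guard duck-types on a `materialize()` method instead of `isinstance(obj, LazyScan)`: any proxy that can produce a real Scan is accepted, and a failing proxy surfaces as a TypeError rather than an opaque attribute error downstream. A stripped-down sketch of that dispatch, with placeholder classes:

```python
class Real:
    kind = "real"


class Proxy:
    def materialize(self) -> "Real":
        return Real()


def resolve(obj):
    # Accept proxy-like objects by materializing them first.
    if not isinstance(obj, Real) and hasattr(obj, "materialize"):
        try:
            obj = obj.materialize()
        except Exception as e:
            raise TypeError(f"failed to materialize {type(obj)!r}: {e}") from e
    if not isinstance(obj, Real):
        raise TypeError(f"unsupported type {type(obj)!r}")
    return obj.kind


print(resolve(Real()))   # 'real'
print(resolve(Proxy()))  # 'real', via materialize()
```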
brkraw/resolver/image.py
CHANGED
@@ -10,6 +10,7 @@ from __future__ import annotations
 
 
 from typing import TYPE_CHECKING, Optional, Sequence, TypedDict, List, Tuple
+import logging
 from .datatype import resolve as datatype_resolver
 from .shape import resolve as shape_resolver
 from .helpers import get_reco, get_file, swap_element
@@ -22,16 +23,17 @@ if TYPE_CHECKING:
 
 
 class ResolvedImage(TypedDict):
-    dataobj: np.ndarray
+    dataobj: Optional[np.ndarray]
     slope: float
     offset: float
-    shape_desc: List[str]
+    shape_desc: Optional[List[str]]
     sliceorder_scheme: Optional[str]
     num_cycles: int
     time_per_cycle: Optional[float]
 
 
 Z_AXIS_DESCRIPTORS = {'spatial', 'slice', 'without_slice'}
+logger = logging.getLogger("brkraw.resolver.image")
 
 
 def _find_z_axis_candidate(shape_desc: Sequence[str]) -> Optional[int]:
@@ -81,7 +83,10 @@ def ensure_3d_spatial_data(dataobj: np.ndarray, shape_info: "ResolvedShape") ->
     ValueError: When data dimensionality and shape_desc disagree or z-axis
         descriptor is missing.
     """
-    shape = list(shape_info['shape'])
+    # NOTE: `shape_info['shape']` describes the full dataset. When we read only a
+    # subset of cycles (block read), `dataobj.shape` may differ (typically the last
+    # dimension). Use the actual `dataobj.shape` for validation and swapping.
+    shape = list(dataobj.shape)
     shape_desc = list(shape_info['shape_desc'])
 
     if dataobj.ndim != len(shape_desc):
@@ -108,18 +113,102 @@ def ensure_3d_spatial_data(dataobj: np.ndarray, shape_info: "ResolvedShape") ->
     return new_dataobj, normalized_shape_desc
 
 
-def _read_2dseq_data(
-
-
+def _read_2dseq_data(
+    reco: "Reco",
+    dtype: np.dtype,
+    shape: Sequence[int],
+    *,
+    cycle_index: Optional[int] = None,
+    cycle_count: Optional[int] = None,
+    total_cycles: Optional[int] = None,
+) -> np.ndarray:
+    """Read 2dseq into a Fortran-ordered NumPy array.
+
+    Default behavior reads the full dataset.
+
+    When `cycle_index` is provided, read a contiguous block of cycles starting at
+    `cycle_index`. Use `cycle_count` to limit how many cycles to read. If
+    `cycle_count` is None, read through the end.
+
+    Notes:
+        This assumes cycles are stored contiguously by cycle in the 2dseq stream.
+        BrkRaw treats the cycle axis as the LAST dimension of `shape`.
+    """
+    itemsize = np.dtype(dtype).itemsize
+
+    # Full read path (default).
+    if cycle_index is None:
+        expected_size = int(np.prod(shape)) * itemsize
+        with get_file(reco, "2dseq") as f:
+            f.seek(0)
+            raw = f.read()
+        if len(raw) != expected_size:
+            raise ValueError(
+                f"2dseq size mismatch: expected {expected_size} bytes for shape {shape}, got {len(raw)}"
+            )
+        try:
+            return np.frombuffer(raw, dtype).reshape(shape, order="F")
+        except ValueError as exc:
+            raise ValueError(f"failed to reshape 2dseq buffer to shape {shape}") from exc
+
+    # Block read path.
+    if total_cycles is None:
+        raise ValueError("total_cycles is required when cycle_index is provided")
+
+    total_cycles = int(total_cycles)
+    if total_cycles < 1:
+        raise ValueError(f"invalid total_cycles={total_cycles}")
+
+    if cycle_index < 0 or cycle_index >= total_cycles:
+        raise ValueError(f"cycle_index {cycle_index} out of range [0, {total_cycles - 1}]")
+
+    if not shape:
+        raise ValueError("shape is empty")
+
+    # BrkRaw convention: cycle axis is the last dimension only when cycles > 1.
+    if total_cycles > 1:
+        if int(shape[-1]) != total_cycles:
+            raise ValueError(
+                f"cycle axis mismatch: expected shape[-1]==total_cycles ({total_cycles}), got shape[-1]={shape[-1]} for shape={shape}"
+            )
+        elems_per_cycle = int(np.prod(shape[:-1])) if len(shape) > 1 else 1
+    else:
+        elems_per_cycle = int(np.prod(shape))
+    bytes_per_cycle = elems_per_cycle * itemsize
+
+    if cycle_count is None:
+        cycle_count = total_cycles - cycle_index
+    cycle_count = int(cycle_count)
+
+    if cycle_count <= 0:
+        raise ValueError(f"cycle_count must be > 0 (got {cycle_count})")
+    if cycle_index + cycle_count > total_cycles:
+        raise ValueError(
+            f"cycle_index+cycle_count exceeds total_cycles: {cycle_index}+{cycle_count} > {total_cycles}"
+        )
+
+    byte_offset = cycle_index * bytes_per_cycle
+    byte_size = cycle_count * bytes_per_cycle
+
     with get_file(reco, "2dseq") as f:
-        f.seek(0)
-        raw = f.read()
-
-
+        f.seek(byte_offset)
+        raw = f.read(byte_size)
+
+    if len(raw) != byte_size:
+        raise ValueError(
+            f"2dseq block read size mismatch: expected {byte_size} bytes, got {len(raw)}"
+        )
+
+    # Cycle axis is the last dimension.
+    if len(shape) == 1:
+        block_shape = (cycle_count,)
+    else:
+        block_shape = (*shape[:-1], cycle_count)
+
     try:
-        return np.frombuffer(raw, dtype).reshape(shape, order="F")
+        return np.frombuffer(raw, dtype).reshape(block_shape, order="F")
     except ValueError as exc:
-        raise ValueError(f"failed to reshape 2dseq buffer to shape {shape}") from exc
+        raise ValueError(f"failed to reshape 2dseq block buffer to shape {block_shape}") from exc
 
 
 def _normalize_cycle_info(cycle_info: Optional["ResolvedCycle"]) -> Tuple[int, Optional[float]]:
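The block-read path above rests on one layout fact: in a Fortran-ordered array whose last axis is the cycle, cycle k occupies a contiguous run of `prod(shape[:-1])` elements starting at byte `k * prod(shape[:-1]) * itemsize`, so a seek plus a bounded read replaces loading the whole file. A runnable sketch of that arithmetic against an in-memory stream (shape and dtype are illustrative):

```python
import numpy as np

shape = (4, 3, 2, 5)              # x, y, z, cycle -- cycle axis last
full = np.arange(np.prod(shape), dtype="<i4").reshape(shape, order="F")
stream = full.tobytes(order="F")  # what a 2dseq-like file would contain

itemsize = full.dtype.itemsize
elems_per_cycle = int(np.prod(shape[:-1]))
bytes_per_cycle = elems_per_cycle * itemsize

cycle_index, cycle_count = 2, 2   # read cycles 2..3 only
byte_offset = cycle_index * bytes_per_cycle
byte_size = cycle_count * bytes_per_cycle

# File equivalent: f.seek(byte_offset); raw = f.read(byte_size)
raw = stream[byte_offset:byte_offset + byte_size]
block = np.frombuffer(raw, full.dtype).reshape((*shape[:-1], cycle_count), order="F")

# The block equals the matching slice along the cycle axis of the full array.
assert np.array_equal(block, full[..., cycle_index:cycle_index + cycle_count])
print(block.shape)                # (4, 3, 2, 2)
```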
@@ -129,7 +218,14 @@ def _normalize_cycle_info(cycle_info: Optional["ResolvedCycle"]) -> Tuple[int, Optional[float]]:
     return int(cycle_info['num_cycles']), cycle_info.get('time_step')
 
 
-def resolve(scan: "Scan", reco_id: int = 1) -> Optional[ResolvedImage]:
+def resolve(
+    scan: "Scan",
+    reco_id: int = 1,
+    *,
+    load_data: bool = True,
+    cycle_index: Optional[int] = None,
+    cycle_count: Optional[int] = None,
+) -> Optional[ResolvedImage]:
     """Load 2dseq as a NumPy array with associated metadata.
 
     Args:
@@ -161,13 +257,36 @@ def resolve(scan: "Scan", reco_id: int = 1) -> Optional[ResolvedImage]:
     offset = 0.0
     shape = shape_info["shape"]
 
-
-
-
-
-
-
-
+    total_cycles, time_per_cycle = _normalize_cycle_info(shape_info['objs'].cycle)
+
+    dataobj, shape_desc = None, None
+    if load_data:
+        if total_cycles == 1:
+            logger.debug(
+                "Cycle slicing disabled: total_cycles=%s shape=%s",
+                total_cycles,
+                shape,
+            )
+            cycle_index = None
+            cycle_count = None
+        else:
+            logger.debug(
+                "Cycle slicing enabled: total_cycles=%s shape=%s",
+                total_cycles,
+                shape,
+            )
+        try:
+            dataobj = _read_2dseq_data(
+                reco,
+                dtype,
+                shape,
+                cycle_index=cycle_index,
+                cycle_count=cycle_count,
+                total_cycles=total_cycles,
+            )
+        except FileNotFoundError:
+            return None
+        dataobj, shape_desc = ensure_3d_spatial_data(dataobj, shape_info)
 
     result: ResolvedImage = {
         # image
@@ -178,7 +297,7 @@ def resolve(scan: "Scan", reco_id: int = 1) -> Optional[ResolvedImage]:
         'sliceorder_scheme': shape_info['sliceorder_scheme'],
 
         # cycle
-        'num_cycles':
+        'num_cycles': total_cycles,
         'time_per_cycle': time_per_cycle,
     }
     return result
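Together, `load_data` and the cycle arguments give the new resolve() three call shapes: metadata only, full volume, and a bounded window of cycles. A usage sketch; it assumes `study` came from `Study.discover(...)` on a local Paravision dataset, so it is not runnable without one:

```python
from brkraw.resolver.image import resolve

scan = study.get_scan(3)  # materializes the LazyScan created by discover()

# 1) Metadata only: skips the 2dseq read; dataobj and shape_desc are None.
meta = resolve(scan, reco_id=1, load_data=False)
print(meta["num_cycles"], meta["time_per_cycle"])

# 2) Full read (the default, and the pre-0.5.5 behavior).
full = resolve(scan, reco_id=1)

# 3) Block read: cycles 10..19 only; the cycle arguments are reset to None
#    when the reco has a single cycle.
window = resolve(scan, reco_id=1, cycle_index=10, cycle_count=10)
```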
brkraw/resolver/nifti.py
CHANGED
@@ -32,7 +32,6 @@ DimInfo = Tuple[Optional[int], Optional[int], Optional[int]]
 logger = logging.getLogger("brkraw")
 
 class Nifti1HeaderContents(TypedDict, total=False):
-    flip_x: bool
     slice_code: int
     slope_inter: Tuple[float, float]
     time_step: Optional[float]
@@ -57,7 +56,6 @@ class Nifti1HeaderContents(TypedDict, total=False):
 _XYZ_UNITS = set(get_args(XYZUNIT))
 _T_UNITS = set(get_args(TUNIT))
 _HEADER_FIELDS = {
-    "flip_x",
     "slice_code",
     "slope_inter",
     "time_step",
@@ -159,9 +157,7 @@ def _coerce_header_contents(data: Mapping[str, Any]) -> Nifti1HeaderContents:
                 header[key] = None
                 continue
             raise ValueError(f"{key} cannot be null.")
-        if key == "flip_x":
-            header[key] = _coerce_bool(value, name=key)
-        elif key in {"slice_code", "qform_code", "sform_code", "slice_start", "slice_end", "intent_code"}:
+        if key in {"slice_code", "qform_code", "sform_code", "slice_start", "slice_end", "intent_code"}:
             header[key] = int(value)
         elif key in {"time_step", "slice_duration", "cal_min", "cal_max"}:
             header[key] = float(value)
@@ -247,9 +243,8 @@ def _set_dataobj(niiobj: "Nifti1Image", dataobj: np.ndarray) -> None:
 
 
 def resolve(
-    image_info: "ResolvedImage",
-
-    xyz_units: "XYZUNIT" = 'mm',
+    image_info: "ResolvedImage",
+    xyz_units: "XYZUNIT" = 'mm',
     t_units: "TUNIT" = 'sec'
 ) -> Nifti1HeaderContents:
 
@@ -273,7 +268,6 @@ def resolve(
     slope = image_info['slope']
     offset = image_info['offset']
     result: Nifti1HeaderContents = {
-        'flip_x': flip_x,
         'slice_code': slice_code,
         'slope_inter': (slope, offset),
         'time_step': time_step,
@@ -295,9 +289,7 @@ def update(
     for c, val in nifti1header_contents.items():
         if val is None or c in ('qform_code', 'sform_code'):
             continue
-        if c == "flip_x":
-            niiobj.header.default_x_flip = bool(val)
-        elif c == "slice_code":
+        if c == "slice_code":
             if _coerce_int(val, name="slice_code") != 0:
                 niiobj.header['slice_code'] = _coerce_int(val, name="slice_code")
         elif c == "slope_inter":
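With `flip_x` gone, update() writes only standard NIfTI-1 header fields through nibabel. A minimal nibabel sketch of the kinds of assignments that survive the removal (the values are placeholders, not derived from a real dataset):

```python
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((4, 4, 2, 3), dtype=np.float32), affine=np.eye(4))

# Header fields of the kind update() still sets after the flip_x removal.
img.header["slice_code"] = 1              # sequential ascending
img.header.set_slope_inter(2.0, 0.0)      # from ResolvedImage slope/offset
img.header.set_xyzt_units(xyz="mm", t="sec")

print(int(img.header["slice_code"]), img.header.get_slope_inter())
```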
brkraw/schema/niftiheader.yaml
CHANGED
brkraw/specs/meta/validator.py
CHANGED
brkraw/specs/rules/logic.py
CHANGED
@@ -10,7 +10,7 @@ from ..remapper import load_spec, map_parameters
 from .validator import validate_rules
 import logging
 
-logger = logging.getLogger(
+logger = logging.getLogger(__name__)
 
 RULE_CATEGORIES = ("info_spec", "metadata_spec", "converter_hook")
 SPEC_CATEGORIES = ("info_spec", "metadata_spec")
@@ -216,8 +216,6 @@ def select_rule_use(
             logger.debug("Rule %r matched, selected use=%r.", rule.get("name"), selected)
         else:
             logger.debug("Rule %r matched but has no usable 'use' entry.", rule.get("name"))
-    else:
-        logger.debug("Rule %r did not match.", rule.get("name"))
     logger.debug("Rule selection result: %r", selected)
     return selected
 
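Using `logging.getLogger(__name__)` places the module at `brkraw.specs.rules.logic` in the standard dotted logger hierarchy, so callers can raise verbosity for the rules machinery alone. For example:

```python
import logging

logging.basicConfig(level=logging.INFO)

# Make only the rules subtree verbose; the rest of brkraw stays at INFO.
logging.getLogger("brkraw.specs.rules").setLevel(logging.DEBUG)

logging.getLogger("brkraw.specs.rules.logic").debug("emitted: inside the DEBUG subtree")
logging.getLogger("brkraw.resolver.image").debug("suppressed: still at INFO")
```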
{brkraw-0.5.2.dist-info → brkraw-0.5.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: brkraw
-Version: 0.5.2
+Version: 0.5.5
 Summary: Toolkit for loading Bruker Paravision datasets, mapping metadata, and exporting NIfTI
 Project-URL: Homepage, https://brkraw.github.io
 Maintainer-email: SungHo Lee <shlee@unc.edu>
@@ -31,6 +31,7 @@ Requires-Dist: mkdocs; extra == 'dev'
 Requires-Dist: mkdocs-material; extra == 'dev'
 Requires-Dist: pymdown-extensions; extra == 'dev'
 Requires-Dist: pytest; extra == 'dev'
+Requires-Dist: tomli; extra == 'dev'
 Provides-Extra: docs
 Requires-Dist: rich; extra == 'docs'
 Provides-Extra: minimal
@@ -44,11 +45,11 @@ Description-Content-Type: text/markdown
 </picture>
 <!-- markdownlint-enable MD041 MD033 MD013 -->
 
-[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3818614.svg)](https://doi.org/10.5281/zenodo.3818614)
+
 
 A modular toolkit for Bruker MRI raw-data handling.
 
-BrkRaw (v0.5.2) converts raw data into standardized, neuroimaging-ready
+BrkRaw (v0.5.5) converts raw data into standardized, neuroimaging-ready
 datasets, with extensible rules/specs and plugin hooks.
 
 - Documentation: [brkraw.github.io](https://brkraw.github.io/)
@@ -66,16 +67,14 @@ If you use BrkRaw in your research, please cite it.
 
 <!-- BEGIN: brkraw-bibtex -->
 ```biblatex
-@software{
-  author = {Lee, Sung-Ho and Devenyi, Gabriel A and Ban, Woomi and Shih, Yen-Yu Ian},
+@software{brkraw,
+  author = {Lee, Sung-Ho and Devenyi, Gabriel A. and Ban, Woomi and Shih, Yen-Yu Ian},
   title = {BrkRaw: A modular toolkit for Bruker MRI raw-data handling},
-
-  version = {0.5.0},
+  version = {0.5.5},
   doi = {10.5281/zenodo.3818614},
   url = {https://github.com/BrkRaw/brkraw},
   note = {Documentation: https://brkraw.github.io},
-  license = {GPL-3.0-only}
-  date = {2026-01-13}
+  license = {GPL-3.0-only}
 }
 ```
 <!-- END: brkraw-bibtex -->