brkraw-sordino 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
brkraw_sordino/__init__.py ADDED
@@ -0,0 +1,7 @@
+ """BrkRaw SORDINO-ZTE converter hook package."""
+
+ from .hook import get_dataobj
+
+ __all__ = ["__version__", "get_dataobj"]
+
+ __version__ = "0.1.3"
brkraw_sordino/brkraw_hook.yaml ADDED
@@ -0,0 +1,8 @@
+ docs: docs.md
+ specs:
+   - specs/info_spec.yaml
+   - specs/metadata_spec.yaml
+ rules:
+   - rules/sordino.yaml
+ transforms:
+   - specs/utils.py
brkraw_sordino/docs.md ADDED
@@ -0,0 +1,99 @@
+ # brkraw-sordino
+
+ SORDINO-ZTE reconstruction hook for BrkRaw.
+
+ ## Install
+
+ ```bash
+ pip install -e .
+ ```
+
+ ## Hook install
+
+ ```bash
+ brkraw hook install brkraw-sordino
+ ```
+
+ This installs the hook rule from the package manifest (`brkraw_hook.yaml`).
+
+ ## Usage
+
+ Once installed, `brkraw` applies the hook automatically when a dataset matches the rule.
+
+ Basic conversion:
+
+ ```bash
+ brkraw convert /path/to/study --scan-id 3 --reco-id 1
+ ```
+
+ The hook behaves the same whether invoked via the CLI or via the Python API (the same hook entrypoint and arguments are used).
+
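+ For example, a minimal sketch of calling the hook entrypoint directly from Python (how the `scan` object is loaded is assumed and not shown here; the keyword arguments mirror the hook options described below):
+
+ ```python
+ from brkraw_sordino import get_dataobj
+
+ def reconstruct(scan):
+     """Call the SORDINO hook directly; `scan` is a BrkRaw scan object you already loaded."""
+     return get_dataobj(
+         scan,
+         ext_factors=1.2,   # same keys as --hook-arg / YAML options
+         offset=2,
+         split_ch=False,
+     )
+ ```
+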
+ To pass hook options explicitly (or to override defaults), use `--hook-arg` or `--hook-args-yaml` as described below.
+
+ ## Hook options
+
+ Hook arguments can be passed via `brkraw convert` using `--hook-arg` with the entrypoint name (`sordino`):
+
+ ```bash
+ brkraw convert /path/to/study -s 3 -r 1 \
+   --hook-arg sordino:ext_factors=1.2 \
+   --hook-arg sordino:offset=2 \
+   --hook-arg sordino:split_ch=false
+ ```
+
+ ### Pass hook options via YAML (`--hook-args-yaml`)
+
+ BrkRaw can also load hook arguments from YAML. Generate a template like this:
+
+ ```bash
+ brkraw hook preset sordino -o hook_args.yaml
+ ```
+
+ Edit the generated YAML, then pass it to `brkraw convert` (the flag is repeatable):
+
+ ```bash
+ brkraw convert /path/to/study -s 3 -r 1 --hook-args-yaml hook_args.yaml
+ ```
+
+ Example:
+
+ ```yaml
+ hooks:
+   sordino:
+     ext_factors: 1.2
+     offset: 2
+     split_ch: false
+     # as_complex: true  # optional, return (real, imag)
+     # cache_dir: ~/.brkraw/cache/sordino  # optional (add manually if needed)
+ ```
+
+ Notes:
+
+ - CLI `--hook-arg` values override YAML.
+ - YAML supports both `{hooks: {sordino: {...}}}` and `{sordino: {...}}` shapes (example below).
+ - You can also set `BRKRAW_CONVERT_HOOK_ARGS_YAML` (comma-separated paths).
+
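+ For example, the flat YAML shape and the environment variable can be used together (paths and values here are illustrative):
+
+ ```yaml
+ # hook_args.yaml -- flat shape, equivalent to the `hooks:` form above
+ sordino:
+   offset: 2
+   split_ch: false
+ ```
+
+ ```bash
+ # comma-separated list of YAML files picked up by brkraw convert
+ export BRKRAW_CONVERT_HOOK_ARGS_YAML=hook_args.yaml
+ brkraw convert /path/to/study -s 3 -r 1
+ ```
+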
+ Supported keys (a combined YAML example follows the list):
+
+ - `ext_factors`: scalar or 3-item sequence (default: 1.0)
+ - `ignore_samples`: int (default: 1)
+ - `offset`: int (default: 0)
+ - `num_frames`: int or null (default: None)
+ - `correct_spoketiming`: bool (default: false)
+ - `correct_ramptime`: bool (default: true)
+ - `offreso_ch`: int or null (default: None)
+ - `offreso_freq`: float (default: 0.0)
+ - `mem_limit`: float (default: 0.5)
+ - `clear_cache`: bool (default: true)
+ - `split_ch`: bool (default: false, merge channels)
+ - `as_complex`: bool (default: false, return complex as (real, imag))
+ - `cache_dir`: string path (default: `~/.brkraw/cache/sordino`)
+
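+ As a further illustration (the values are arbitrary), a per-axis `ext_factors` and a frame window can be combined in the same YAML:
+
+ ```yaml
+ hooks:
+   sordino:
+     ext_factors: [1.2, 1.2, 1.0]
+     offset: 10
+     num_frames: 300
+     correct_spoketiming: true
+ ```
+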
+ ## Notes
+
+ - The hook reconstructs data using an adjoint NUFFT and returns magnitude images by default.
+ - Multi-channel data defaults to merged channels; set `split_ch=true` to keep channels split.
+ - When `split_ch=false`, magnitude output uses root-sum-of-squares (RSS) across channels, while complex output uses a coherent sum (see the sketch after this list).
+ - Orientation is normalized when the first three axes are spatial; see `notebooks/orientation.ipynb`.
+ - Cache files live under `~/.brkraw/cache/sordino` (or `BRKRAW_CONFIG_HOME`).
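+
+ A standalone NumPy sketch of the channel-combination rules above (illustrative only; the array shape is assumed, with channels on the first axis):
+
+ ```python
+ import numpy as np
+
+ # Assumed shape: (channels, x, y, z, frames), complex-valued per receiver.
+ rng = np.random.default_rng(0)
+ dataobj = rng.standard_normal((4, 8, 8, 8, 2)) + 1j * rng.standard_normal((4, 8, 8, 8, 2))
+
+ # split_ch=false, magnitude output: root-sum-of-squares across channels.
+ rss = np.sqrt(np.sum(np.abs(dataobj) ** 2, axis=0))
+
+ # split_ch=false with as_complex=true: coherent (complex) sum across channels.
+ coherent = np.sum(dataobj, axis=0)
+ ```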
brkraw_sordino/helper.py ADDED
@@ -0,0 +1,81 @@
+ import logging
+ import os
+ import sys
+ import time
+ from typing import Any, Optional, TextIO, cast
+
+ logger = logging.getLogger("brkraw.sordino")
+
+
+ def progressbar(iterable: Any, *, desc: str = "", ncols: int = 100, disable: Optional[bool] = None):
+     """Lightweight progress iterator (tqdm-like) without external deps."""
+     if disable is None:
+         disable = logger.isEnabledFor(logging.DEBUG)
+     stream: TextIO = cast(
+         TextIO,
+         sys.__stderr__
+         or sys.stderr
+         or sys.__stdout__
+         or sys.stdout
+         or open(os.devnull, "w", encoding="utf-8"),
+     )
+     try:
+         is_tty = bool(getattr(stream, "isatty", lambda: False)())
+     except Exception:
+         is_tty = False
+     if disable or not is_tty or not logger.isEnabledFor(logging.INFO):
+         return iterable
+
+     try:
+         total = len(iterable)  # type: ignore[arg-type]
+     except Exception:
+         total = 0
+
+     if total <= 0:
+         return iterable
+
+     bar_width = max(10, min(40, ncols - max(0, len(desc)) - 20))
+     start = time.time()
+     last_emit = 0.0
+
+     def _emit(i: int) -> None:
+         nonlocal last_emit
+         now = time.time()
+         if now - last_emit < 0.1 and i < total:
+             return
+         last_emit = now
+         frac = min(1.0, max(0.0, i / total))
+         filled = int(bar_width * frac)
+         bar = "#" * filled + "-" * (bar_width - filled)
+         elapsed = max(0.001, now - start)
+         rate = i / elapsed if i > 0 else 0.0
+         remaining = max(0, total - i)
+         eta = int(remaining / rate) if rate > 0 else -1
+         eta_txt = f"{eta}s" if eta >= 0 else "?"
+         prefix = f"{desc} " if desc else ""
+         line = f"{prefix}[{bar}] {i}/{total} ETA {eta_txt}"
+         try:
+             stream.write("\r" + line)
+             stream.flush()
+         except Exception:
+             pass
+
+     def _done() -> None:
+         try:
+             stream.write("\r" + (" " * (ncols if ncols > 0 else 120)) + "\r\n")
+             stream.flush()
+         except Exception:
+             pass
+
+     def _iter():
+         for i, item in enumerate(iterable, start=1):
+             _emit(i)
+             yield item
+         _done()
+
+     return _iter()
+
+
+ __all__ = [
+     "progressbar",
+ ]
brkraw_sordino/hook.py ADDED
@@ -0,0 +1,164 @@
+ import os
+ import logging
+ import numpy as np
+ import tempfile
+ from pathlib import Path
+
+ from typing import Any, Optional, Tuple, Dict, Union, cast, TYPE_CHECKING
+
+ from brkraw.specs.remapper import load_spec, map_parameters
+ from brkraw.resolver import fid as fid_resolver
+ from brkraw.resolver import datatype as dtype_resolver
+ from brkraw.core import config as config_core
+ from brkraw.core.fs import DatasetFile
+ from brkraw.core.zip import ZippedFile
+
+ from numpy.typing import NDArray
+ from .typing import Options
+ from .traj import get_trajectory
+ from .helper import progressbar
+ from .recon import get_dataobj_shape, recon_dataobj
+ from .spoketiming import prep_fid_segmentation, correct_spoketiming
+ from .orientation import correct as correct_orientation
+
+
+ FileIO = Union[DatasetFile, ZippedFile]
+ logger = logging.getLogger("brkraw.sordino")
+
+
+ def _normalize_ext_factors(value: Any) -> Tuple[float, float, float]:
+     if value is None:
+         return (1.0, 1.0, 1.0)
+     if isinstance(value, (int, float)):
+         val = float(value)
+         return (val, val, val)
+     if isinstance(value, (list, tuple, np.ndarray)):
+         items = list(value)
+         if len(items) == 1:
+             val = float(items[0])
+             return (val, val, val)
+         if len(items) == 3:
+             return (float(items[0]), float(items[1]), float(items[2]))
+     raise ValueError("ext_factors must be a scalar or a 3-item sequence")
+
+
+ def _get_cache_dir(path: Optional[Union[str, Path]]) -> Path:
+     if path:
+         base = Path(path).expanduser()
+     else:
+         base = config_core.resolve_root(None) / "cache" / "sordino"
+     base.mkdir(parents=True, exist_ok=True)
+     return base
+
+
+ def _build_options(kwargs: Dict[str, Any]) -> Options:
+     cache_dir = _get_cache_dir(kwargs.get("cache_dir"))
+     logger.debug("Cache dir: %s", cache_dir)
+
+     return Options(
+         ext_factors=_normalize_ext_factors(kwargs.get("ext_factors")),
+         ignore_samples=int(kwargs.get("ignore_samples", 1)),
+         offset=int(kwargs.get("offset", 0)),
+         num_frames=kwargs.get("num_frames"),
+         correct_spoketiming=bool(kwargs.get("correct_spoketiming", False)),
+         correct_ramptime=bool(kwargs.get("correct_ramptime", True)),
+         offreso_ch=kwargs.get("offreso_ch"),
+         offreso_freq=float(kwargs.get("offreso_freq", 0.0)),
+         mem_limit=float(kwargs.get("mem_limit", 0.5)),
+         clear_cache=bool(kwargs.get("clear_cache", True)),
+         split_ch=bool(kwargs.get("split_ch", False)),
+         cache_dir=cache_dir,
+         as_complex=bool(kwargs.get("as_complex", False)),
+     )
+
+
+ def _parse_recon_info(scan):
+     spec_path = Path(__file__).parent / "specs" / "recon_spec.yaml"
+     spec, transforms = load_spec(spec_path, validate=True)
+     recon_info = map_parameters(scan, spec, transforms)
+     dtype_info = dtype_resolver.resolve(scan)
+     if not dtype_info or "dtype" not in dtype_info:
+         raise ValueError("Failed to resolve FID dtype from acqp.")
+     recon_info['FIDDataType'] = dtype_info["dtype"]
+     return recon_info
+
+
+ def _get_fid_entry(scan: Any) -> FileIO:
+     fid_entry = fid_resolver.get_fid(scan)
+     if fid_entry is None:
+         logger.warning("No FID/rawdata entry found for scan %s.",
+                        getattr(scan, "scan_id", "?"))
+     return cast(FileIO, fid_entry)
+
+
+ def get_dataobj(
+     scan: Any, reco_id: Optional[int] = None, **kwargs: Any,
+ ) -> Optional[Union[np.ndarray, Tuple[np.ndarray, ...]]]:
+
+     options = _build_options(kwargs)
+     recon_info = _parse_recon_info(scan)
+     fid_entry = _get_fid_entry(scan)
+     if fid_entry is None:
+         return None
+
+     with fid_entry.open() as fid_fobj:
+         traj = get_trajectory(recon_info, options)
+
+         with tempfile.NamedTemporaryFile(
+             mode="w+b", delete=False, dir=options.cache_dir
+         ) as img_fobj:  # where reconstructed dataobj will be stored.
+
+             if options.correct_spoketiming:
+                 with tempfile.NamedTemporaryFile(
+                     mode="w+b", delete=False, dir=options.cache_dir
+                 ) as stc_fobj:  # where spoketiming corrected fid will be stored.
+
+                     segs = prep_fid_segmentation(fid_fobj, recon_info, options)
+                     logger.info("Spoketiming correction: %s segment(s).", segs.shape[0])
+
+                     stc_param = correct_spoketiming(segs, fid_fobj, stc_fobj, recon_info, options)
+                     dtype = recon_dataobj(stc_fobj, traj, recon_info, img_fobj, options,
+                                           override_buffer_size=stc_param['buffer_size'],
+                                           override_dtype=stc_param['dtype'])
+             else:
+                 dtype = recon_dataobj(fid_fobj, traj, recon_info, img_fobj, options)
+
+     with open(img_fobj.name, "rb") as img_fobj:
+         dataobj_shape = get_dataobj_shape(recon_info, options)
+         dataobj = np.frombuffer(img_fobj.read(), dtype=dtype).reshape(dataobj_shape, order='F')
+     num_receivers = recon_info.get("EncNReceivers", 1)
+     if not options.as_complex:
+         dataobj = np.abs(dataobj)
+
+     is_multi = num_receivers > 1
+     if not options.split_ch and is_multi:
+         if options.as_complex:
+             dataobj = np.sum(dataobj, axis=0)
+             is_multi = False
+         else:
+             dataobj = np.sqrt(np.sum(dataobj ** 2, axis=0))
+             is_multi = False
+
+     if options.as_complex:
+         if is_multi:
+             dataobj_list = []
+             for receiver_data in dataobj:
+                 receiver_arr = cast(NDArray[Any], receiver_data)
+                 dataobj_list.extend([
+                     correct_orientation(np.real(receiver_arr), recon_info),
+                     correct_orientation(np.imag(receiver_arr), recon_info),
+                 ])
+             return cast(Tuple[np.ndarray, ...], tuple(dataobj_list))
+         return (
+             correct_orientation(np.real(dataobj), recon_info),
+             correct_orientation(np.imag(dataobj), recon_info),
+         )
+
+     if is_multi:
+         return cast(
+             Tuple[np.ndarray, ...],
+             tuple(correct_orientation(ch, recon_info) for ch in dataobj),
+         )
+     return correct_orientation(cast(np.ndarray, dataobj), recon_info)
+
+
+ HOOK = {"get_dataobj": get_dataobj}
+
+ __all__ = ["HOOK", "get_dataobj"]
brkraw_sordino/orientation.py ADDED
@@ -0,0 +1,45 @@
+ import numpy as np
+ from numpy.typing import NDArray
+ from typing import Dict, Any, Tuple, Optional, cast
+ import logging
+
+ logger = logging.getLogger("brkraw.sordino")
+
+
+ PLANE_PERM = {
+     "axial": (0, 1, 2),
+     "coronal": (0, 2, 1),
+     "sagittal": (1, 2, 0),
+ }
+
+
+ def apply_axis_perm_flip(dataobj: NDArray, R: NDArray) -> NDArray[Any]:
+     perm = np.argmax(np.abs(R), axis=1)
+     data_t = np.transpose(dataobj, axes=tuple(perm) + tuple(range(3, dataobj.ndim)))
+     return data_t
+
+
+ def apply_plane_fix(
+     dataobj: NDArray,
+     plane: str,
+ ) -> NDArray[Any]:
+     perm = PLANE_PERM[plane]
+     perm_full = tuple(perm) + tuple(range(3, dataobj.ndim))
+     dataobj = np.transpose(dataobj, perm_full)
+     return dataobj
+
+
+ def correct(dataobj: NDArray, recon_info: Dict[str, Any]) -> NDArray[Any]:
+     R = cast(NDArray[Any], recon_info.get('GradientOrientation'))
+     plane = cast(str, recon_info.get('SliceOrientation'))
+     dataobj = apply_axis_perm_flip(dataobj, R.T)
+     dataobj = apply_plane_fix(
+         dataobj,
+         plane,
+     )
+     return dataobj
+
+
+ __all__ = [
+     "correct",
+ ]
brkraw_sordino/recon.py ADDED
@@ -0,0 +1,125 @@
+ import numpy as np
+ from typing import Any, Dict, Tuple
+ from numpy.typing import NDArray
+ import logging
+ from .helper import progressbar
+ from .typing import Options
+
+ logger = logging.getLogger("brkraw.sordino")
+
+
+ def parse_fid_info(recon_info: Dict[str, Any]) -> Tuple[np.ndarray, np.dtype]:
+     n_receivers = int(recon_info.get("EncNReceivers") or 0)
+     n_points = int(recon_info.get("NPoints") or 0)
+     n_pro = int(recon_info.get("NPro") or 0)
+     dtype = recon_info['FIDDataType']
+     if not all((n_receivers, n_points, n_pro)):
+         raise ValueError("Missing reconstruction dimensions in recon_spec output.")
+     return np.array([2, n_points, n_receivers, n_pro]), dtype
+
+
+ def get_num_frames(recon_info: Dict[str, Any], options: Options):
+     """Return the number of data frames to be reconstructed."""
+     total_frames = recon_info['NRepetitions']
+     offset = getattr(options, 'offset') or 0
+     avail_frames = total_frames - offset
+     set_frames = getattr(options, 'num_frames') or total_frames
+
+     if set_frames > avail_frames:
+         diff = set_frames - avail_frames
+         set_frames -= diff
+     return set_frames
+
+
+ def parse_volume_shape(recon_info: Dict[str, Any],
+                        options: Options) -> list:
+     matrix = recon_info.get("Matrix")
+     if matrix is None:
+         matrix = [int(recon_info.get("NPoints") or 0)] * 3
+         logger.warning("Matrix size missing; defaulting to %s.", matrix)
+     ext_factors = getattr(options, 'ext_factors', None)
+     if ext_factors is None:
+         ext_factors = [1.0, 1.0, 1.0]
+     return np.asarray(matrix * np.asarray(ext_factors)).astype(int).tolist()
+
+
+ def get_dataobj_shape(recon_info: Dict[str, Any],
+                       options: Options):
+     num_receivers = parse_fid_info(recon_info)[0][2]
+     vol_shape = parse_volume_shape(recon_info, options)
+     num_frame = get_num_frames(recon_info, options)
+
+     if num_receivers > 1:
+         return [num_receivers] + vol_shape + [num_frame]
+     else:
+         return vol_shape + [num_frame]
+
+
+ def nufft_adjoint(kspace, traj, volume_shape, operator='finufft'):
+     """Run the adjoint NUFFT and return the reconstructed image."""
+     from mrinufft import get_operator
+
+     dcf = np.sqrt(np.square(traj).sum(-1)).flatten() ** 2
+     dcf /= dcf.max()
+     traj = traj.copy() / 0.5 * np.pi
+
+     nufft_op = get_operator(operator)(traj, shape=volume_shape, density=dcf)
+     complex_img = nufft_op.adj_op(kspace.flatten())
+     return complex_img
+
+
+ def recon_dataobj(fid_fobj,
+                   traj,
+                   recon_info: Dict[str, Any],
+                   img_fobj,
+                   options: Options,
+                   override_buffer_size=None,
+                   override_dtype=None):
+     img_fobj.seek(0)
+     fid_shape, fid_dtype = parse_fid_info(recon_info)
+     volume_shape = parse_volume_shape(recon_info, options)
+
+     offset = getattr(options, 'offset') or 0
+     num_frames = get_num_frames(recon_info, options)
+     ignore_samples = getattr(options, 'ignore_samples') or 1
+
+     if all(arg is not None for arg in [override_buffer_size, override_dtype]):
+         fid_fobj.seek(0)
+         buffer_size = override_buffer_size
+         fid_dtype = override_dtype
+     else:
+         buffer_size = int(np.prod(fid_shape) * fid_dtype.itemsize)
+         buf_offset = offset * buffer_size
+         fid_fobj.seek(buf_offset)
+
+     trimmed_traj = traj[:, ignore_samples:, ...]
+     logger.debug("Reconstruction traj shape: %s", trimmed_traj.shape)
+
+     dtype = None
+     for n in progressbar(range(num_frames), desc='frames', ncols=100):
+         buffer = fid_fobj.read(buffer_size)
+         vol = np.frombuffer(buffer, dtype=fid_dtype).reshape(fid_shape, order='F')
+         vol = (vol[0] + 1j * vol[1])[np.newaxis, ...]
+         k_space = vol.squeeze().T[..., ignore_samples:]
+         logger.debug("Reconstruction k-space shape: %s", k_space.shape)
+         n_receivers = fid_shape[2]
+
+         if n_receivers > 1:
+             recon_vol = []
+             for ch_id in range(n_receivers):
+                 _k_space = k_space[:, ch_id, :]
+                 _vol = nufft_adjoint(_k_space, trimmed_traj, volume_shape)
+                 recon_vol.append(_vol)
+             recon_vol = np.stack(recon_vol, axis=0)
+         else:
+             recon_vol = nufft_adjoint(k_space, trimmed_traj, volume_shape)
+         if n == 0:
+             dtype = recon_vol.dtype
+         img_fobj.write(recon_vol.T.flatten(order="C").tobytes())
+     return dtype
+
+ __all__ = [
+     'recon_dataobj',
+     'get_dataobj_shape',
+ ]
brkraw_sordino/rules/sordino.yaml ADDED
@@ -0,0 +1,35 @@
+ converter_hook:
+   - name: "sordino-hook"
+     description: "SORDINO-ZTE reconstruction hook."
+     when:
+       method:
+         sources:
+           - file: method
+             key: Method
+       if:
+         regex: ["$method", "sordino"]
+     use: "sordino"
+
+ info_spec:
+   - name: "sordino-info"
+     description: "SORDINO-ZTE scan info."
+     when:
+       method:
+         sources:
+           - file: method
+             key: Method
+       if:
+         regex: ["$method", "sordino"]
+     use: "sordino_info"
+
+ metadata_spec:
+   - name: "sordino-metadata"
+     description: "SORDINO-ZTE metadata (draft BEP alignment)."
+     when:
+       method:
+         sources:
+           - file: method
+             key: Method
+       if:
+         regex: ["$method", "sordino"]
+     use: "sordino_metadata"