brkraw-0.5.3-py3-none-any.whl → brkraw-0.5.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
brkraw/core/zip.py CHANGED
@@ -588,6 +588,8 @@ class ZippedDir:
 def walk(
     zipobj: zipfile.ZipFile,
     top: str = "",
+    *,
+    sort_entries: bool = True,
 ) -> Iterable[Tuple[str, List[ZippedDir], List[ZippedFile]]]:
     """Walk through a ZipFile like os.walk, but with ZippedFile entries.
 
@@ -600,6 +602,9 @@ def walk(
         paths (for example "repo-abc/dir"). When top does not correspond to an
         explicit directory entry, the function still yields a subtree rooted at
         top, and dirpath values are archive paths under that prefix.
+    sort_entries : bool, optional
+        When True, sort directory names and file names for deterministic output.
+        Set to False for faster traversal when ordering does not matter.
 
     Yields
     ------
@@ -613,61 +618,70 @@ def walk(
     """
     tree_map: Dict[str, Dict[str, Any]] = defaultdict(lambda: {"dirs": set(), "files": {}})
 
-    # Normalize and index
-    for arcname in zipobj.namelist():
+    start = top.strip("/")
+    prefix = f"{start}/" if start else ""
+
+    def _is_dir(info: zipfile.ZipInfo) -> bool:
+        # ZipInfo.is_dir() exists on modern Python, but keep a safe fallback.
+        try:
+            return info.is_dir()  # type: ignore[attr-defined]
+        except Exception:
+            return info.filename.endswith("/")
+
+    # Single pass over the archive; restrict to subtree early when top is given.
+    for info in zipobj.infolist():
+        arcname = info.filename
         norm = arcname.rstrip("/")
+        if not norm:
+            continue
+
+        # Restrict to the requested subtree if provided.
+        if start:
+            if norm != start and not norm.startswith(prefix):
+                continue
+
         parts = norm.split("/")
         parent = "/".join(parts[:-1])  # "" at root
         leaf = parts[-1]
 
-        if arcname.endswith("/"):  # a directory entry
+        if _is_dir(info):
             tree_map[parent]["dirs"].add(leaf)
-        else:  # a file entry
-            tree_map[parent]["files"][leaf] = ZippedFile(
-                name=leaf, arcname=norm, zipobj=zipobj
-            )
+        else:
+            tree_map[parent]["files"][leaf] = ZippedFile(name=leaf, arcname=norm, zipobj=zipobj)
 
-        # ensure intermediate directories are known
+        # Ensure intermediate directories are known.
        for i in range(len(parts) - 1):
             up_parent = "/".join(parts[:i])
             up_child = parts[i]
             tree_map[up_parent]["dirs"].add(up_child)
 
-    start = top.rstrip("/")
-
-    # When top does not exist explicitly, build a filtered pseudo-map rooted at top
+    # If the subtree has no entries, return nothing.
     if start and start not in tree_map:
-        pseudo_map: Dict[str, Dict[str, Any]] = defaultdict(lambda: {"dirs": set(), "files": {}})
-        for arcname in zipobj.namelist():
-            if arcname.startswith(start + "/") or arcname.rstrip("/") == start:
-                norm = arcname.rstrip("/")
-                rel = norm[len(start):].lstrip("/")
-                parent = "/".join([start] + ([p for p in rel.split("/")[:-1]] if rel else []))
-                leaf = rel.split("/")[-1] if rel else start.split("/")[-1]
-                if arcname.endswith("/"):
-                    pseudo_map[parent]["dirs"].add(leaf)
-                else:
-                    pseudo_map[parent]["files"][leaf] = ZippedFile(leaf, norm, zipobj)
-                prefix_parts = parent.split("/") if parent else []
-                for i in range(len(prefix_parts)):
-                    up_parent = "/".join(prefix_parts[:i])
-                    up_child = prefix_parts[i]
-                    pseudo_map[up_parent]["dirs"].add(up_child)
-        tree_map = pseudo_map
-    if start and start not in tree_map:
-        return
+        return
 
     built_dirs: Dict[str, ZippedDir] = {}
 
     def _build(path: str) -> ZippedDir:
         if path in built_dirs:
             return built_dirs[path]
-        dirnames = sorted(tree_map[path]["dirs"])
-        files = [tree_map[path]["files"][k] for k in sorted(tree_map[path]["files"].keys())]
+
+        dirset = tree_map[path]["dirs"]
+        files_dict = tree_map[path]["files"]
+
+        if sort_entries:
+            dirnames = sorted(dirset)
+            filekeys = sorted(files_dict.keys())
+        else:
+            # Sets/dicts are already in-memory; avoid sorting for speed.
+            dirnames = list(dirset)
+            filekeys = list(files_dict.keys())
+
+        files = [files_dict[k] for k in filekeys]
         subs: List[ZippedDir] = []
         for name in dirnames:
             sub_path = f"{path}/{name}" if path else name
             subs.append(_build(sub_path))
+
         obj = ZippedDir(
             name=path.rsplit("/", 1)[-1] if path else "",
             path=path,
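
The new keyword-only `sort_entries` flag trades deterministic ordering for traversal speed. A minimal usage sketch against an arbitrary archive (the import path is taken from the file header above; the archive name and `top` value are placeholders):

```python
import zipfile

from brkraw.core.zip import walk  # module shown in this diff

with zipfile.ZipFile("dataset.zip") as zf:   # placeholder archive
    # Default: deterministic, sorted traversal of a subtree.
    for dirpath, dirs, files in walk(zf, top="study-1"):
        print(dirpath, [d.name for d in dirs], [f.name for f in files])

    # New in 0.5.5: skip sorting when order does not matter (faster on large archives).
    for dirpath, _, files in walk(zf, sort_entries=False):
        pass
```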
@@ -1,13 +1,14 @@
 from __future__ import annotations
 
-from .study import Study
+from .study import Study, LazyScan
 from .scan import Scan
 from .reco import Reco
 from .node import DatasetNode
 
 
 __all__ = [
-    'Study',
+    'Study',
+    'LazyScan',
     'Scan',
     'Reco',
     'DatasetNode'
@@ -1,21 +1,56 @@
+
 from __future__ import annotations
+
+import logging
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Mapping, Union, TYPE_CHECKING, List
+from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Union
 
 from ..core.fs import DatasetFS
 from .node import DatasetNode
 from .scan import Scan
 
-if TYPE_CHECKING:
-    from ..apps.loader.types import ScanLoader
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class LazyScan:
+    """Lightweight lazy Scan proxy.
+
+    This defers `Scan.from_fs(...)` until the scan is actually accessed.
+    It implements attribute forwarding so it can be used where a Scan is expected.
+    """
+
+    fs: DatasetFS
+    scan_id: int
+    scan_root: str
+    _scan: Optional[Scan] = field(default=None, init=False, repr=False)
+
+    def materialize(self) -> Scan:
+        if self._scan is None:
+            logger.debug(
+                "Materializing Scan.from_fs for scan_id=%s scan_root=%s",
+                self.scan_id,
+                self.scan_root,
+            )
+            self._scan = Scan.from_fs(self.fs, self.scan_id, self.scan_root)
+        return self._scan
+
+    def __getattr__(self, name: str):
+        # Delegate unknown attributes to the underlying Scan.
+        return getattr(self.materialize(), name)
+
+    def __repr__(self) -> str:
+        if self._scan is None:
+            return f"LazyScan(id={self.scan_id} root='{self.scan_root}')"
+        return repr(self._scan)
 
 
 @dataclass
 class Study(DatasetNode):
     fs: DatasetFS
     relroot: str = ""
-    scans: Dict[int, Scan] = field(default_factory=dict)
+    scans: Dict[int, "LazyScan"] = field(default_factory=dict)
     _cache: Dict[str, object] = field(default_factory=dict, init=False, repr=False)
 
     @classmethod
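
A short sketch of the deferred behavior introduced by `LazyScan` above (assumes `fs` is an already-constructed `DatasetFS` and that scan 3 lives under a hypothetical `subj/3` root):

```python
lazy = LazyScan(fs=fs, scan_id=3, scan_root="subj/3")

print(lazy)                        # LazyScan(id=3 root='subj/3') -- nothing parsed yet
scan = lazy.materialize()          # first access runs Scan.from_fs(...) once
assert lazy.materialize() is scan  # later calls reuse the cached Scan

# Plain attribute access also materializes via __getattr__, so a LazyScan
# can stand in wherever a Scan is expected.
```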
@@ -42,17 +77,25 @@ class Study(DatasetNode):
 
     @classmethod
     def discover(cls, fs: DatasetFS) -> List["Study"]:
-        """Bottom-up discovery using reco markers (2dseq + visu_pars)."""
-        reco_dirs: List[str] = []
-        for dirpath, dirnames, filenames in fs.walk():
+        """Bottom-up discovery using reco markers (2dseq + visu_pars).
+
+        Notes:
+            Discovery is I/O bound on large studies or slow filesystems.
+            We minimize filesystem calls by:
+            - disabling per-directory sorting in fs.walk
+            - avoiding per-directory set() allocations
+            - caching scan-level existence checks (method/acqp)
+        """
+        studies: Dict[str, "Study"] = {}
+        scan_ok_cache: Dict[str, bool] = {}
+
+        for dirpath, _, filenames in fs.walk(sort_entries=False):
             rel = fs.strip_anchor(dirpath)
-            names = set(filenames)
-            if "2dseq" in names and "visu_pars" in names:
-                reco_dirs.append(rel)
 
-        studies: Dict[str, Study] = {}
-        for reco_dir in reco_dirs:
-            parts = [p for p in reco_dir.split("/") if p]
+            if "2dseq" not in filenames or "visu_pars" not in filenames:
+                continue
+
+            parts = [p for p in rel.split("/") if p]
             if "pdata" not in parts:
                 continue
             pdata_idx = parts.index("pdata")
@@ -70,12 +113,18 @@
 
             scan_root = "/".join(parts[:pdata_idx])
             study_root = "/".join(parts[:pdata_idx - 1])
-
-            if not (
-                fs.exists(f"{scan_root}/method")
-                and fs.exists(f"{scan_root}/acqp")
-                and fs.exists(f"{reco_dir}/reco")
-            ):
+
+            # Validate scan-level markers once per scan_root.
+            ok = scan_ok_cache.get(scan_root)
+            if ok is None:
+                ok = fs.exists(f"{scan_root}/method") and fs.exists(f"{scan_root}/acqp")
+                scan_ok_cache[scan_root] = ok
+            if not ok:
+                continue
+
+            # Validate reco file. In most PV layouts, `reco` lives in the same pdata/<reco_id> dir.
+            # Prefer checking the listing we already have, fall back to exists() for safety.
+            if "reco" not in filenames and not fs.exists(f"{rel}/reco"):
                 continue
 
             study = studies.get(study_root)
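
The `scan_ok_cache` dict above is plain memoization keyed on `scan_root`. As a design note, the same idea could be written with `functools.lru_cache`, at the cost of requiring the `fs` object to be hashable, which is one likely reason for the explicit dict here (sketch only):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def _scan_markers_ok(fs, scan_root: str) -> bool:
    # Equivalent check, memoized per (fs, scan_root); `fs` must be hashable.
    return fs.exists(f"{scan_root}/method") and fs.exists(f"{scan_root}/acqp")
```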
@@ -84,16 +133,17 @@
                 studies[study_root] = study
 
             if scan_id not in study.scans:
-                study.scans[scan_id] = Scan.from_fs(fs, scan_id, scan_root)
+                # Defer Scan.from_fs(...) until the scan is actually accessed.
+                study.scans[scan_id] = LazyScan(fs=fs, scan_id=scan_id, scan_root=scan_root)
 
         return [studies[k] for k in sorted(studies.keys())]
 
     @property
-    def avail(self) -> Mapping[int, Union[Scan, "ScanLoader"]]:
+    def avail(self) -> Mapping[int, "LazyScan"]:
         return {k: self.scans[k] for k in sorted(self.scans)}
 
-    def get_scan(self, scan_id: int) -> Scan:
-        return self.scans[scan_id]
+    def get_scan(self, scan_id: int) -> "Scan":
+        return self.scans[scan_id].materialize()
 
     @property
     def has_subject(self) -> bool:
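
Putting discovery and lazy scans together, a hedged end-to-end sketch (how the `DatasetFS` instance `fs` is constructed is outside this diff and is assumed here):

```python
studies = Study.discover(fs)   # cheap: builds LazyScan proxies, no Scan.from_fs yet
study = studies[0]

print(list(study.avail))       # sorted scan ids; values are LazyScan proxies
scan = study.get_scan(3)       # materializes Scan.from_fs(fs, 3, scan_root) on demand
```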
@@ -11,7 +11,7 @@ from __future__ import annotations
 from typing import Union, Optional, TypedDict, cast
 import numpy as np
 from .helpers import get_file
-from ..dataclasses import Scan, Reco
+from ..dataclasses import Scan, Reco, LazyScan
 
 
 WORDTYPE = {
@@ -42,8 +42,16 @@ def _get_dtype(byte_order: str, word_type: str) -> np.dtype:
     return np.dtype(f"{BYTEORDER[byte_order]}{WORDTYPE[word_type]}")
 
 
-def resolve(obj: Union["Scan", "Reco"]) -> Optional[ResolvedDatatype]:
+def resolve(obj: Union["LazyScan", "Scan", "Reco"]) -> Optional[ResolvedDatatype]:
     """Return dtype/slope/offset metadata for a Scan or Reco."""
+    # Accept LazyScan-like proxies by materializing them.
+    if not isinstance(obj, (Scan, Reco)) and hasattr(obj, "materialize"):
+        try:
+            obj = obj.materialize()
+        except Exception as e:
+            raise TypeError(
+                f"resolve() failed to materialize proxy object {type(obj)!r}: {e}"
+            ) from e
     if isinstance(obj, Scan):
         try:
             p = get_file(obj, 'acqp')
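
The guard above is duck-typed: anything that is not already a `Scan`/`Reco` but exposes `materialize()` is unwrapped first. A minimal illustration with a made-up proxy class:

```python
class DummyProxy:
    """Hypothetical proxy used only to illustrate the materialize() hook."""

    def __init__(self, scan):
        self._scan = scan

    def materialize(self):
        return self._scan

# resolve(DummyProxy(scan)) now behaves like resolve(scan): the proxy is
# materialized first, then dispatched on isinstance(obj, Scan) / isinstance(obj, Reco).
```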
brkraw/resolver/image.py CHANGED
@@ -10,6 +10,7 @@ from __future__ import annotations
 
 
 from typing import TYPE_CHECKING, Optional, Sequence, TypedDict, List, Tuple
+import logging
 from .datatype import resolve as datatype_resolver
 from .shape import resolve as shape_resolver
 from .helpers import get_reco, get_file, swap_element
@@ -22,16 +23,17 @@ if TYPE_CHECKING:
 
 
 class ResolvedImage(TypedDict):
-    dataobj: np.ndarray
+    dataobj: Optional[np.ndarray]
     slope: float
     offset: float
-    shape_desc: List[str]
+    shape_desc: Optional[List[str]]
     sliceorder_scheme: Optional[str]
     num_cycles: int
     time_per_cycle: Optional[float]
 
 
 Z_AXIS_DESCRIPTORS = {'spatial', 'slice', 'without_slice'}
+logger = logging.getLogger("brkraw.resolver.image")
 
 
 def _find_z_axis_candidate(shape_desc: Sequence[str]) -> Optional[int]:
@@ -81,7 +83,10 @@ def ensure_3d_spatial_data(dataobj: np.ndarray, shape_info: "ResolvedShape") ->
        ValueError: When data dimensionality and shape_desc disagree or z-axis
            descriptor is missing.
    """
-    shape = shape_info['shape']
+    # NOTE: `shape_info['shape']` describes the full dataset. When we read only a
+    # subset of cycles (block read), `dataobj.shape` may differ (typically the last
+    # dimension). Use the actual `dataobj.shape` for validation and swapping.
+    shape = list(dataobj.shape)
     shape_desc = list(shape_info['shape_desc'])
 
     if dataobj.ndim != len(shape_desc):
@@ -108,18 +113,102 @@
     return new_dataobj, normalized_shape_desc
 
 
-def _read_2dseq_data(reco: "Reco", dtype: np.dtype, shape: Sequence[int]) -> np.ndarray:
-    """Read 2dseq file into a Fortran-ordered NumPy array with shape validation."""
-    expected_size = int(np.prod(shape)) * np.dtype(dtype).itemsize
+def _read_2dseq_data(
+    reco: "Reco",
+    dtype: np.dtype,
+    shape: Sequence[int],
+    *,
+    cycle_index: Optional[int] = None,
+    cycle_count: Optional[int] = None,
+    total_cycles: Optional[int] = None,
+) -> np.ndarray:
+    """Read 2dseq into a Fortran-ordered NumPy array.
+
+    Default behavior reads the full dataset.
+
+    When `cycle_index` is provided, read a contiguous block of cycles starting at
+    `cycle_index`. Use `cycle_count` to limit how many cycles to read. If
+    `cycle_count` is None, read through the end.
+
+    Notes:
+        This assumes cycles are stored contiguously by cycle in the 2dseq stream.
+        BrkRaw treats the cycle axis as the LAST dimension of `shape`.
+    """
+    itemsize = np.dtype(dtype).itemsize
+
+    # Full read path (default).
+    if cycle_index is None:
+        expected_size = int(np.prod(shape)) * itemsize
+        with get_file(reco, "2dseq") as f:
+            f.seek(0)
+            raw = f.read()
+        if len(raw) != expected_size:
+            raise ValueError(
+                f"2dseq size mismatch: expected {expected_size} bytes for shape {shape}, got {len(raw)}"
+            )
+        try:
+            return np.frombuffer(raw, dtype).reshape(shape, order="F")
+        except ValueError as exc:
+            raise ValueError(f"failed to reshape 2dseq buffer to shape {shape}") from exc
+
+    # Block read path.
+    if total_cycles is None:
+        raise ValueError("total_cycles is required when cycle_index is provided")
+
+    total_cycles = int(total_cycles)
+    if total_cycles < 1:
+        raise ValueError(f"invalid total_cycles={total_cycles}")
+
+    if cycle_index < 0 or cycle_index >= total_cycles:
+        raise ValueError(f"cycle_index {cycle_index} out of range [0, {total_cycles - 1}]")
+
+    if not shape:
+        raise ValueError("shape is empty")
+
+    # BrkRaw convention: cycle axis is the last dimension only when cycles > 1.
+    if total_cycles > 1:
+        if int(shape[-1]) != total_cycles:
+            raise ValueError(
+                f"cycle axis mismatch: expected shape[-1]==total_cycles ({total_cycles}), got shape[-1]={shape[-1]} for shape={shape}"
+            )
+        elems_per_cycle = int(np.prod(shape[:-1])) if len(shape) > 1 else 1
+    else:
+        elems_per_cycle = int(np.prod(shape))
+    bytes_per_cycle = elems_per_cycle * itemsize
+
+    if cycle_count is None:
+        cycle_count = total_cycles - cycle_index
+    cycle_count = int(cycle_count)
+
+    if cycle_count <= 0:
+        raise ValueError(f"cycle_count must be > 0 (got {cycle_count})")
+    if cycle_index + cycle_count > total_cycles:
+        raise ValueError(
+            f"cycle_index+cycle_count exceeds total_cycles: {cycle_index}+{cycle_count} > {total_cycles}"
+        )
+
+    byte_offset = cycle_index * bytes_per_cycle
+    byte_size = cycle_count * bytes_per_cycle
+
     with get_file(reco, "2dseq") as f:
-        f.seek(0)
-        raw = f.read()
-    if len(raw) != expected_size:
-        raise ValueError(f"2dseq size mismatch: expected {expected_size} bytes for shape {shape}, got {len(raw)}")
+        f.seek(byte_offset)
+        raw = f.read(byte_size)
+
+    if len(raw) != byte_size:
+        raise ValueError(
+            f"2dseq block read size mismatch: expected {byte_size} bytes, got {len(raw)}"
+        )
+
+    # Cycle axis is the last dimension.
+    if len(shape) == 1:
+        block_shape = (cycle_count,)
+    else:
+        block_shape = (*shape[:-1], cycle_count)
+
     try:
-        return np.frombuffer(raw, dtype).reshape(shape, order="F")
+        return np.frombuffer(raw, dtype).reshape(block_shape, order="F")
     except ValueError as exc:
-        raise ValueError(f"failed to reshape 2dseq buffer to shape {shape}") from exc
+        raise ValueError(f"failed to reshape 2dseq block buffer to shape {block_shape}") from exc
 
 
 def _normalize_cycle_info(cycle_info: Optional["ResolvedCycle"]) -> Tuple[int, Optional[float]]:
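
A worked example of the block-read byte arithmetic above, assuming a hypothetical int16 reco with shape (128, 128, 30, 100) where the last axis is the cycle axis:

```python
import numpy as np

shape = (128, 128, 30, 100)      # x, y, z, cycles (cycle axis last)
dtype = np.dtype("<i2")          # 2 bytes per element
total_cycles = shape[-1]

elems_per_cycle = int(np.prod(shape[:-1]))           # 128 * 128 * 30 = 491520
bytes_per_cycle = elems_per_cycle * dtype.itemsize   # 983040

cycle_index, cycle_count = 10, 5
byte_offset = cycle_index * bytes_per_cycle          # 9830400  -> f.seek(byte_offset)
byte_size = cycle_count * bytes_per_cycle            # 4915200  -> f.read(byte_size)
block_shape = (*shape[:-1], cycle_count)             # (128, 128, 30, 5)

# Reshaping with order="F" is valid because, in Fortran order, the last axis
# varies slowest, so each cycle occupies one contiguous block in the stream.
```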
@@ -129,7 +218,14 @@ def _normalize_cycle_info(cycle_info: Optional["ResolvedCycle"]) -> Tuple[int, O
     return int(cycle_info['num_cycles']), cycle_info.get('time_step')
 
 
-def resolve(scan: "Scan", reco_id: int = 1) -> Optional[ResolvedImage]:
+def resolve(
+    scan: "Scan",
+    reco_id: int = 1,
+    *,
+    load_data: bool = True,
+    cycle_index: Optional[int] = None,
+    cycle_count: Optional[int] = None,
+) -> Optional[ResolvedImage]:
     """Load 2dseq as a NumPy array with associated metadata.
 
     Args:
@@ -161,13 +257,36 @@ def resolve(scan: "Scan", reco_id: int = 1) -> Optional[ResolvedImage]:
     offset = 0.0
     shape = shape_info["shape"]
 
-    try:
-        dataobj = _read_2dseq_data(reco, dtype, shape)
-    except FileNotFoundError:
-        return None
-
-    dataobj, shape_desc = ensure_3d_spatial_data(dataobj, shape_info)
-    num_cycles, time_per_cycle = _normalize_cycle_info(shape_info['objs'].cycle)
+    total_cycles, time_per_cycle = _normalize_cycle_info(shape_info['objs'].cycle)
+
+    dataobj, shape_desc = None, None
+    if load_data:
+        if total_cycles == 1:
+            logger.debug(
+                "Cycle slicing disabled: total_cycles=%s shape=%s",
+                total_cycles,
+                shape,
+            )
+            cycle_index = None
+            cycle_count = None
+        else:
+            logger.debug(
+                "Cycle slicing enabled: total_cycles=%s shape=%s",
+                total_cycles,
+                shape,
+            )
+        try:
+            dataobj = _read_2dseq_data(
+                reco,
+                dtype,
+                shape,
+                cycle_index=cycle_index,
+                cycle_count=cycle_count,
+                total_cycles=total_cycles,
+            )
+        except FileNotFoundError:
+            return None
+        dataobj, shape_desc = ensure_3d_spatial_data(dataobj, shape_info)
 
     result: ResolvedImage = {
         # image
@@ -178,7 +297,7 @@ def resolve(scan: "Scan", reco_id: int = 1) -> Optional[ResolvedImage]:
         'sliceorder_scheme': shape_info['sliceorder_scheme'],
 
         # cycle
-        'num_cycles': num_cycles,
+        'num_cycles': total_cycles,
         'time_per_cycle': time_per_cycle,
     }
     return result
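
On the caller side, the new keyword-only options look roughly like this (sketch; assumes `scan` is a materialized Scan whose reco 1 has multiple cycles):

```python
from brkraw.resolver.image import resolve  # module shown in this diff

# Metadata only: skips reading 2dseq entirely.
meta = resolve(scan, reco_id=1, load_data=False)
if meta is not None:
    print(meta["num_cycles"], meta["time_per_cycle"])  # dataobj / shape_desc are None

# Read a block of 5 cycles starting at cycle 10 (ignored for single-cycle recos).
block = resolve(scan, reco_id=1, cycle_index=10, cycle_count=5)
if block is not None and block["dataobj"] is not None:
    print(block["dataobj"].shape)  # cycle axis has length 5
```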
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-from pathlib import Path
 from typing import Any, Dict, Iterable, List, Mapping, Optional
 from importlib import resources
 
@@ -10,7 +10,7 @@ from ..remapper import load_spec, map_parameters
 from .validator import validate_rules
 import logging
 
-logger = logging.getLogger("brkraw")
+logger = logging.getLogger(__name__)
 
 RULE_CATEGORIES = ("info_spec", "metadata_spec", "converter_hook")
 SPEC_CATEGORIES = ("info_spec", "metadata_spec")
@@ -216,8 +216,6 @@ def select_rule_use(
                logger.debug("Rule %r matched, selected use=%r.", rule.get("name"), selected)
            else:
                logger.debug("Rule %r matched but has no usable 'use' entry.", rule.get("name"))
-        else:
-            logger.debug("Rule %r did not match.", rule.get("name"))
    logger.debug("Rule selection result: %r", selected)
    return selected
 
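
Because module loggers now hang off `__name__`, they all live under the `brkraw.*` hierarchy, so the debug messages touched in this release (rule selection, lazy materialization, cycle slicing) can be enabled in one place:

```python
import logging

logging.basicConfig(level=logging.WARNING)
# Enable debug output for all brkraw modules (or narrow to a specific subpackage).
logging.getLogger("brkraw").setLevel(logging.DEBUG)
```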
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: brkraw
-Version: 0.5.3
+Version: 0.5.5
 Summary: Toolkit for loading Bruker Paravision datasets, mapping metadata, and exporting NIfTI
 Project-URL: Homepage, https://brkraw.github.io
 Maintainer-email: SungHo Lee <shlee@unc.edu>
@@ -45,11 +45,11 @@ Description-Content-Type: text/markdown
 </picture>
 <!-- markdownlint-enable MD041 MD033 MD013 -->
 
-[![DOI](https://zenodo.org/badge/245546149.svg)](https://doi.org/10.5281/zenodo.3818614)
+[![DOI](docs/assets/zenodo_badge.svg)](https://doi.org/10.5281/zenodo.3818614)
 
 A modular toolkit for Bruker MRI raw-data handling.
 
-BrkRaw (v0.5.3) converts raw data into standardized, neuroimaging-ready
+BrkRaw (v0.5.5) converts raw data into standardized, neuroimaging-ready
 datasets, with extensible rules/specs and plugin hooks.
 
 - Documentation: [brkraw.github.io](https://brkraw.github.io/)
@@ -70,7 +70,7 @@ If you use BrkRaw in your research, please cite it.
 @software{brkraw,
   author = {Lee, Sung-Ho and Devenyi, Gabriel A. and Ban, Woomi and Shih, Yen-Yu Ian},
   title = {BrkRaw: A modular toolkit for Bruker MRI raw-data handling},
-  version = {0.5.2},
+  version = {0.5.5},
   doi = {10.5281/zenodo.3818614},
   url = {https://github.com/BrkRaw/brkraw},
   note = {Documentation: https://brkraw.github.io},