dicube 0.1.4__cp311-cp311-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,805 @@
1
+ import json
2
+ import os
3
+ import struct
4
+
5
+ # from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
6
+ from concurrent.futures import ThreadPoolExecutor
7
+ from typing import List, Optional, Tuple
8
+
9
+ import numpy as np
10
+ import zstandard as zstd
11
+ from spacetransformer import Space
12
+
13
+ from ..codecs import get_codec
14
+ from ..core.pixel_header import PixelDataHeader
15
+ from ..dicom import DicomMeta
16
+ from ..dicom.dicom_status import DicomStatus
17
+ from ..validation import (
18
+ validate_not_none,
19
+ validate_parameter_type,
20
+ validate_string_not_empty
21
+ )
22
+ from ..exceptions import (
23
+ InvalidCubeFileError,
24
+ CodecError,
25
+ MetaDataError,
26
+ DataConsistencyError
27
+ )
28
+
29
+ """File Format Specification for DiCube (DCB) Files
30
+
31
+ -----------------------------------------------------------------
32
+ | File Header (Fixed length: 116 bytes)                       |
33
+ | magic: 8 bytes (e.g. b"DICUBE") |
34
+ | version: 4 bytes (unsigned int) |
35
+ | dicom_status_offset: 8 bytes (Q) |
36
+ | dicom_status_length: 8 bytes (Q) |
37
+ | dicommeta_offset: 8 bytes (Q) |
38
+ | dicommeta_length: 8 bytes (Q) |
39
+ | space_offset: 8 bytes (Q) |
40
+ | space_length: 8 bytes (Q) |
41
+ | pixel_header_offset: 8 bytes (Q) |
42
+ | pixel_header_length: 8 bytes (Q) |
43
+ | encoded_frame_offsets_offset: 8 bytes (Q) |
44
+ | encoded_frame_offsets_length: 8 bytes (Q) |
45
+ | encoded_frame_lengths_offset: 8 bytes (Q) |
46
+ | encoded_frame_lengths_length: 8 bytes (Q) |
47
+ | encoded_frame_count: 8 bytes (Q) |
48
+ -----------------------------------------------------------------
49
+ | DicomMeta (compressed JSON, optional) |
50
+ -----------------------------------------------------------------
51
+ | Space (JSON) |
52
+ -----------------------------------------------------------------
53
+ | PixelDataHeader (JSON, RescaleIntercept/Slope, status etc.) |
54
+ -----------------------------------------------------------------
55
+ | encoded_frame_offsets (encoded_frame_count Q values) |
56
+ -----------------------------------------------------------------
57
+ | encoded_frame_lengths (encoded_frame_count Q values) |
58
+ -----------------------------------------------------------------
59
+ | encoded_frame_data[0] |
60
+ -----------------------------------------------------------------
61
+ | encoded_frame_data[1] ... |
62
+ -----------------------------------------------------------------
63
+ | ... |
64
+ -----------------------------------------------------------------
65
+ | encoded_frame_data[n-1] |
66
+ -----------------------------------------------------------------
67
+
68
+ This format demonstrates how to store multi-frame images in a single file,
69
+ with offsets and lengths recorded in the header for random access.
70
+ """
71
+
72
+
73
class DcbFile:
    """Base class implementing common file I/O logic for DiCube files.

    This class provides core functionality for:
    - Header structure management
    - write() workflow (header, metadata, space, header, offsets/lengths, images)
    - Common file operations (read/write)

    Subclasses should implement frame encoding via _encode_one_frame() and
    _decode_one_frame() methods, and set appropriate MAGIC and VERSION values.

    Attributes:
        HEADER_STRUCT (str): Struct format string for the header
            ("<8sI13Q" = 8-byte magic, uint32 version, thirteen uint64
            offset/length/count fields; 116 bytes total, little-endian).
        MAGIC (bytes): Magic bytes for file identification.
        VERSION (int): File format version.
        TRANSFER_SYNTAX_UID (str, optional): DICOM transfer syntax UID.
    """

    HEADER_STRUCT = "<8sI13Q"
    MAGIC = b"DCMCUBE\x00"
    VERSION = 1
    TRANSFER_SYNTAX_UID = None  # Base class has no specific transfer syntax

    def __init__(self, filename: str, mode: str = "r"):
        """Initialize a DCB file object.

        Args:
            filename (str): The file path.
            mode (str): "r" for reading, "w" for writing, "a" for appending.

        Raises:
            InvalidCubeFileError: If filename is empty, mode is not a string,
                or mode is not one of "r", "w", "a".
        """
        # Validate required parameters
        validate_string_not_empty(filename, "filename", "DcbFile constructor", InvalidCubeFileError)
        validate_parameter_type(mode, str, "mode", "DcbFile constructor", InvalidCubeFileError)

        if mode not in ("r", "w", "a"):
            raise InvalidCubeFileError(
                f"Invalid file mode: {mode}",
                context="DcbFile constructor",
                details={"mode": mode, "supported_modes": ["r", "w", "a"]},
                suggestion="Use 'r' for reading, 'w' for writing, or 'a' for appending"
            )

        self.filename = filename
        self.mode = mode
        self._header = None  # Delay reading header until needed

        # For existing files opened for read/append, sniff the header now so
        # the instance can be re-typed to the matching subclass.
        if os.path.exists(filename) and mode in ("r", "a"):
            self._read_header_and_check_type()

    def _read_header_and_check_type(self):
        """Read file header and determine the correct subclass.

        If the magic bytes identify a known subclass format, this instance's
        class is switched in place (``self.__class__``) so that the
        appropriate codec methods are used.

        Raises:
            InvalidCubeFileError: If the header cannot be read or the magic
                bytes do not match any supported format.
        """
        try:
            hdr = self.read_header(verify_magic=False)  # Lazy read
            magic = hdr["magic"]
            version = hdr["version"]

            if magic != self.MAGIC:
                if magic == DcbSFile.MAGIC and version == DcbSFile.VERSION:
                    # Re-type this instance to the speed-optimized format.
                    self.__class__ = DcbSFile
                else:
                    raise InvalidCubeFileError(
                        "Unsupported file format",
                        context="file header validation",
                        details={"magic_number": magic, "file_path": self.filename},
                        suggestion="Ensure the file is a valid DicomCube file"
                    )
            self.VERSION = version
        except Exception as e:
            if isinstance(e, InvalidCubeFileError):
                raise
            raise InvalidCubeFileError(
                f"Failed to read file header: {str(e)}",
                context="file header validation",
                details={"file_path": self.filename}
            ) from e

    @property
    def header(self):
        """Get the file header, reading it from disk if not already loaded.

        Returns:
            dict: Dictionary containing header fields.
        """
        if self._header is None:
            self._header = self.read_header()
        return self._header

    def read_header(self, verify_magic: bool = True):
        """Read and parse the file header.

        Args:
            verify_magic (bool): If True, verify the magic number. Defaults to True.

        Returns:
            dict: Dictionary containing header fields.

        Raises:
            InvalidCubeFileError: If the file is not a valid DicomCube file.
        """
        # Return the cached header if it has already been parsed.
        if self._header is not None:
            return self._header

        try:
            header_size = struct.calcsize(self.HEADER_STRUCT)
            with open(self.filename, "rb") as f:
                header_data = f.read(header_size)

            if len(header_data) < header_size:
                raise InvalidCubeFileError(
                    "File too small to contain valid header",
                    context="read_header operation",
                    details={"expected_size": header_size, "actual_size": len(header_data)},
                    suggestion="Ensure the file is a complete DicomCube file"
                )

            unpacked = struct.unpack(self.HEADER_STRUCT, header_data)
            (
                magic,
                version,
                dicom_status_offset,
                dicom_status_length,
                dicommeta_offset,
                dicommeta_length,
                space_offset,
                space_length,
                pixel_header_offset,
                pixel_header_length,
                frame_offsets_offset,
                frame_offsets_length,
                frame_lengths_offset,
                frame_lengths_length,
                frame_count,
            ) = unpacked

            if verify_magic and magic != self.MAGIC:
                raise InvalidCubeFileError(
                    "Invalid file format magic number",
                    context="read_header operation",
                    details={"expected_magic": self.MAGIC, "actual_magic": magic},
                    suggestion="Ensure the file is a valid DicomCube file"
                )

            self._header = {
                "magic": magic,
                "version": version,
                "dicom_status_offset": dicom_status_offset,
                "dicom_status_length": dicom_status_length,
                "dicommeta_offset": dicommeta_offset,
                "dicommeta_length": dicommeta_length,
                "space_offset": space_offset,
                "space_length": space_length,
                "pixel_header_offset": pixel_header_offset,
                "pixel_header_length": pixel_header_length,
                "frame_offsets_offset": frame_offsets_offset,
                "frame_offsets_length": frame_offsets_length,
                "frame_lengths_offset": frame_lengths_offset,
                "frame_lengths_length": frame_lengths_length,
                "frame_count": frame_count,
            }
            return self._header
        except Exception as e:
            if isinstance(e, InvalidCubeFileError):
                raise
            raise InvalidCubeFileError(
                f"Failed to read file header: {str(e)}",
                context="read_header operation",
                details={"file_path": self.filename}
            ) from e

    def _prepare_metadata(
        self,
        pixel_header: PixelDataHeader,
        dicom_meta: Optional[DicomMeta] = None,
        space: Optional[Space] = None,
        dicom_status: Optional[DicomStatus] = None,
    ):
        """Prepare metadata for writing to DCB file.

        Args:
            pixel_header (PixelDataHeader): Pixel data header information.
            dicom_meta (DicomMeta, optional): DICOM metadata. Defaults to None.
            space (Space, optional): Spatial information. Defaults to None.
            dicom_status (DicomStatus, optional): DICOM status. Defaults to None.

        Returns:
            dict: Dictionary containing prepared metadata components
                ('dicom_status_bin', 'dicommeta_gz', 'space_json',
                'pixel_header_bin'), each a bytes object.
        """
        # Process dicom_status
        if dicom_status is None:
            # If not provided, try to get from pixel_header (backward compatibility)
            if hasattr(pixel_header, "DicomStatus"):
                dicom_status = pixel_header.DicomStatus
            else:
                dicom_status = DicomStatus.CONSISTENT

        # Handle both enum and string values for dicom_status
        if isinstance(dicom_status, DicomStatus):
            dicom_status_bin = dicom_status.value.encode("utf-8")
        else:
            # If it's already a string, encode it directly
            dicom_status_bin = dicom_status.encode("utf-8")

        # Process dicom_meta: JSON-serialize then zstd-compress
        if dicom_meta:
            dicommeta_json = dicom_meta.to_json().encode("utf-8")
            dicommeta_gz = zstd.compress(dicommeta_json)
        else:
            dicommeta_gz = b""

        # Process space
        if space:
            # Convert space from internal (z,y,x) to file (x,y,z) format
            space_xyz = space.reverse_axis_order()
            space_json = space_xyz.to_json().encode("utf-8")
        else:
            space_json = b""

        # Process pixel_header (stored as uncompressed JSON)
        pixel_header_bin = pixel_header.to_json().encode("utf-8")

        return {
            'dicom_status_bin': dicom_status_bin,
            'dicommeta_gz': dicommeta_gz,
            'space_json': space_json,
            'pixel_header_bin': pixel_header_bin
        }

    def _encode_frames(self, images: List, num_threads: int = 4):
        """Encode frames using parallel or serial processing.

        Args:
            images (List): List of frames to encode.
            num_threads (int): Number of worker threads for parallel encoding.

        Returns:
            List[bytes]: List of encoded frame data, in input order.
        """
        if num_threads is not None and num_threads > 1:
            # Parallel encoding; map() preserves input ordering.
            with ThreadPoolExecutor(max_workers=num_threads) as executor:
                return list(executor.map(self._encode_one_frame, images))
        else:
            # Serial encoding
            encoded_blobs = []
            for one_frame in images:
                encoded_bytes = self._encode_one_frame(one_frame)
                encoded_blobs.append(encoded_bytes)
            return encoded_blobs

    def _write_file_structure(self, f, metadata_components, encoded_frames, frame_count):
        """Write the complete file structure including metadata and frames.

        The layout written here matches the file-format specification in the
        module docstring: status, compressed meta, space, pixel header, the
        two offset/length tables, then the frame payloads. The tables are
        first zero-filled as placeholders and backfilled after the frames
        have been written and their actual offsets/lengths are known.

        Args:
            f: File handle for writing (positioned after the header).
            metadata_components (dict): Prepared metadata components.
            encoded_frames (List[bytes]): List of encoded frame data.
            frame_count (int): Number of frames.

        Returns:
            dict: Dictionary containing offset and length information for header.
        """
        # Write dicom_status
        dicom_status_offset = f.tell()
        f.write(metadata_components['dicom_status_bin'])
        dicom_status_length = f.tell() - dicom_status_offset

        # Write dicommeta_gz
        dicommeta_offset = f.tell()
        f.write(metadata_components['dicommeta_gz'])
        dicommeta_length = f.tell() - dicommeta_offset

        # Write space_json
        space_offset = f.tell()
        f.write(metadata_components['space_json'])
        space_length = f.tell() - space_offset

        # Write pixel_header_bin
        pixel_header_offset = f.tell()
        f.write(metadata_components['pixel_header_bin'])
        pixel_header_length = f.tell() - pixel_header_offset

        # Reserve offsets / lengths space (one uint64 per frame in each table)
        frame_offsets_offset = f.tell()
        f.write(b"\x00" * (8 * frame_count))
        frame_offsets_length = 8 * frame_count

        frame_lengths_offset = f.tell()
        f.write(b"\x00" * (8 * frame_count))
        frame_lengths_length = 8 * frame_count

        # Write frames and collect offset/length info
        offsets = []
        lengths = []

        for encoded_bytes in encoded_frames:
            offset_here = f.tell()
            f.write(encoded_bytes)
            length_here = f.tell() - offset_here

            offsets.append(offset_here)
            lengths.append(length_here)

        # Backfill offsets & lengths
        current_pos = f.tell()
        f.seek(frame_offsets_offset)
        for off in offsets:
            f.write(struct.pack("<Q", off))

        f.seek(frame_lengths_offset)
        for lng in lengths:
            f.write(struct.pack("<Q", lng))

        # Go back to the end of the file
        f.seek(current_pos)

        return {
            'dicom_status_offset': dicom_status_offset,
            'dicom_status_length': dicom_status_length,
            'dicommeta_offset': dicommeta_offset,
            'dicommeta_length': dicommeta_length,
            'space_offset': space_offset,
            'space_length': space_length,
            'pixel_header_offset': pixel_header_offset,
            'pixel_header_length': pixel_header_length,
            'frame_offsets_offset': frame_offsets_offset,
            'frame_offsets_length': frame_offsets_length,
            'frame_lengths_offset': frame_lengths_offset,
            'frame_lengths_length': frame_lengths_length,
        }

    def write(
        self,
        images: List,  # Can be List[np.ndarray] or List[Tuple] for ROI data
        pixel_header: PixelDataHeader,
        dicom_meta: Optional[DicomMeta] = None,
        space: Optional[Space] = None,
        num_threads: int = 4,
        dicom_status: Optional[DicomStatus] = None,
    ):
        """Write image data and metadata to a DCB file.

        This is a generic write method that subclasses can reuse, customizing
        single-frame encoding via _encode_one_frame().

        NOTE(review): the file is always opened with "wb", so an existing
        file is truncated even when the object was created with mode "a".

        Args:
            images (List): List of frames to write. Can be List[np.ndarray] for standard files,
                or List[Tuple[np.ndarray, np.ndarray, np.ndarray]] for ROI files.
                None is treated as an empty list.
            pixel_header (PixelDataHeader): PixelDataHeader instance containing pixel metadata.
            dicom_meta (DicomMeta, optional): DICOM metadata. Defaults to None.
            space (Space, optional): Spatial information. Defaults to None.
            num_threads (int): Number of worker threads for parallel encoding. Defaults to 4.
            dicom_status (DicomStatus, optional): DICOM status value (enum or its
                string value). Defaults to None.
        """
        if images is None:
            images = []
        frame_count = len(images)

        # Prepare metadata components
        metadata_components = self._prepare_metadata(
            pixel_header, dicom_meta, space, dicom_status
        )

        # Encode frames
        encoded_frames = self._encode_frames(images, num_threads)

        # Write file structure
        header_size = struct.calcsize(self.HEADER_STRUCT)

        with open(self.filename, "wb") as f:
            # Write placeholder header; real values are backfilled below once
            # all section offsets are known.
            f.write(b"\x00" * header_size)

            # Write file structure and get offset information
            offset_info = self._write_file_structure(
                f, metadata_components, encoded_frames, frame_count
            )

            # Backfill header
            f.seek(0)
            header_data = struct.pack(
                self.HEADER_STRUCT,
                self.MAGIC,
                self.VERSION,
                offset_info['dicom_status_offset'],
                offset_info['dicom_status_length'],
                offset_info['dicommeta_offset'],
                offset_info['dicommeta_length'],
                offset_info['space_offset'],
                offset_info['space_length'],
                offset_info['pixel_header_offset'],
                offset_info['pixel_header_length'],
                offset_info['frame_offsets_offset'],
                offset_info['frame_offsets_length'],
                offset_info['frame_lengths_offset'],
                offset_info['frame_lengths_length'],
                frame_count,
            )
            f.write(header_data)

    def read_meta(self, DicomMetaClass=DicomMeta):
        """Read DICOM metadata from the file.

        Args:
            DicomMetaClass (class): The class to use for creating the DicomMeta object.
                Defaults to DicomMeta.

        Returns:
            DicomMeta: The DICOM metadata, or None if not present in the file.
        """
        hdr = self.header
        dicommeta_offset = hdr["dicommeta_offset"]
        dicommeta_length = hdr["dicommeta_length"]

        if dicommeta_length == 0:
            return None

        with open(self.filename, "rb") as f:
            f.seek(dicommeta_offset)
            dicommeta_gz = f.read(dicommeta_length)

        dicommeta_json = zstd.decompress(dicommeta_gz)
        dicommeta_dict = json.loads(dicommeta_json.decode("utf-8"))

        try:
            return DicomMetaClass.from_json(json.dumps(dicommeta_dict))
        except Exception:
            # Backwards compatibility for older file format: reconstruct with
            # synthetic, numbered per-frame filenames.
            # BUGFIX: the list element was previously a plain string literal
            # "slice_{i:04d}.dcm" (missing f-prefix), producing identical
            # un-formatted names for every frame.
            return DicomMetaClass(
                dicommeta_dict,
                [f"slice_{i:04d}.dcm" for i in range(hdr["frame_count"])],
            )

    def read_space(self, SpaceClass=Space):
        """Read spatial information from the file.

        Args:
            SpaceClass (class): The class to use for creating the Space object.
                Defaults to Space.

        Returns:
            Space: The spatial information, or None if not present in the file
                or if parsing fails.
        """
        hdr = self.header
        space_offset = hdr["space_offset"]
        space_length = hdr["space_length"]

        if space_length == 0:
            return None

        with open(self.filename, "rb") as f:
            f.seek(space_offset)
            space_json = f.read(space_length)

        try:
            space = SpaceClass.from_json(space_json.decode("utf-8"))
            # Convert from file (x,y,z) format to internal (z,y,x) format
            return space.reverse_axis_order()
        except Exception:
            # If space reading fails, return None (best-effort read).
            return None

    def read_pixel_header(self, HeaderClass=PixelDataHeader):
        """Read pixel header information from the file.

        Args:
            HeaderClass (class): The class to use for creating the PixelDataHeader object.
                Defaults to PixelDataHeader.

        Returns:
            PixelDataHeader: The pixel header information.

        Raises:
            ValueError: If the pixel header is not found in the file.
        """
        hdr = self.header
        pixel_header_offset = hdr["pixel_header_offset"]
        pixel_header_length = hdr["pixel_header_length"]

        if pixel_header_length == 0:
            raise ValueError("Pixel header not found in file.")

        with open(self.filename, "rb") as f:
            f.seek(pixel_header_offset)
            pixel_header_bin = f.read(pixel_header_length)

        pixel_header_json = pixel_header_bin.decode("utf-8")
        return HeaderClass.from_json(pixel_header_json)

    def read_images(self, num_threads: int = 4):
        """Read all image frames from the file.

        Args:
            num_threads (int): Number of worker threads for parallel decoding.
                Defaults to 4.

        Returns:
            List[np.ndarray] or np.ndarray: The decoded image frames. If the number of frames is 1,
                returns a single numpy array; if there are no decodable frames, an
                empty array; otherwise a list of arrays.
        """
        hdr = self.header
        frame_count = hdr["frame_count"]

        if frame_count == 0:
            # No frames to read
            pixel_header = self.read_pixel_header()
            return np.array([], dtype=pixel_header.ORIGINAL_PIXEL_DTYPE)

        # Read frame offsets and lengths
        frame_offsets_offset = hdr["frame_offsets_offset"]
        frame_offsets_length = hdr["frame_offsets_length"]
        frame_lengths_offset = hdr["frame_lengths_offset"]
        frame_lengths_length = hdr["frame_lengths_length"]

        with open(self.filename, "rb") as f:
            # Read frame offsets
            f.seek(frame_offsets_offset)
            frame_offsets_bin = f.read(frame_offsets_length)
            frame_offsets = struct.unpack(f"<{frame_count}Q", frame_offsets_bin)

            # Read frame lengths
            f.seek(frame_lengths_offset)
            frame_lengths_bin = f.read(frame_lengths_length)
            frame_lengths = struct.unpack(f"<{frame_count}Q", frame_lengths_bin)

            # Read each frame data
            frame_data_list = []
            for offset, length in zip(frame_offsets, frame_lengths):
                if offset == 0 or length == 0:
                    # Skip empty frames (placeholders that were never backfilled)
                    frame_data_list.append(None)
                    continue

                f.seek(offset)
                encoded_bytes = f.read(length)
                frame_data_list.append(encoded_bytes)

        # Decode frames (with parallelization if needed)
        frames = []

        if num_threads is not None and num_threads > 1 and frame_count > 1:
            # Parallel decoding; None entries pass through undecoded.
            with ThreadPoolExecutor(max_workers=num_threads) as executor:
                frames = list(
                    executor.map(
                        lambda x: None if x is None else self._decode_one_frame(x),
                        frame_data_list,
                    )
                )
        else:
            # Serial decoding
            frames = [
                None if data is None else self._decode_one_frame(data)
                for data in frame_data_list
            ]

        # Filter out None frames
        frames = [f for f in frames if f is not None]

        if len(frames) == 0:
            # Return empty array if no frames were decoded
            pixel_header = self.read_pixel_header()
            return np.array([], dtype=pixel_header.ORIGINAL_PIXEL_DTYPE)
        elif len(frames) == 1:
            # Return single frame directly
            return frames[0]
        else:
            # Return list of frames
            return frames

    def _encode_one_frame(self, frame_data: np.ndarray) -> bytes:
        """Encode a single frame of image data.

        This is a placeholder method to be implemented by subclasses.

        Args:
            frame_data (np.ndarray): The frame data to encode.

        Returns:
            bytes: The encoded frame data.

        Raises:
            NotImplementedError: This method must be implemented by subclasses.
        """
        raise NotImplementedError("Subclass must implement _encode_one_frame")

    def _decode_one_frame(self, data: bytes) -> np.ndarray:
        """Decode a single frame of image data.

        This is a placeholder method to be implemented by subclasses.
        (Parameter renamed from ``bytes`` to avoid shadowing the builtin.)

        Args:
            data (bytes): The encoded frame data.

        Returns:
            np.ndarray: The decoded frame data.

        Raises:
            NotImplementedError: This method must be implemented by subclasses.
        """
        raise NotImplementedError("Subclass must implement _decode_one_frame")

    def read_dicom_status(self):
        """Read DICOM status information from the file.

        NOTE(review): the two branches return different types — the missing-
        section branch returns the plain string ``DicomStatus.CONSISTENT.value``
        while the normal branch returns a ``DicomStatus`` enum member. Kept
        as-is for caller compatibility; consider unifying in a future version.

        Returns:
            DicomStatus or str: The DICOM status, or DicomStatus.CONSISTENT.value
                if the status section is not present.
        """
        hdr = self.header
        dicom_status_offset = hdr["dicom_status_offset"]
        dicom_status_length = hdr["dicom_status_length"]

        if dicom_status_length == 0:
            return DicomStatus.CONSISTENT.value

        with open(self.filename, "rb") as f:
            f.seek(dicom_status_offset)
            dicom_status_bin = f.read(dicom_status_length)

        return DicomStatus(dicom_status_bin.decode("utf-8"))

    def get_transfer_syntax_uid(self) -> Optional[str]:
        """Get the DICOM transfer syntax UID for this file.

        Returns:
            str or None: The transfer syntax UID, or None if not defined.
        """
        return self.TRANSFER_SYNTAX_UID
702
+
703
+
704
+
705
+
706
+
707
class DcbSFile(DcbFile):
    """DICOM cube file implementation optimized for speed.

    This format prioritizes quick read/write operations while maintaining
    lossless compression with average compression ratio.

    Attributes:
        MAGIC (bytes): Magic bytes for file identification ("DCMCUBES").
        VERSION (int): File format version.
        TRANSFER_SYNTAX_UID (str): DICOM transfer syntax UID for HTJ2K Lossless.
        CODEC_NAME (str): Codec name used for compression.
    """

    MAGIC = b"DCMCUBES"
    VERSION = 1
    TRANSFER_SYNTAX_UID = "1.2.840.10008.1.2.4.201"  # HTJ2K Lossless
    CODEC_NAME = "jph"

    def _encode_one_frame(self, frame_data: np.ndarray) -> bytes:
        """Encode a single frame using the HTJ2K codec.

        Args:
            frame_data (np.ndarray): The frame data to encode.

        Returns:
            bytes: The encoded frame data.

        Raises:
            CodecError: If encoding fails.
        """
        try:
            codec = get_codec(self.CODEC_NAME)
            return codec.encode_lossless(frame_data)
        except Exception as e:
            raise CodecError(
                f"Failed to encode frame using {self.CODEC_NAME} codec: {str(e)}",
                context="frame encoding operation",
                details={"codec_name": self.CODEC_NAME, "frame_shape": frame_data.shape if hasattr(frame_data, 'shape') else None}
            ) from e

    def _decode_one_frame(self, data: bytes) -> np.ndarray:
        """Decode a single frame using the HTJ2K codec.

        (Parameter renamed from ``bytes`` to avoid shadowing the builtin;
        the method is only ever called positionally within this module.)

        Args:
            data (bytes): The encoded frame data.

        Returns:
            np.ndarray: The decoded frame data.

        Raises:
            CodecError: If decoding fails.
        """
        try:
            codec = get_codec(self.CODEC_NAME)
            return codec.decode(data)
        except Exception as e:
            raise CodecError(
                f"Failed to decode frame using {self.CODEC_NAME} codec: {str(e)}",
                context="frame decoding operation",
                details={"codec_name": self.CODEC_NAME, "data_size": len(data) if data else 0}
            ) from e
768
+
769
+
770
class DcbAFile(DcbFile):
    """Archive-oriented DICOM cube file format.

    Trades speed for a higher lossless compression ratio, targeting
    long-term storage of image data.

    Attributes:
        MAGIC (bytes): Magic bytes identifying this format ("DCMCUBEA").
        VERSION (int): File format version number.
        TRANSFER_SYNTAX_UID (str, optional): DICOM transfer syntax UID;
            remains None until a codec is chosen.
        CODEC_NAME (str, optional): Compression codec name; remains None
            until a codec is chosen.
    """

    MAGIC = b"DCMCUBEA"
    VERSION = 1
    # Both remain unset until a concrete codec is selected for this format.
    TRANSFER_SYNTAX_UID = None
    CODEC_NAME = None
787
+
788
+
789
class DcbLFile(DcbFile):
    """Lossy-compression DICOM cube file format.

    Achieves a very high compression ratio by sacrificing exact
    reconstruction, i.e. some image quality is lost.

    Attributes:
        MAGIC (bytes): Magic bytes identifying this format ("DCMCUBEL").
        VERSION (int): File format version number.
        TRANSFER_SYNTAX_UID (str, optional): DICOM transfer syntax UID;
            remains None until a codec is chosen.
        CODEC_NAME (str, optional): Compression codec name; remains None
            until a codec is chosen.
    """

    MAGIC = b"DCMCUBEL"
    VERSION = 1
    # Both remain unset until a concrete codec is selected for this format.
    TRANSFER_SYNTAX_UID = None
    CODEC_NAME = None