@fideus-labs/fidnii 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/dist/BufferManager.d.ts +37 -4
  2. package/dist/BufferManager.d.ts.map +1 -1
  3. package/dist/BufferManager.js +70 -22
  4. package/dist/BufferManager.js.map +1 -1
  5. package/dist/OMEZarrNVImage.d.ts +26 -0
  6. package/dist/OMEZarrNVImage.d.ts.map +1 -1
  7. package/dist/OMEZarrNVImage.js +144 -24
  8. package/dist/OMEZarrNVImage.js.map +1 -1
  9. package/dist/RegionCoalescer.d.ts +16 -0
  10. package/dist/RegionCoalescer.d.ts.map +1 -1
  11. package/dist/RegionCoalescer.js +42 -5
  12. package/dist/RegionCoalescer.js.map +1 -1
  13. package/dist/ResolutionSelector.d.ts +14 -2
  14. package/dist/ResolutionSelector.d.ts.map +1 -1
  15. package/dist/ResolutionSelector.js +26 -16
  16. package/dist/ResolutionSelector.js.map +1 -1
  17. package/dist/index.d.ts +6 -4
  18. package/dist/index.d.ts.map +1 -1
  19. package/dist/index.js +4 -3
  20. package/dist/index.js.map +1 -1
  21. package/dist/normalize.d.ts +50 -0
  22. package/dist/normalize.d.ts.map +1 -0
  23. package/dist/normalize.js +95 -0
  24. package/dist/normalize.js.map +1 -0
  25. package/dist/types.d.ts +66 -1
  26. package/dist/types.d.ts.map +1 -1
  27. package/dist/types.js +66 -0
  28. package/dist/types.js.map +1 -1
  29. package/dist/utils/coordinates.d.ts.map +1 -1
  30. package/dist/utils/coordinates.js +20 -26
  31. package/dist/utils/coordinates.js.map +1 -1
  32. package/package.json +3 -4
  33. package/src/BufferManager.ts +83 -22
  34. package/src/OMEZarrNVImage.ts +190 -24
  35. package/src/RegionCoalescer.ts +45 -5
  36. package/src/ResolutionSelector.ts +32 -16
  37. package/src/index.ts +13 -2
  38. package/src/normalize.ts +119 -0
  39. package/src/types.ts +95 -1
  40. package/src/utils/coordinates.ts +26 -24
@@ -10,6 +10,18 @@ import { getBytesPerPixel, getTypedArrayConstructor } from "./types.js"
10
10
  * The buffer is resized to match the fetched data dimensions exactly.
11
11
  * Memory is reused when possible to avoid unnecessary allocations.
12
12
  *
13
+ * For multi-component images (RGB/RGBA), `componentsPerVoxel` controls
14
+ * how many scalar elements each spatial voxel occupies. The buffer is
15
+ * sized to hold `spatialPixels * componentsPerVoxel` elements, and the
16
+ * typed array view spans all of them. Spatial dimensions (`[z, y, x]`)
17
+ * track only the spatial extent; the component count is a fixed
18
+ * multiplier on the element count.
19
+ *
20
+ * When the source dtype is not uint8 and `componentsPerVoxel > 1`
21
+ * (non-uint8 RGB/RGBA), the buffer stores **uint8** data because NiiVue
22
+ * only supports `DT_RGB24` / `DT_RGBA32` (uint8-per-channel). The raw
23
+ * data is normalized to uint8 externally before being written here.
24
+ *
13
25
  * Memory reuse strategy:
14
26
  * - Reuse buffer if newSize <= currentCapacity
15
27
  * - Reallocate if newSize > currentCapacity OR newSize < 25% of currentCapacity
@@ -22,17 +34,48 @@ export class BufferManager {
22
34
  private readonly bytesPerPixel: number
23
35
  private readonly dtype: ZarrDtype
24
36
 
37
+ /**
38
+ * Number of scalar components per spatial voxel.
39
+ * 1 for scalar images, 3 for RGB, 4 for RGBA.
40
+ */
41
+ readonly componentsPerVoxel: number
42
+
43
+ /**
44
+ * Whether this buffer stores normalized uint8 data for a non-uint8
45
+ * RGB/RGBA source. When `true`, {@link getTypedArray} returns a
46
+ * `Uint8Array` and `bytesPerPixel` is 1, regardless of the source
47
+ * dtype.
48
+ */
49
+ readonly isNormalizedRGB: boolean
50
+
25
51
  /**
26
52
  * Create a new BufferManager.
27
53
  *
28
54
  * @param maxPixels - Maximum number of pixels allowed (budget)
29
55
  * @param dtype - Data type for the buffer
56
+ * @param componentsPerVoxel - Number of components per spatial voxel
57
+ * (default: 1; pass 3 for RGB, 4 for RGBA)
30
58
  */
31
- constructor(maxPixels: number, dtype: ZarrDtype) {
59
+ constructor(
60
+ maxPixels: number,
61
+ dtype: ZarrDtype,
62
+ componentsPerVoxel: number = 1,
63
+ ) {
32
64
  this.maxPixels = maxPixels
33
65
  this.dtype = dtype
34
- this.TypedArrayCtor = getTypedArrayConstructor(dtype)
35
- this.bytesPerPixel = getBytesPerPixel(dtype)
66
+ this.componentsPerVoxel = componentsPerVoxel
67
+
68
+ // Non-uint8 RGB/RGBA: the output buffer stores normalized uint8 data
69
+ // because NiiVue only supports uint8-per-channel color rendering.
70
+ this.isNormalizedRGB = componentsPerVoxel > 1 && dtype !== "uint8"
71
+
72
+ if (this.isNormalizedRGB) {
73
+ this.TypedArrayCtor = Uint8Array
74
+ this.bytesPerPixel = 1
75
+ } else {
76
+ this.TypedArrayCtor = getTypedArrayConstructor(dtype)
77
+ this.bytesPerPixel = getBytesPerPixel(dtype)
78
+ }
36
79
 
37
80
  // Initialize with empty buffer - will be allocated on first resize
38
81
  this.currentDimensions = [0, 0, 0]
@@ -53,27 +96,30 @@ export class BufferManager {
53
96
  * @returns TypedArray view over the (possibly new) buffer
54
97
  */
55
98
  resize(dimensions: [number, number, number]): TypedArray {
56
- const requiredPixels = dimensions[0] * dimensions[1] * dimensions[2]
99
+ const spatialPixels = dimensions[0] * dimensions[1] * dimensions[2]
57
100
 
58
- if (requiredPixels > this.maxPixels) {
101
+ if (spatialPixels > this.maxPixels) {
59
102
  console.warn(
60
103
  `[fidnii] BufferManager: Requested dimensions [${dimensions.join(
61
104
  ", ",
62
- )}] = ${requiredPixels} pixels exceeds maxPixels (${this.maxPixels}). ` +
105
+ )}] = ${spatialPixels} pixels exceeds maxPixels (${this.maxPixels}). ` +
63
106
  `Proceeding anyway (likely at lowest resolution).`,
64
107
  )
65
108
  }
66
109
 
67
- const currentCapacityPixels = this.buffer.byteLength / this.bytesPerPixel
110
+ // Total elements = spatial pixels × components per voxel
111
+ const requiredElements = spatialPixels * this.componentsPerVoxel
112
+ const currentCapacityElements = this.buffer.byteLength / this.bytesPerPixel
68
113
  const utilizationRatio =
69
- currentCapacityPixels > 0 ? requiredPixels / currentCapacityPixels : 0
114
+ currentCapacityElements > 0
115
+ ? requiredElements / currentCapacityElements
116
+ : 0
70
117
 
71
118
  const needsReallocation =
72
- requiredPixels > currentCapacityPixels || utilizationRatio < 0.25
119
+ requiredElements > currentCapacityElements || utilizationRatio < 0.25
73
120
 
74
121
  if (needsReallocation) {
75
- // Allocate new buffer
76
- const newByteLength = requiredPixels * this.bytesPerPixel
122
+ const newByteLength = requiredElements * this.bytesPerPixel
77
123
  this.buffer = new ArrayBuffer(newByteLength)
78
124
  }
79
125
 
@@ -91,14 +137,19 @@ export class BufferManager {
91
137
  /**
92
138
  * Get a typed array view over the current buffer region.
93
139
  *
94
- * The view is sized to match currentDimensions, not the full buffer capacity.
140
+ * The view is sized to match `spatialPixels × componentsPerVoxel`,
141
+ * not the full buffer capacity.
95
142
  */
96
143
  getTypedArray(): TypedArray {
97
- const pixelCount =
144
+ const spatialPixels =
98
145
  this.currentDimensions[0] *
99
146
  this.currentDimensions[1] *
100
147
  this.currentDimensions[2]
101
- return new this.TypedArrayCtor(this.buffer, 0, pixelCount)
148
+ return new this.TypedArrayCtor(
149
+ this.buffer,
150
+ 0,
151
+ spatialPixels * this.componentsPerVoxel,
152
+ )
102
153
  }
103
154
 
104
155
  /**
@@ -109,7 +160,8 @@ export class BufferManager {
109
160
  }
110
161
 
111
162
  /**
112
- * Get the total number of pixels in the current buffer region.
163
+ * Get the total number of spatial pixels in the current buffer region.
164
+ * This does NOT include the component multiplier.
113
165
  */
114
166
  getPixelCount(): number {
115
167
  return (
@@ -120,7 +172,15 @@ export class BufferManager {
120
172
  }
121
173
 
122
174
  /**
123
- * Get the buffer capacity in pixels.
175
+ * Get the total number of scalar elements in the current buffer region.
176
+ * For multi-component images, this is `spatialPixels × componentsPerVoxel`.
177
+ */
178
+ getElementCount(): number {
179
+ return this.getPixelCount() * this.componentsPerVoxel
180
+ }
181
+
182
+ /**
183
+ * Get the buffer capacity in scalar elements.
124
184
  */
125
185
  getCapacity(): number {
126
186
  return this.buffer.byteLength / this.bytesPerPixel
@@ -151,12 +211,12 @@ export class BufferManager {
151
211
  * Clear the current buffer region to zeros.
152
212
  */
153
213
  clear(): void {
154
- const pixelCount = this.getPixelCount()
155
- if (pixelCount > 0) {
214
+ const elementCount = this.getElementCount()
215
+ if (elementCount > 0) {
156
216
  const view = new Uint8Array(
157
217
  this.buffer,
158
218
  0,
159
- pixelCount * this.bytesPerPixel,
219
+ elementCount * this.bytesPerPixel,
160
220
  )
161
221
  view.fill(0)
162
222
  }
@@ -169,8 +229,9 @@ export class BufferManager {
169
229
  * @returns True if current buffer can fit the dimensions
170
230
  */
171
231
  canAccommodate(dimensions: [number, number, number]): boolean {
172
- const requiredPixels = dimensions[0] * dimensions[1] * dimensions[2]
173
- const currentCapacityPixels = this.buffer.byteLength / this.bytesPerPixel
174
- return requiredPixels <= currentCapacityPixels
232
+ const requiredElements =
233
+ dimensions[0] * dimensions[1] * dimensions[2] * this.componentsPerVoxel
234
+ const currentCapacityElements = this.buffer.byteLength / this.bytesPerPixel
235
+ return requiredElements <= currentCapacityElements
175
236
  }
176
237
  }
@@ -29,6 +29,8 @@ import {
29
29
  type OMEZarrNVImageEventMap,
30
30
  type PopulateTrigger,
31
31
  } from "./events.js"
32
+ import type { ChannelWindow } from "./normalize.js"
33
+ import { computeChannelMinMax, normalizeToUint8 } from "./normalize.js"
32
34
  import { RegionCoalescer } from "./RegionCoalescer.js"
33
35
  import type { OrthogonalAxis } from "./ResolutionSelector.js"
34
36
  import {
@@ -39,6 +41,7 @@ import {
39
41
  } from "./ResolutionSelector.js"
40
42
  import type {
41
43
  AttachedNiivueState,
44
+ ChannelInfo,
42
45
  ChunkAlignedRegion,
43
46
  ChunkCache,
44
47
  ClipPlane,
@@ -53,7 +56,12 @@ import type {
53
56
  } from "./types.js"
54
57
  import {
55
58
  getBytesPerPixel,
59
+ getChannelInfo,
56
60
  getNiftiDataType,
61
+ getRGBNiftiDataType,
62
+ isRGBImage,
63
+ NiftiDataType,
64
+ needsRGBNormalization,
57
65
  parseZarritaDtype,
58
66
  } from "./types.js"
59
67
  import {
@@ -124,6 +132,24 @@ export class OMEZarrNVImage extends NVImage {
124
132
  /** Data type of the volume */
125
133
  private readonly dtype: ZarrDtype
126
134
 
135
+ /**
136
+ * Channel dimension info, or `null` for scalar (single-component) images.
137
+ * When non-null, the image has a `"c"` dimension and is treated as
138
+ * multi-component (RGB/RGBA) data.
139
+ */
140
+ private readonly _channelInfo: ChannelInfo | null
141
+
142
+ /**
143
+ * Whether the image is 2D (no `"z"` dimension).
144
+ */
145
+ private readonly _is2D: boolean
146
+
147
+ /**
148
+ * Whether to negate the y-scale in the NIfTI affine for 2D images
149
+ * so that NiiVue renders them right-side up.
150
+ */
151
+ private readonly _flipY2D: boolean
152
+
127
153
  /** Full volume bounds in world space */
128
154
  private readonly _volumeBounds: VolumeBounds
129
155
 
@@ -258,6 +284,23 @@ export class OMEZarrNVImage extends NVImage {
258
284
  const highResImage = this.multiscales.images[0]
259
285
  this.dtype = parseZarritaDtype(highResImage.data.dtype)
260
286
 
287
+ // Detect channel (component) dimension for multi-component images
288
+ this._channelInfo = getChannelInfo(highResImage)
289
+
290
+ // Validate multi-component images: only RGB (3) / RGBA (4) are supported
291
+ if (this._channelInfo && !isRGBImage(highResImage)) {
292
+ throw new Error(
293
+ `Unsupported multi-component image: found ${this._channelInfo.components} ` +
294
+ `components with dtype '${this.dtype}'. Only RGB (3 components) ` +
295
+ `and RGBA (4 components) images are supported. For other ` +
296
+ `multi-component images, select a single component before loading.`,
297
+ )
298
+ }
299
+
300
+ // Detect 2D images (no z axis) and store y-flip preference
301
+ this._is2D = highResImage.dims.indexOf("z") === -1
302
+ this._flipY2D = options.flipY2D ?? true
303
+
261
304
  // Calculate volume bounds from highest resolution for most accurate bounds
262
305
  const highResAffine = createAffineFromNgffImage(highResImage)
263
306
  const highResShape = getVolumeShape(highResImage)
@@ -276,8 +319,15 @@ export class OMEZarrNVImage extends NVImage {
276
319
  this.targetLevelIndex = selection.levelIndex
277
320
  this.currentLevelIndex = this.multiscales.images.length - 1
278
321
 
279
- // Create buffer manager (dynamic sizing, no pre-allocation)
280
- this.bufferManager = new BufferManager(this.maxPixels, this.dtype)
322
+ // Create buffer manager (dynamic sizing, no pre-allocation).
323
+ // For multi-component images, each spatial voxel has multiple
324
+ // scalar elements (e.g. 3 for RGB, 4 for RGBA).
325
+ const componentsPerVoxel = this._channelInfo?.components ?? 1
326
+ this.bufferManager = new BufferManager(
327
+ this.maxPixels,
328
+ this.dtype,
329
+ componentsPerVoxel,
330
+ )
281
331
 
282
332
  // Initialize NVImage properties with placeholder values
283
333
  // Actual values will be set when data is first loaded
@@ -336,17 +386,24 @@ export class OMEZarrNVImage extends NVImage {
336
386
  // Placeholder dimensions (will be updated when data loads)
337
387
  hdr.dims = [3, 1, 1, 1, 1, 1, 1, 1]
338
388
 
339
- // Set data type
340
- hdr.datatypeCode = getNiftiDataType(this.dtype)
341
- hdr.numBitsPerVoxel = getBytesPerPixel(this.dtype) * 8
389
+ // Set data type — use RGB24/RGBA32 for multi-component images
390
+ // (any dtype; non-uint8 data is normalized to uint8 at load time)
391
+ if (this._channelInfo && isRGBImage(this.multiscales.images[0])) {
392
+ const rgbCode = getRGBNiftiDataType(this._channelInfo)
393
+ hdr.datatypeCode = rgbCode
394
+ hdr.numBitsPerVoxel = rgbCode === NiftiDataType.RGB24 ? 24 : 32
395
+ } else {
396
+ hdr.datatypeCode = getNiftiDataType(this.dtype)
397
+ hdr.numBitsPerVoxel = getBytesPerPixel(this.dtype) * 8
398
+ }
342
399
 
343
400
  // Placeholder pixel dimensions
344
401
  hdr.pixDims = [1, 1, 1, 1, 0, 0, 0, 0]
345
402
 
346
- // Placeholder affine (identity)
403
+ // Placeholder affine (identity, with y-flip for 2D images)
347
404
  hdr.affine = [
348
405
  [1, 0, 0, 0],
349
- [0, 1, 0, 0],
406
+ [0, this._flipY2D && this._is2D ? -1 : 1, 0, 0],
350
407
  [0, 0, 1, 0],
351
408
  [0, 0, 0, 1],
352
409
  ]
@@ -533,8 +590,29 @@ export class OMEZarrNVImage extends NVImage {
533
590
  // Resize buffer to match fetched data exactly (no upsampling!)
534
591
  const targetData = this.bufferManager.resize(fetchedShape)
535
592
 
536
- // Direct copy of fetched data
537
- targetData.set(result.data)
593
+ // For non-uint8 RGB/RGBA, we need OMERO metadata *before* copying
594
+ // so we can normalize the raw data to uint8 using channel windows.
595
+ const normalize = needsRGBNormalization(ngffImage, this.dtype)
596
+ if (normalize && !this.isLabelImage) {
597
+ await this.ensureOmeroMetadata(ngffImage, levelIndex)
598
+ }
599
+
600
+ if (normalize && this._channelInfo) {
601
+ // Non-uint8 RGB/RGBA: normalize raw data to uint8 using OMERO windows
602
+ const windows = this._getChannelWindows(
603
+ result.data,
604
+ this._channelInfo.components,
605
+ )
606
+ const normalized = normalizeToUint8(
607
+ result.data,
608
+ this._channelInfo.components,
609
+ windows,
610
+ )
611
+ targetData.set(normalized)
612
+ } else {
613
+ // uint8 RGB or scalar: direct copy
614
+ targetData.set(result.data)
615
+ }
538
616
 
539
617
  // Update this.img to point to the (possibly new) buffer
540
618
  this.img = this.bufferManager.getTypedArray() as NVImage["img"]
@@ -545,8 +623,9 @@ export class OMEZarrNVImage extends NVImage {
545
623
  if (this.isLabelImage) {
546
624
  // Label images: apply a discrete colormap instead of OMERO windowing
547
625
  this._applyLabelColormap(this, result.data)
548
- } else {
549
- // Compute or apply OMERO metadata for cal_min/cal_max
626
+ } else if (!normalize) {
627
+ // Scalar / uint8 RGB: compute or apply OMERO for cal_min/cal_max.
628
+ // (Normalized RGB already consumed the OMERO window above.)
550
629
  await this.ensureOmeroMetadata(ngffImage, levelIndex)
551
630
  }
552
631
 
@@ -630,12 +709,8 @@ export class OMEZarrNVImage extends NVImage {
630
709
  affine[13] += regionStart[1] * sy // y offset
631
710
  affine[14] += regionStart[0] * sz // z offset
632
711
 
633
- // Update affine in header
634
- const srows = affineToNiftiSrows(affine)
635
- this.hdr.affine = [srows.srow_x, srows.srow_y, srows.srow_z, [0, 0, 0, 1]]
636
-
637
- // Update current buffer bounds
638
- // Buffer starts at region.chunkAlignedStart and has extent fetchedShape
712
+ // Update current buffer bounds from the un-flipped affine
713
+ // (bounds stay in OME-Zarr world space for clip plane math)
639
714
  this._currentBufferBounds = {
640
715
  min: [
641
716
  affine[12], // x offset (world coord of buffer origin)
@@ -649,6 +724,17 @@ export class OMEZarrNVImage extends NVImage {
649
724
  ],
650
725
  }
651
726
 
727
+ // For 2D images, negate y-scale so NiiVue's calculateRAS() flips
728
+ // the rows to account for top-to-bottom pixel storage order.
729
+ if (this._flipY2D && this._is2D) {
730
+ affine[5] = -sy
731
+ affine[13] += (fetchedShape[1] - 1) * sy
732
+ }
733
+
734
+ // Update affine in header
735
+ const srows = affineToNiftiSrows(affine)
736
+ this.hdr.affine = [srows.srow_x, srows.srow_y, srows.srow_z, [0, 0, 0, 1]]
737
+
652
738
  // Recalculate RAS orientation
653
739
  this.calculateRAS()
654
740
  }
@@ -788,7 +874,15 @@ export class OMEZarrNVImage extends NVImage {
788
874
  (isTargetLevel && this._omeroComputedForLevel !== this.targetLevelIndex)
789
875
 
790
876
  if (needsCompute) {
791
- const computedOmero = await computeOmeroFromNgffImage(ngffImage)
877
+ // Pass the chunk cache so decoded chunks from OMERO statistics
878
+ // computation are reused by subsequent zarrGet() calls.
879
+ const omeroOpts = this._chunkCache
880
+ ? ({ cache: this._chunkCache } as Record<string, unknown>)
881
+ : undefined
882
+ const computedOmero = await computeOmeroFromNgffImage(
883
+ ngffImage,
884
+ omeroOpts,
885
+ )
792
886
  this._omero = computedOmero
793
887
  this._omeroComputedForLevel = levelIndex
794
888
  this.applyOmeroToHeader()
@@ -796,6 +890,42 @@ export class OMEZarrNVImage extends NVImage {
796
890
  }
797
891
  }
798
892
 
893
+ /**
894
+ * Get per-channel normalization windows for non-uint8 RGB/RGBA.
895
+ *
896
+ * Uses OMERO `window.start`/`window.end` (or `window.min`/`window.max`)
897
+ * when available. Falls back to computing min/max from the raw data.
898
+ *
899
+ * @param data - Raw multi-component data from the zarr fetch
900
+ * @param components - Number of components per voxel (3 or 4)
901
+ * @returns Per-channel windows for normalization to uint8
902
+ */
903
+ private _getChannelWindows(
904
+ data: TypedArray,
905
+ components: number,
906
+ ): ChannelWindow[] {
907
+ if (this._omero?.channels?.length) {
908
+ const windows: ChannelWindow[] = []
909
+ for (let c = 0; c < components; c++) {
910
+ const channel =
911
+ this._omero.channels[Math.min(c, this._omero.channels.length - 1)]
912
+ const win = channel?.window
913
+ if (win) {
914
+ windows.push({
915
+ start: win.start ?? win.min ?? 0,
916
+ end: win.end ?? win.max ?? 1,
917
+ })
918
+ } else {
919
+ windows.push({ start: 0, end: 1 })
920
+ }
921
+ }
922
+ return windows
923
+ }
924
+
925
+ // No OMERO metadata: fall back to per-channel min/max from data
926
+ return computeChannelMinMax(data, components)
927
+ }
928
+
799
929
  /**
800
930
  * Handle clip plane change from NiiVue.
801
931
  * This is called when the user interacts with clip planes in NiiVue.
@@ -1798,15 +1928,26 @@ export class OMEZarrNVImage extends NVImage {
1798
1928
  * Create a new slab buffer state for a slice type.
1799
1929
  */
1800
1930
  private _createSlabBuffer(sliceType: SlabSliceType): SlabBufferState {
1801
- const bufferManager = new BufferManager(this.maxPixels, this.dtype)
1931
+ const componentsPerVoxel = this._channelInfo?.components ?? 1
1932
+ const bufferManager = new BufferManager(
1933
+ this.maxPixels,
1934
+ this.dtype,
1935
+ componentsPerVoxel,
1936
+ )
1802
1937
  const nvImage = new NVImage()
1803
1938
 
1804
1939
  // Initialize with placeholder NIfTI header (same as main image setup)
1805
1940
  const hdr = new NIFTI1()
1806
1941
  nvImage.hdr = hdr
1807
1942
  hdr.dims = [3, 1, 1, 1, 1, 1, 1, 1]
1808
- hdr.datatypeCode = getNiftiDataType(this.dtype)
1809
- hdr.numBitsPerVoxel = getBytesPerPixel(this.dtype) * 8
1943
+ if (this._channelInfo && isRGBImage(this.multiscales.images[0])) {
1944
+ const rgbCode = getRGBNiftiDataType(this._channelInfo)
1945
+ hdr.datatypeCode = rgbCode
1946
+ hdr.numBitsPerVoxel = rgbCode === NiftiDataType.RGB24 ? 24 : 32
1947
+ } else {
1948
+ hdr.datatypeCode = getNiftiDataType(this.dtype)
1949
+ hdr.numBitsPerVoxel = getBytesPerPixel(this.dtype) * 8
1950
+ }
1810
1951
  hdr.pixDims = [1, 1, 1, 1, 0, 0, 0, 0]
1811
1952
  hdr.affine = [
1812
1953
  [1, 0, 0, 0],
@@ -2088,7 +2229,25 @@ export class OMEZarrNVImage extends NVImage {
2088
2229
 
2089
2230
  // Resize buffer and copy data
2090
2231
  const targetData = slabState.bufferManager.resize(fetchedShape)
2091
- targetData.set(result.data)
2232
+ const normalize = needsRGBNormalization(ngffImage, this.dtype)
2233
+
2234
+ if (normalize && this._channelInfo) {
2235
+ // Non-uint8 RGB/RGBA: normalize raw data to uint8 using OMERO windows
2236
+ const windows = this._getChannelWindows(
2237
+ result.data,
2238
+ this._channelInfo.components,
2239
+ )
2240
+ const normalized = normalizeToUint8(
2241
+ result.data,
2242
+ this._channelInfo.components,
2243
+ windows,
2244
+ )
2245
+ targetData.set(normalized)
2246
+ } else {
2247
+ // uint8 RGB or scalar: direct copy
2248
+ targetData.set(result.data)
2249
+ }
2250
+
2092
2251
  slabState.nvImage.img =
2093
2252
  slabState.bufferManager.getTypedArray() as NVImage["img"]
2094
2253
 
@@ -2108,8 +2267,9 @@ export class OMEZarrNVImage extends NVImage {
2108
2267
  if (this.isLabelImage) {
2109
2268
  // Label images: apply discrete colormap to the slab NVImage
2110
2269
  this._applyLabelColormap(slabState.nvImage, result.data)
2111
- } else if (this._omero) {
2112
- // Apply OMERO metadata if available
2270
+ } else if (this._omero && !normalize) {
2271
+ // Apply OMERO metadata for scalar / uint8 RGB.
2272
+ // Normalized RGB already consumed the OMERO window during normalization.
2113
2273
  this._applyOmeroToSlabHeader(slabState.nvImage)
2114
2274
  }
2115
2275
 
@@ -2213,6 +2373,12 @@ export class OMEZarrNVImage extends NVImage {
2213
2373
  affine[13] += fetchStart[1] * sy // y offset
2214
2374
  affine[14] += fetchStart[0] * sz // z offset
2215
2375
 
2376
+ // For 2D images, negate y-scale before normalization
2377
+ if (this._flipY2D && this._is2D) {
2378
+ affine[5] = -sy
2379
+ affine[13] += (fetchedShape[1] - 1) * sy
2380
+ }
2381
+
2216
2382
  // Apply normalization to the entire affine (scale columns + translation)
2217
2383
  for (let i = 0; i < 15; i++) {
2218
2384
  affine[i] *= normalizationScale
@@ -26,6 +26,46 @@ interface PendingRequest {
26
26
  requesters: Set<string>
27
27
  }
28
28
 
29
+ /**
30
+ * Map from [z, y, x] PixelRegion indices to the zarr dim name.
31
+ * Index 0 → "z", index 1 → "y", index 2 → "x".
32
+ */
33
+ const SPATIAL_DIM_MAP: Record<string, 0 | 1 | 2> = {
34
+ z: 0,
35
+ y: 1,
36
+ x: 2,
37
+ }
38
+
39
+ /**
40
+ * Build a zarr selection array that respects the actual dimension order
41
+ * of the zarr array.
42
+ *
43
+ * The `PixelRegion` is always in `[z, y, x]` order. This function maps
44
+ * each zarr dimension to the correct slice:
45
+ * - `"z"`, `"y"`, `"x"` → sliced by the corresponding PixelRegion axis
46
+ * - `"c"` (channel) → `null` (select all components)
47
+ * - `"t"` (time) → `0` (first timepoint)
48
+ *
49
+ * @param dims - Dimension names from NgffImage (e.g. `["y", "x", "c"]`)
50
+ * @param region - The pixel region in `[z, y, x]` order
51
+ * @returns Selection array matching the zarr dim order
52
+ */
53
+ export function buildSelection(
54
+ dims: string[],
55
+ region: PixelRegion,
56
+ ): (zarr.Slice | number | null)[] {
57
+ return dims.map((dim) => {
58
+ const spatialIdx = SPATIAL_DIM_MAP[dim]
59
+ if (spatialIdx !== undefined) {
60
+ return zarr.slice(region.start[spatialIdx], region.end[spatialIdx])
61
+ }
62
+ if (dim === "c") return null // select all channels
63
+ if (dim === "t") return 0 // first timepoint
64
+ // Unknown dimension — select all to avoid data loss
65
+ return null
66
+ })
67
+ }
68
+
29
69
  /**
30
70
  * RegionCoalescer handles fetching sub-regions from OME-Zarr images with:
31
71
  *
@@ -113,11 +153,11 @@ export class RegionCoalescer {
113
153
 
114
154
  // Fetch using fizarrita's worker-accelerated zarrGet
115
155
  try {
116
- const selection = [
117
- zarr.slice(region.start[0], region.end[0]),
118
- zarr.slice(region.start[1], region.end[1]),
119
- zarr.slice(region.start[2], region.end[2]),
120
- ]
156
+ // Build a dim-aware selection that maps the [z, y, x] PixelRegion
157
+ // to the actual zarr dimension order. Non-spatial dims are handled:
158
+ // "c" (channel) → null (fetch all components)
159
+ // "t" (time) → 0 (first timepoint, reduces dimension)
160
+ const selection = buildSelection(ngffImage.dims, region)
121
161
  // Pass the chunk cache to fizarrita's getWorker via zarrGet.
122
162
  // The `cache` option is available in @fideus-labs/fizarrita >=1.2.0.
123
163
  const zarrOpts = this._cache
@@ -93,51 +93,67 @@ export function selectResolution(
93
93
  }
94
94
 
95
95
  /**
96
- * Get the chunk shape for a volume.
96
+ * Get the chunk shape for a volume as [z, y, x].
97
+ *
98
+ * Looks up `"z"`, `"y"`, `"x"` dimensions by name. If `"z"` is absent
99
+ * (e.g., 2D images with `dims=["y", "x"]` or `["y", "x", "c"]`),
100
+ * the z chunk size defaults to 1. Non-spatial dimensions like `"c"` and
101
+ * `"t"` are ignored.
97
102
  *
98
103
  * @param ngffImage - The NgffImage to get chunk shape from
99
104
  * @returns Chunk shape as [z, y, x]
105
+ * @throws If neither `"y"` nor `"x"` can be found in dims
100
106
  */
101
107
  export function getChunkShape(ngffImage: NgffImage): [number, number, number] {
102
108
  const chunks = ngffImage.data.chunks
103
109
  const dims = ngffImage.dims
104
110
 
105
- // Find z, y, x indices in dims
106
- const zIdx = dims.indexOf("z")
107
111
  const yIdx = dims.indexOf("y")
108
112
  const xIdx = dims.indexOf("x")
109
113
 
110
- if (zIdx === -1 || yIdx === -1 || xIdx === -1) {
111
- // Fallback: assume last 3 dimensions are z, y, x
112
- const n = chunks.length
113
- return [chunks[n - 3] || 1, chunks[n - 2] || 1, chunks[n - 1] || 1]
114
+ if (yIdx === -1 || xIdx === -1) {
115
+ throw new Error(
116
+ `Cannot determine chunk shape: dims=[${dims.join(",")}] ` +
117
+ `is missing required "y" and/or "x" axes`,
118
+ )
114
119
  }
115
120
 
116
- return [chunks[zIdx], chunks[yIdx], chunks[xIdx]]
121
+ const zIdx = dims.indexOf("z")
122
+ const cz = zIdx !== -1 ? chunks[zIdx] : 1
123
+
124
+ return [cz, chunks[yIdx], chunks[xIdx]]
117
125
  }
118
126
 
119
127
  /**
120
- * Get the shape of a volume as [z, y, x].
128
+ * Get the spatial shape of a volume as [z, y, x].
129
+ *
130
+ * Looks up `"z"`, `"y"`, `"x"` dimensions by name. If `"z"` is absent
131
+ * (e.g., 2D images with `dims=["y", "x"]` or `["y", "x", "c"]`),
132
+ * the z size defaults to 1. Non-spatial dimensions like `"c"` and
133
+ * `"t"` are ignored.
121
134
  *
122
135
  * @param ngffImage - The NgffImage
123
136
  * @returns Shape as [z, y, x]
137
+ * @throws If neither `"y"` nor `"x"` can be found in dims
124
138
  */
125
139
  export function getVolumeShape(ngffImage: NgffImage): [number, number, number] {
126
140
  const shape = ngffImage.data.shape
127
141
  const dims = ngffImage.dims
128
142
 
129
- // Find z, y, x indices in dims
130
- const zIdx = dims.indexOf("z")
131
143
  const yIdx = dims.indexOf("y")
132
144
  const xIdx = dims.indexOf("x")
133
145
 
134
- if (zIdx === -1 || yIdx === -1 || xIdx === -1) {
135
- // Fallback: assume last 3 dimensions are z, y, x
136
- const n = shape.length
137
- return [shape[n - 3] || 1, shape[n - 2] || 1, shape[n - 1] || 1]
146
+ if (yIdx === -1 || xIdx === -1) {
147
+ throw new Error(
148
+ `Cannot determine volume shape: dims=[${dims.join(",")}] ` +
149
+ `is missing required "y" and/or "x" axes`,
150
+ )
138
151
  }
139
152
 
140
- return [shape[zIdx], shape[yIdx], shape[xIdx]]
153
+ const zIdx = dims.indexOf("z")
154
+ const sz = zIdx !== -1 ? shape[zIdx] : 1
155
+
156
+ return [sz, shape[yIdx], shape[xIdx]]
141
157
  }
142
158
 
143
159
  /**