@fideus-labs/fidnii 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +9 -0
- package/README.md +180 -0
- package/dist/BufferManager.d.ts +86 -0
- package/dist/BufferManager.d.ts.map +1 -0
- package/dist/BufferManager.js +146 -0
- package/dist/BufferManager.js.map +1 -0
- package/dist/ClipPlanes.d.ts +180 -0
- package/dist/ClipPlanes.d.ts.map +1 -0
- package/dist/ClipPlanes.js +513 -0
- package/dist/ClipPlanes.js.map +1 -0
- package/dist/OMEZarrNVImage.d.ts +545 -0
- package/dist/OMEZarrNVImage.d.ts.map +1 -0
- package/dist/OMEZarrNVImage.js +1799 -0
- package/dist/OMEZarrNVImage.js.map +1 -0
- package/dist/RegionCoalescer.d.ts +75 -0
- package/dist/RegionCoalescer.d.ts.map +1 -0
- package/dist/RegionCoalescer.js +151 -0
- package/dist/RegionCoalescer.js.map +1 -0
- package/dist/ResolutionSelector.d.ts +88 -0
- package/dist/ResolutionSelector.d.ts.map +1 -0
- package/dist/ResolutionSelector.js +224 -0
- package/dist/ResolutionSelector.js.map +1 -0
- package/dist/ViewportBounds.d.ts +50 -0
- package/dist/ViewportBounds.d.ts.map +1 -0
- package/dist/ViewportBounds.js +325 -0
- package/dist/ViewportBounds.js.map +1 -0
- package/dist/events.d.ts +122 -0
- package/dist/events.d.ts.map +1 -0
- package/dist/events.js +12 -0
- package/dist/events.js.map +1 -0
- package/dist/index.d.ts +48 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +59 -0
- package/dist/index.js.map +1 -0
- package/dist/types.d.ts +273 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +126 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/affine.d.ts +72 -0
- package/dist/utils/affine.d.ts.map +1 -0
- package/dist/utils/affine.js +173 -0
- package/dist/utils/affine.js.map +1 -0
- package/dist/utils/coordinates.d.ts +80 -0
- package/dist/utils/coordinates.d.ts.map +1 -0
- package/dist/utils/coordinates.js +207 -0
- package/dist/utils/coordinates.js.map +1 -0
- package/package.json +61 -0
- package/src/BufferManager.ts +176 -0
- package/src/ClipPlanes.ts +640 -0
- package/src/OMEZarrNVImage.ts +2286 -0
- package/src/RegionCoalescer.ts +217 -0
- package/src/ResolutionSelector.ts +325 -0
- package/src/ViewportBounds.ts +369 -0
- package/src/events.ts +146 -0
- package/src/index.ts +153 -0
- package/src/types.ts +429 -0
- package/src/utils/affine.ts +218 -0
- package/src/utils/coordinates.ts +271 -0
|
@@ -0,0 +1,1799 @@
|
|
|
1
|
+
// SPDX-FileCopyrightText: Copyright (c) Fideus Labs LLC
|
|
2
|
+
// SPDX-License-Identifier: MIT
|
|
3
|
+
import { NIFTI1 } from "nifti-reader-js";
|
|
4
|
+
import { NVImage, SLICE_TYPE } from "@niivue/niivue";
|
|
5
|
+
import { computeOmeroFromNgffImage } from "@fideus-labs/ngff-zarr/browser";
|
|
6
|
+
import { LRUCache } from "lru-cache";
|
|
7
|
+
import { getBytesPerPixel, getNiftiDataType, parseZarritaDtype, } from "./types.js";
|
|
8
|
+
import { BufferManager } from "./BufferManager.js";
|
|
9
|
+
import { RegionCoalescer } from "./RegionCoalescer.js";
|
|
10
|
+
import { getChunkShape, getVolumeShape, select2DResolution, selectResolution, } from "./ResolutionSelector.js";
|
|
11
|
+
import { alignToChunks, clipPlanesToNiivue, clipPlanesToPixelRegion, createDefaultClipPlanes, MAX_CLIP_PLANES, normalizeVector, validateClipPlanes, } from "./ClipPlanes.js";
|
|
12
|
+
import { affineToNiftiSrows, calculateWorldBounds, createAffineFromNgffImage, } from "./utils/affine.js";
|
|
13
|
+
import { worldToPixel } from "./utils/coordinates.js";
|
|
14
|
+
import { boundsApproxEqual, computeViewportBounds2D, computeViewportBounds3D, } from "./ViewportBounds.js";
|
|
15
|
+
import { OMEZarrNVImageEvent, } from "./events.js";
|
|
16
|
+
// Default ceiling on the number of voxels fetched into the display buffer (~50M).
// Drives target-resolution selection: the finest level whose (clipped) region
// fits under this budget is chosen.
const DEFAULT_MAX_PIXELS = 50_000_000;
// Default capacity of the internal decoded-chunk LRU cache; 0 disables caching
// (see the constructor's cache precedence logic).
const DEFAULT_MAX_CACHE_ENTRIES = 200;
|
|
18
|
+
/**
|
|
19
|
+
* OMEZarrNVImage extends NVImage to support rendering OME-Zarr images in NiiVue.
|
|
20
|
+
*
|
|
21
|
+
* Features:
|
|
22
|
+
* - Progressive loading: quick preview from lowest resolution, then target resolution
|
|
23
|
+
* - Arbitrary clip planes defined by point + normal (up to 6)
|
|
24
|
+
* - Dynamic buffer sizing to match fetched data exactly (no upsampling)
|
|
25
|
+
* - Request coalescing for efficient chunk fetching
|
|
26
|
+
* - Automatic metadata updates to reflect OME-Zarr coordinate transforms
|
|
27
|
+
*/
|
|
28
|
+
export class OMEZarrNVImage extends NVImage {
|
|
29
|
+
/** The OME-Zarr multiscales data (pyramid of NgffImage levels; index 0 = finest). */
multiscales;
/** Maximum number of pixels the display buffer may hold (resolution budget). */
maxPixels;
/** Reference to the primary NiiVue instance. */
niivue;
/** Buffer manager for dynamically-sized pixel data (resized to each fetch, no upsampling). */
bufferManager;
/** Region coalescer for efficient, de-duplicated chunk fetching. */
coalescer;
/** Decoded-chunk cache shared across 3D and 2D slab loads (undefined = caching disabled). */
_chunkCache;
/** Current clip planes in world space (empty array = full volume visible). */
_clipPlanes;
/** Target resolution level index (chosen to fit maxPixels). */
targetLevelIndex;
/** Current resolution level index during progressive loading. */
currentLevelIndex;
/** True while a populateVolume() cycle is in flight. */
isLoading = false;
/** Data type of the volume (parsed from the level-0 zarr dtype). */
dtype;
/** Full volume bounds in world space (computed from level 0). */
_volumeBounds;
/** Current buffer bounds in world space (may differ from full volume when clipped). */
_currentBufferBounds;
/** Previous NiiVue clip plane change handler (chained, not discarded). */
previousOnClipPlaneChange;
/** Debounce delay for clip plane data refetches (ms). */
clipPlaneDebounceMs;
/** Timeout handle for the debounced clip plane refetch (null = none pending). */
clipPlaneRefetchTimeout = null;
/** Previous clip planes state, for volume-change direction comparison. */
_previousClipPlanes = [];
/** Previous level-0 pixel count of the clipped region (direction comparison baseline). */
_previousPixelCount = 0;
/** Cached or dynamically computed OMERO metadata for visualization (cal_min/cal_max). */
_omero;
/** Active channel index for OMERO window selection (default: 0). */
_activeChannel = 0;
/** Resolution level at which OMERO was last computed (-1 = never); tracks recomputation. */
_omeroComputedForLevel = -1;
/** Internal EventTarget for event dispatching (composition pattern, not inheritance). */
_eventTarget = new EventTarget();
/** Pending populate request; latest wins — replaces any previous pending request. */
_pendingPopulateRequest = null;
/** Current populate trigger (set at start of populateVolume, echoed in emitted events). */
_currentPopulateTrigger = "initial";
// ============================================================
// Multi-NV / Slab Buffer State
// ============================================================
/** Attached Niivue instances and their tracked state. */
_attachedNiivues = new Map();
/** Per-slice-type slab buffers (lazily created on first use). */
_slabBuffers = new Map();
/** Debounce timeout for slab reload, keyed by slice type. */
_slabReloadTimeouts = new Map();
// ============================================================
// Viewport-Aware Resolution State
// ============================================================
/** Whether viewport-aware resolution selection is enabled. */
_viewportAwareEnabled = false;
/**
 * Viewport bounds for the 3D render volume (union of all RENDER/MULTIPLANAR NVs).
 * Null = full volume, no viewport constraint.
 */
_viewportBounds3D = null;
/**
 * Per-slab viewport bounds (from the NV instance that displays each slab).
 * Null entry = full volume, no viewport constraint for that slab.
 */
_viewportBoundsPerSlab = new Map();
/** Timeout handle for the debounced viewport update (null = none pending). */
_viewportUpdateTimeout = null;
/** Per-slab AbortController to cancel in-flight progressive loads. */
_slabAbortControllers = new Map();
// ============================================================
// 3D Zoom Override
// ============================================================
/** Maximum 3D render zoom level for scroll-wheel zoom. */
_max3DZoom;
/** Minimum 3D render zoom level for scroll-wheel zoom. */
_min3DZoom;
/**
 * Debounce delay for viewport-aware reloads (ms).
 * Higher than the clip plane debounce to avoid excessive reloads during
 * continuous zoom/pan interactions.
 */
static VIEWPORT_DEBOUNCE_MS = 500;
|
|
118
|
+
/**
|
|
119
|
+
* Private constructor. Use OMEZarrNVImage.create() for instantiation.
|
|
120
|
+
*/
|
|
121
|
+
constructor(options) {
|
|
122
|
+
// Call NVImage constructor with no data buffer
|
|
123
|
+
super();
|
|
124
|
+
this.multiscales = options.multiscales;
|
|
125
|
+
this.maxPixels = options.maxPixels ?? DEFAULT_MAX_PIXELS;
|
|
126
|
+
this.niivue = options.niivue;
|
|
127
|
+
this.clipPlaneDebounceMs = options.clipPlaneDebounceMs ?? 300;
|
|
128
|
+
// Initialize chunk cache: user-provided > LRU(maxCacheEntries) > disabled
|
|
129
|
+
const maxEntries = options.maxCacheEntries ?? DEFAULT_MAX_CACHE_ENTRIES;
|
|
130
|
+
if (options.cache) {
|
|
131
|
+
this._chunkCache = options.cache;
|
|
132
|
+
}
|
|
133
|
+
else if (maxEntries > 0) {
|
|
134
|
+
this._chunkCache = new LRUCache({ max: maxEntries });
|
|
135
|
+
}
|
|
136
|
+
this.coalescer = new RegionCoalescer(this._chunkCache);
|
|
137
|
+
this._max3DZoom = options.max3DZoom ?? 10.0;
|
|
138
|
+
this._min3DZoom = options.min3DZoom ?? 0.3;
|
|
139
|
+
this._viewportAwareEnabled = options.viewportAware ?? true;
|
|
140
|
+
// Initialize clip planes to empty (full volume visible)
|
|
141
|
+
this._clipPlanes = createDefaultClipPlanes(this.multiscales);
|
|
142
|
+
// Get data type from highest resolution image
|
|
143
|
+
const highResImage = this.multiscales.images[0];
|
|
144
|
+
this.dtype = parseZarritaDtype(highResImage.data.dtype);
|
|
145
|
+
// Calculate volume bounds from highest resolution for most accurate bounds
|
|
146
|
+
const highResAffine = createAffineFromNgffImage(highResImage);
|
|
147
|
+
const highResShape = getVolumeShape(highResImage);
|
|
148
|
+
this._volumeBounds = calculateWorldBounds(highResAffine, highResShape);
|
|
149
|
+
// Initially, buffer bounds = full volume bounds (no clipping yet)
|
|
150
|
+
this._currentBufferBounds = { ...this._volumeBounds };
|
|
151
|
+
// Calculate target resolution based on pixel budget
|
|
152
|
+
const selection = selectResolution(this.multiscales, this.maxPixels, this._clipPlanes, this._volumeBounds);
|
|
153
|
+
this.targetLevelIndex = selection.levelIndex;
|
|
154
|
+
this.currentLevelIndex = this.multiscales.images.length - 1;
|
|
155
|
+
// Create buffer manager (dynamic sizing, no pre-allocation)
|
|
156
|
+
this.bufferManager = new BufferManager(this.maxPixels, this.dtype);
|
|
157
|
+
// Initialize NVImage properties with placeholder values
|
|
158
|
+
// Actual values will be set when data is first loaded
|
|
159
|
+
this.initializeNVImageProperties();
|
|
160
|
+
}
|
|
161
|
+
/**
|
|
162
|
+
* Create a new OMEZarrNVImage instance.
|
|
163
|
+
*
|
|
164
|
+
* By default, the image is automatically added to NiiVue and progressive
|
|
165
|
+
* loading starts immediately (fire-and-forget). This enables progressive
|
|
166
|
+
* rendering where each resolution level is displayed as it loads.
|
|
167
|
+
*
|
|
168
|
+
* Set `autoLoad: false` for manual control over when loading starts.
|
|
169
|
+
* Listen to 'populateComplete' event to know when loading finishes.
|
|
170
|
+
*
|
|
171
|
+
* @param options - Options including multiscales, niivue reference, and optional maxPixels
|
|
172
|
+
* @returns Promise resolving to the OMEZarrNVImage instance
|
|
173
|
+
*/
|
|
174
|
+
static async create(options) {
|
|
175
|
+
const image = new OMEZarrNVImage(options);
|
|
176
|
+
// Store and replace the clip plane change handler
|
|
177
|
+
image.previousOnClipPlaneChange = image.niivue.onClipPlaneChange;
|
|
178
|
+
image.niivue.onClipPlaneChange = (clipPlane) => {
|
|
179
|
+
// Call original handler if it exists
|
|
180
|
+
if (image.previousOnClipPlaneChange) {
|
|
181
|
+
image.previousOnClipPlaneChange(clipPlane);
|
|
182
|
+
}
|
|
183
|
+
// Handle clip plane change
|
|
184
|
+
image.onNiivueClipPlaneChange(clipPlane);
|
|
185
|
+
};
|
|
186
|
+
// Auto-attach the primary NV instance for slice type / location tracking
|
|
187
|
+
image.attachNiivue(image.niivue);
|
|
188
|
+
// Auto-load by default (add to NiiVue + start progressive loading)
|
|
189
|
+
const autoLoad = options.autoLoad ?? true;
|
|
190
|
+
if (autoLoad) {
|
|
191
|
+
image.niivue.addVolume(image);
|
|
192
|
+
void image.populateVolume(); // Fire-and-forget, returns immediately
|
|
193
|
+
}
|
|
194
|
+
return image;
|
|
195
|
+
}
|
|
196
|
+
/**
|
|
197
|
+
* Initialize NVImage properties with placeholder values.
|
|
198
|
+
* Actual values will be set by loadResolutionLevel() after first data fetch.
|
|
199
|
+
*/
|
|
200
|
+
initializeNVImageProperties() {
|
|
201
|
+
// Create NIfTI header with placeholder values
|
|
202
|
+
const hdr = new NIFTI1();
|
|
203
|
+
this.hdr = hdr;
|
|
204
|
+
// Placeholder dimensions (will be updated when data loads)
|
|
205
|
+
hdr.dims = [3, 1, 1, 1, 1, 1, 1, 1];
|
|
206
|
+
// Set data type
|
|
207
|
+
hdr.datatypeCode = getNiftiDataType(this.dtype);
|
|
208
|
+
hdr.numBitsPerVoxel = getBytesPerPixel(this.dtype) * 8;
|
|
209
|
+
// Placeholder pixel dimensions
|
|
210
|
+
hdr.pixDims = [1, 1, 1, 1, 0, 0, 0, 0];
|
|
211
|
+
// Placeholder affine (identity)
|
|
212
|
+
hdr.affine = [
|
|
213
|
+
[1, 0, 0, 0],
|
|
214
|
+
[0, 1, 0, 0],
|
|
215
|
+
[0, 0, 1, 0],
|
|
216
|
+
[0, 0, 0, 1],
|
|
217
|
+
];
|
|
218
|
+
hdr.sform_code = 1; // Scanner coordinates
|
|
219
|
+
// Set name
|
|
220
|
+
this.name = this.multiscales.metadata?.name ?? "OME-Zarr";
|
|
221
|
+
// Initialize with empty typed array (will be replaced when data loads)
|
|
222
|
+
// We need at least 1 element to avoid issues
|
|
223
|
+
this.img = this.bufferManager.resize([1, 1, 1]);
|
|
224
|
+
// Set default colormap
|
|
225
|
+
this._colormap = "gray";
|
|
226
|
+
this._opacity = 1.0;
|
|
227
|
+
}
|
|
228
|
+
/**
 * Populate the volume with data.
 *
 * Loading strategy:
 * 1. Load lowest resolution first for quick preview (unless skipPreview is true)
 * 2. Jump directly to target resolution (skip intermediate levels)
 *
 * Concurrency: if called while already loading, the request is queued and
 * only the LATEST queued request is kept (latest wins). When a queued
 * request is replaced, a `loadingSkipped` event is emitted. The queued
 * request is drained by handlePendingPopulateRequest() in the finally block.
 *
 * @param skipPreview - If true, skip the preview load (used for clip plane updates)
 * @param trigger - What triggered this population (default: 'initial'); echoed in events
 */
async populateVolume(skipPreview = false, trigger = "initial") {
    // If already loading, queue this request (latest wins).
    if (this.isLoading) {
        if (this._pendingPopulateRequest !== null) {
            // Replacing an existing queued request - emit loadingSkipped for the loser.
            this._emitEvent("loadingSkipped", {
                reason: "queued-replaced",
                trigger: this._pendingPopulateRequest.trigger,
            });
        }
        // Queue this request (no event - just queuing).
        this._pendingPopulateRequest = { skipPreview, trigger };
        return;
    }
    this.isLoading = true;
    this._currentPopulateTrigger = trigger;
    this._pendingPopulateRequest = null; // Clear any stale pending request
    try {
        const numLevels = this.multiscales.images.length;
        const lowestLevel = numLevels - 1;
        // Quick preview from lowest resolution (if different from target and not skipped).
        if (!skipPreview && lowestLevel !== this.targetLevelIndex) {
            await this.loadResolutionLevel(lowestLevel, "preview");
            const prevLevel = this.currentLevelIndex;
            this.currentLevelIndex = lowestLevel;
            // Emit resolutionChange for the preview load (only on an actual change).
            if (prevLevel !== lowestLevel) {
                this._emitEvent("resolutionChange", {
                    currentLevel: this.currentLevelIndex,
                    targetLevel: this.targetLevelIndex,
                    previousLevel: prevLevel,
                    trigger: this._currentPopulateTrigger,
                });
            }
        }
        // Final quality at target resolution.
        await this.loadResolutionLevel(this.targetLevelIndex, "target");
        const prevLevelBeforeTarget = this.currentLevelIndex;
        this.currentLevelIndex = this.targetLevelIndex;
        // Emit resolutionChange for the target load (only on an actual change).
        if (prevLevelBeforeTarget !== this.targetLevelIndex) {
            this._emitEvent("resolutionChange", {
                currentLevel: this.currentLevelIndex,
                targetLevel: this.targetLevelIndex,
                previousLevel: prevLevelBeforeTarget,
                trigger: this._currentPopulateTrigger,
            });
        }
        // Update previous state for direction-aware resolution selection.
        // Always calculated at level 0 so pixel counts are comparable across
        // resolution changes.
        this._previousClipPlanes = this.copyClipPlanes(this._clipPlanes);
        const referenceImage = this.multiscales.images[0];
        const region = clipPlanesToPixelRegion(this._clipPlanes, this._volumeBounds, referenceImage);
        const aligned = alignToChunks(region, referenceImage);
        this._previousPixelCount = this.calculateAlignedPixelCount(aligned);
    }
    finally {
        // Always release the loading flag, then either start the queued request
        // or emit populateComplete (handled inside handlePendingPopulateRequest).
        this.isLoading = false;
        this.handlePendingPopulateRequest();
    }
}
|
|
303
|
+
/**
|
|
304
|
+
* Process any pending populate request after current load completes.
|
|
305
|
+
* If no pending request, emits populateComplete.
|
|
306
|
+
*/
|
|
307
|
+
handlePendingPopulateRequest() {
|
|
308
|
+
const pending = this._pendingPopulateRequest;
|
|
309
|
+
if (pending !== null) {
|
|
310
|
+
this._pendingPopulateRequest = null;
|
|
311
|
+
// Use void to indicate we're intentionally not awaiting
|
|
312
|
+
void this.populateVolume(pending.skipPreview, pending.trigger);
|
|
313
|
+
return;
|
|
314
|
+
}
|
|
315
|
+
// No more pending requests - emit populateComplete
|
|
316
|
+
this._emitEvent("populateComplete", {
|
|
317
|
+
currentLevel: this.currentLevelIndex,
|
|
318
|
+
targetLevel: this.targetLevelIndex,
|
|
319
|
+
trigger: this._currentPopulateTrigger,
|
|
320
|
+
});
|
|
321
|
+
}
|
|
322
|
+
/**
 * Load data at a specific resolution level.
 *
 * With dynamic buffer sizing:
 * 1. Fetch data for the chunk-aligned region covering the clipped viewport
 * 2. Resize the buffer to match fetched data exactly (no upsampling)
 * 3. Update the header with correct dimensions, voxel sizes, and affine
 * 4. Refresh NiiVue
 *
 * Emits 'loadingStart' before and 'loadingComplete' after the fetch.
 *
 * @param levelIndex - Resolution level index into multiscales.images
 * @param requesterId - ID for request coalescing (e.g. "preview" / "target")
 */
async loadResolutionLevel(levelIndex, requesterId) {
    // Emit loadingStart event.
    this._emitEvent("loadingStart", {
        levelIndex,
        trigger: this._currentPopulateTrigger,
    });
    const ngffImage = this.multiscales.images[levelIndex];
    // Pixel region for the current clip planes (+ 3D viewport bounds if active),
    // then expanded outward to chunk boundaries for efficient fetching.
    const pixelRegion = clipPlanesToPixelRegion(this._clipPlanes, this._volumeBounds, ngffImage, this._viewportBounds3D ?? undefined);
    const alignedRegion = alignToChunks(pixelRegion, ngffImage);
    // Shape of data to fetch, in [z, y, x] order.
    const fetchedShape = [
        alignedRegion.chunkAlignedEnd[0] - alignedRegion.chunkAlignedStart[0],
        alignedRegion.chunkAlignedEnd[1] - alignedRegion.chunkAlignedStart[1],
        alignedRegion.chunkAlignedEnd[2] - alignedRegion.chunkAlignedStart[2],
    ];
    // Fetch the data through the coalescer (deduplicates overlapping requests).
    const fetchRegion = {
        start: alignedRegion.chunkAlignedStart,
        end: alignedRegion.chunkAlignedEnd,
    };
    const result = await this.coalescer.fetchRegion(ngffImage, levelIndex, fetchRegion, requesterId);
    // Resize buffer to match fetched data exactly (no upsampling!).
    const targetData = this.bufferManager.resize(fetchedShape);
    // Direct copy of fetched data.
    targetData.set(result.data);
    // Re-point this.img at the (possibly reallocated) buffer.
    this.img = this.bufferManager.getTypedArray();
    // Update NVImage header with correct dimensions and transforms.
    this.updateHeaderForRegion(ngffImage, alignedRegion, fetchedShape);
    // Compute or apply OMERO metadata for cal_min/cal_max.
    await this.ensureOmeroMetadata(ngffImage, levelIndex);
    // Reset global_min so NiiVue's refreshLayers() re-runs calMinMax() on real data.
    // Without this, if calMinMax() was previously called on placeholder/empty data
    // (e.g., when setting colormap before loading), global_min would already be set
    // and NiiVue would skip recalculating intensity ranges, leaving cal_min/cal_max
    // at stale values (typically 0/0), causing an all-white render.
    this.global_min = undefined;
    // Re-express clip planes relative to the newly loaded buffer bounds.
    this.updateNiivueClipPlanes();
    // Refresh NiiVue.
    this.niivue.updateGLVolume();
    // Widen the display window if actual data exceeds the OMERO range.
    // At higher resolutions, individual bright/dark voxels that were averaged
    // out at lower resolutions can exceed the OMERO-specified window, causing
    // clipping artifacts. This preserves the OMERO lower bound but widens the
    // ceiling to encompass the full data range when needed.
    this._widenCalRangeIfNeeded(this);
    // Emit loadingComplete event.
    this._emitEvent("loadingComplete", {
        levelIndex,
        trigger: this._currentPopulateTrigger,
    });
}
|
|
388
|
+
/**
 * Update NVImage header for a loaded region.
 *
 * With dynamic buffer sizing, the buffer dimensions equal the fetched
 * dimensions, so pixDims come directly from this resolution level's voxel
 * size (no upsampling correction). The affine translation is shifted by the
 * region's chunk-aligned start so buffer voxel [0,0,0] maps to the correct
 * world position. Also records the buffer's world bounds and recomputes RAS.
 *
 * NOTE on axis order: fetchedShape and region.chunkAlignedStart are [z, y, x],
 * while NIfTI dims/pixDims and the affine translation are x-first.
 *
 * @param ngffImage - The NgffImage at the current resolution level
 * @param region - The chunk-aligned region that was loaded
 * @param fetchedShape - The shape of the fetched data [z, y, x]
 */
updateHeaderForRegion(ngffImage, region, fetchedShape) {
    if (!this.hdr)
        return;
    // Voxel size from this resolution level; tolerate either case for axis keys.
    const scale = ngffImage.scale;
    const sx = scale.x ?? scale.X ?? 1;
    const sy = scale.y ?? scale.Y ?? 1;
    const sz = scale.z ?? scale.Z ?? 1;
    // pixDims straight from this level's voxel size (index 0 is the NIfTI qfac slot).
    this.hdr.pixDims = [1, sx, sy, sz, 0, 0, 0, 0];
    // dims match the fetched data exactly; NIfTI order is [ndim, x, y, z, t, ...]
    // so the [z, y, x] fetchedShape is reversed here.
    this.hdr.dims = [
        3,
        fetchedShape[2],
        fetchedShape[1],
        fetchedShape[0],
        1,
        1,
        1,
        1,
    ];
    // Build the affine for this level, then translate it so that buffer voxel
    // [0,0,0] corresponds to source voxel region.chunkAlignedStart.
    const affine = createAffineFromNgffImage(ngffImage);
    const regionStart = region.chunkAlignedStart;
    // regionStart is [z, y, x]; affine translation lives at indices 12..14 = [x, y, z].
    affine[12] += regionStart[2] * sx; // x offset
    affine[13] += regionStart[1] * sy; // y offset
    affine[14] += regionStart[0] * sz; // z offset
    // Write the adjusted affine into the header as NIfTI srow vectors.
    const srows = affineToNiftiSrows(affine);
    this.hdr.affine = [
        srows.srow_x,
        srows.srow_y,
        srows.srow_z,
        [0, 0, 0, 1],
    ];
    // Record current buffer bounds in world space: origin at the translated
    // affine offset, extent = fetchedShape scaled by voxel size.
    this._currentBufferBounds = {
        min: [
            affine[12], // x (world coord of buffer origin)
            affine[13], // y
            affine[14], // z
        ],
        max: [
            affine[12] + fetchedShape[2] * sx,
            affine[13] + fetchedShape[1] * sy,
            affine[14] + fetchedShape[0] * sz,
        ],
    };
    // Recalculate RAS orientation from the updated header.
    this.calculateRAS();
}
|
|
455
|
+
/**
|
|
456
|
+
* Update NiiVue clip planes from current _clipPlanes.
|
|
457
|
+
*
|
|
458
|
+
* Clip planes are converted relative to the CURRENT BUFFER bounds,
|
|
459
|
+
* not the full volume bounds. This is because NiiVue's shader works
|
|
460
|
+
* in texture coordinates of the currently loaded data.
|
|
461
|
+
*/
|
|
462
|
+
updateNiivueClipPlanes() {
|
|
463
|
+
// Use current buffer bounds for clip plane conversion
|
|
464
|
+
// This ensures clip planes are relative to the currently loaded data
|
|
465
|
+
const niivueClipPlanes = clipPlanesToNiivue(this._clipPlanes, this._currentBufferBounds);
|
|
466
|
+
if (niivueClipPlanes.length > 0) {
|
|
467
|
+
this.niivue.scene.clipPlaneDepthAziElevs = niivueClipPlanes;
|
|
468
|
+
}
|
|
469
|
+
else {
|
|
470
|
+
// Clear clip planes - set to "disabled" state (depth > 1.8)
|
|
471
|
+
this.niivue.scene.clipPlaneDepthAziElevs = [[2, 0, 0]];
|
|
472
|
+
}
|
|
473
|
+
}
|
|
474
|
+
/**
|
|
475
|
+
* Apply OMERO window settings to NIfTI header cal_min/cal_max.
|
|
476
|
+
*
|
|
477
|
+
* Uses the active channel's window (start/end preferred over min/max).
|
|
478
|
+
* This sets the display intensity range for NiiVue rendering.
|
|
479
|
+
*/
|
|
480
|
+
applyOmeroToHeader() {
|
|
481
|
+
if (!this.hdr || !this._omero?.channels?.length)
|
|
482
|
+
return;
|
|
483
|
+
// Clamp active channel to valid range
|
|
484
|
+
const channelIndex = Math.min(this._activeChannel, this._omero.channels.length - 1);
|
|
485
|
+
const channel = this._omero.channels[channelIndex];
|
|
486
|
+
const window = channel?.window;
|
|
487
|
+
if (window) {
|
|
488
|
+
// Prefer start/end (display window based on quantiles) over min/max (data range)
|
|
489
|
+
const calMin = window.start ?? window.min;
|
|
490
|
+
const calMax = window.end ?? window.max;
|
|
491
|
+
if (calMin !== undefined)
|
|
492
|
+
this.hdr.cal_min = calMin;
|
|
493
|
+
if (calMax !== undefined)
|
|
494
|
+
this.hdr.cal_max = calMax;
|
|
495
|
+
}
|
|
496
|
+
}
|
|
497
|
+
/**
|
|
498
|
+
* Ensure OMERO metadata is available and applied.
|
|
499
|
+
*
|
|
500
|
+
* Strategy:
|
|
501
|
+
* - If OMERO exists in file metadata, use it (first time only)
|
|
502
|
+
* - If NOT present, compute dynamically:
|
|
503
|
+
* - Compute at preview (lowest) resolution for quick initial display
|
|
504
|
+
* - Recompute at target resolution for more accurate values
|
|
505
|
+
* - Keep target values for consistency on subsequent clip plane changes
|
|
506
|
+
*
|
|
507
|
+
* @param ngffImage - The NgffImage at the current resolution level
|
|
508
|
+
* @param levelIndex - The resolution level index
|
|
509
|
+
*/
|
|
510
|
+
async ensureOmeroMetadata(ngffImage, levelIndex) {
|
|
511
|
+
const existingOmero = this.multiscales.metadata?.omero;
|
|
512
|
+
if (existingOmero && !this._omero) {
|
|
513
|
+
// Use existing OMERO metadata from the file (first time)
|
|
514
|
+
this._omero = existingOmero;
|
|
515
|
+
this.applyOmeroToHeader();
|
|
516
|
+
return;
|
|
517
|
+
}
|
|
518
|
+
if (!existingOmero) {
|
|
519
|
+
// No OMERO in file - compute dynamically
|
|
520
|
+
// Compute at preview (lowest) and target levels, then keep for consistency
|
|
521
|
+
const lowestLevel = this.multiscales.images.length - 1;
|
|
522
|
+
const isPreviewLevel = levelIndex === lowestLevel;
|
|
523
|
+
const isTargetLevel = levelIndex === this.targetLevelIndex;
|
|
524
|
+
const needsCompute = isPreviewLevel ||
|
|
525
|
+
(isTargetLevel &&
|
|
526
|
+
this._omeroComputedForLevel !== this.targetLevelIndex);
|
|
527
|
+
if (needsCompute) {
|
|
528
|
+
const computedOmero = await computeOmeroFromNgffImage(ngffImage);
|
|
529
|
+
this._omero = computedOmero;
|
|
530
|
+
this._omeroComputedForLevel = levelIndex;
|
|
531
|
+
this.applyOmeroToHeader();
|
|
532
|
+
}
|
|
533
|
+
}
|
|
534
|
+
}
|
|
535
|
+
/**
 * Handle a clip plane change coming from NiiVue itself.
 * Called when the user interacts with clip planes in the NiiVue UI
 * (wired up via the chained handler installed in create()).
 *
 * @param _clipPlane - The NiiVue clip plane value (currently unused)
 */
onNiivueClipPlaneChange(_clipPlane) {
    // Intentionally a no-op for now: NiiVue-driven interactions do not update
    // our world-space clip planes. Could be extended for bidirectional sync.
}
|
|
543
|
+
/**
 * Set clip planes.
 *
 * Visual clipping is updated immediately for responsive feedback.
 * Data refetch is debounced to avoid excessive reloading during slider interaction.
 * Resolution changes are direction-aware: reducing volume may increase resolution,
 * increasing volume may decrease resolution.
 *
 * @param planes - Array of clip planes (max 6). Empty array = full volume visible.
 * @throws Error if more than 6 planes provided or if planes are invalid
 */
setClipPlanes(planes) {
    // Validate the planes (throws on invalid input before any state changes)
    validateClipPlanes(planes);
    // Check if this is a "reset" operation (clearing all planes)
    // NOTE(review): _previousClipPlanes is assumed to be maintained elsewhere
    // (it is read but never written in this method) — confirm it tracks the
    // last applied plane set.
    const isReset = planes.length === 0 && this._previousClipPlanes.length > 0;
    // Store new clip planes (deep copy; normals re-normalized to unit length)
    this._clipPlanes = planes.map((p) => ({
        point: [...p.point],
        normal: normalizeVector([...p.normal]),
    }));
    // Always update NiiVue clip planes immediately (visual feedback)
    this.updateNiivueClipPlanes();
    this.niivue.drawScene();
    // Clear any pending debounced refetch
    if (this.clipPlaneRefetchTimeout) {
        clearTimeout(this.clipPlaneRefetchTimeout);
        this.clipPlaneRefetchTimeout = null;
    }
    // Debounce the data refetch decision; isReset is captured so the
    // debounced handler sees the reset flag from this call, not a later one.
    this.clipPlaneRefetchTimeout = setTimeout(() => {
        this.handleDebouncedClipPlaneUpdate(isReset);
    }, this.clipPlaneDebounceMs);
}
|
|
577
|
+
/**
 * Handle clip plane update after debounce delay.
 * Implements direction-aware resolution selection.
 *
 * Only triggers a refetch when the resolution level needs to change.
 * Visual clipping is handled by NiiVue clip planes (updated immediately in setClipPlanes).
 *
 * @param isReset - True when this update cleared all planes (full volume restored)
 */
handleDebouncedClipPlaneUpdate(isReset) {
    this.clipPlaneRefetchTimeout = null;
    // Always use level 0 for consistent pixel count comparison across resolution changes
    const referenceImage = this.multiscales.images[0];
    // Calculate current region at reference resolution
    const currentRegion = clipPlanesToPixelRegion(this._clipPlanes, this._volumeBounds, referenceImage);
    const currentAligned = alignToChunks(currentRegion, referenceImage);
    const currentPixelCount = this.calculateAlignedPixelCount(currentAligned);
    // Determine volume change direction (comparing at consistent reference level)
    // NOTE(review): _previousPixelCount is read but not updated here — presumably
    // maintained by the refetch path; confirm it reflects the last applied region.
    const volumeReduced = currentPixelCount < this._previousPixelCount;
    const volumeIncreased = currentPixelCount > this._previousPixelCount;
    // Get optimal resolution for new region (3D viewport bounds)
    const selection = selectResolution(this.multiscales, this.maxPixels, this._clipPlanes, this._volumeBounds, this._viewportBounds3D ?? undefined);
    // Direction-aware resolution change
    let newTargetLevel = this.targetLevelIndex;
    if (isReset) {
        // Reset/clear: always recalculate optimal resolution
        newTargetLevel = selection.levelIndex;
    }
    else if (volumeReduced && selection.levelIndex < this.targetLevelIndex) {
        // Volume reduced → allow higher resolution (lower level index)
        newTargetLevel = selection.levelIndex;
    }
    else if (volumeIncreased && selection.levelIndex > this.targetLevelIndex) {
        // Volume increased → allow lower resolution (higher level index) if needed to fit
        newTargetLevel = selection.levelIndex;
    }
    // Otherwise: keep current level (no unnecessary resolution changes)
    // Only refetch when resolution level changes
    // Visual clipping is handled by NiiVue clip planes (already updated in setClipPlanes)
    if (newTargetLevel !== this.targetLevelIndex) {
        this.targetLevelIndex = newTargetLevel;
        this.populateVolume(true, "clipPlanesChanged"); // Skip preview for clip plane updates
    }
    // Emit clipPlanesChange event (after debounce)
    this._emitEvent("clipPlanesChange", {
        clipPlanes: this.copyClipPlanes(this._clipPlanes),
    });
}
|
|
623
|
+
/**
|
|
624
|
+
* Calculate pixel count for a chunk-aligned region.
|
|
625
|
+
*/
|
|
626
|
+
calculateAlignedPixelCount(aligned) {
|
|
627
|
+
return ((aligned.chunkAlignedEnd[0] - aligned.chunkAlignedStart[0]) *
|
|
628
|
+
(aligned.chunkAlignedEnd[1] - aligned.chunkAlignedStart[1]) *
|
|
629
|
+
(aligned.chunkAlignedEnd[2] - aligned.chunkAlignedStart[2]));
|
|
630
|
+
}
|
|
631
|
+
/**
|
|
632
|
+
* Create a deep copy of clip planes array.
|
|
633
|
+
*/
|
|
634
|
+
copyClipPlanes(planes) {
|
|
635
|
+
return planes.map((p) => ({
|
|
636
|
+
point: [...p.point],
|
|
637
|
+
normal: [...p.normal],
|
|
638
|
+
}));
|
|
639
|
+
}
|
|
640
|
+
/**
|
|
641
|
+
* Get current clip planes.
|
|
642
|
+
*
|
|
643
|
+
* @returns Copy of current clip planes array
|
|
644
|
+
*/
|
|
645
|
+
getClipPlanes() {
|
|
646
|
+
return this._clipPlanes.map((p) => ({
|
|
647
|
+
point: [...p.point],
|
|
648
|
+
normal: [...p.normal],
|
|
649
|
+
}));
|
|
650
|
+
}
|
|
651
|
+
/**
|
|
652
|
+
* Add a single clip plane.
|
|
653
|
+
*
|
|
654
|
+
* @param plane - Clip plane to add
|
|
655
|
+
* @throws Error if already at maximum (6) clip planes
|
|
656
|
+
*/
|
|
657
|
+
addClipPlane(plane) {
|
|
658
|
+
if (this._clipPlanes.length >= MAX_CLIP_PLANES) {
|
|
659
|
+
throw new Error(`Cannot add clip plane: already at maximum of ${MAX_CLIP_PLANES} planes`);
|
|
660
|
+
}
|
|
661
|
+
const newPlanes = [
|
|
662
|
+
...this._clipPlanes,
|
|
663
|
+
{
|
|
664
|
+
point: [...plane.point],
|
|
665
|
+
normal: [...plane.normal],
|
|
666
|
+
},
|
|
667
|
+
];
|
|
668
|
+
this.setClipPlanes(newPlanes);
|
|
669
|
+
}
|
|
670
|
+
/**
|
|
671
|
+
* Remove a clip plane by index.
|
|
672
|
+
*
|
|
673
|
+
* @param index - Index of plane to remove
|
|
674
|
+
* @throws Error if index is out of bounds
|
|
675
|
+
*/
|
|
676
|
+
removeClipPlane(index) {
|
|
677
|
+
if (index < 0 || index >= this._clipPlanes.length) {
|
|
678
|
+
throw new Error(`Invalid clip plane index: ${index} (have ${this._clipPlanes.length} planes)`);
|
|
679
|
+
}
|
|
680
|
+
const newPlanes = this._clipPlanes.filter((_, i) => i !== index);
|
|
681
|
+
this.setClipPlanes(newPlanes);
|
|
682
|
+
}
|
|
683
|
+
/**
|
|
684
|
+
* Clear all clip planes (show full volume).
|
|
685
|
+
*/
|
|
686
|
+
clearClipPlanes() {
|
|
687
|
+
this.setClipPlanes([]);
|
|
688
|
+
}
|
|
689
|
+
/**
|
|
690
|
+
* Get the current resolution level index.
|
|
691
|
+
*/
|
|
692
|
+
getCurrentLevelIndex() {
|
|
693
|
+
return this.currentLevelIndex;
|
|
694
|
+
}
|
|
695
|
+
/**
|
|
696
|
+
* Get the target resolution level index.
|
|
697
|
+
*/
|
|
698
|
+
getTargetLevelIndex() {
|
|
699
|
+
return this.targetLevelIndex;
|
|
700
|
+
}
|
|
701
|
+
/**
|
|
702
|
+
* Get the number of resolution levels.
|
|
703
|
+
*/
|
|
704
|
+
getNumLevels() {
|
|
705
|
+
return this.multiscales.images.length;
|
|
706
|
+
}
|
|
707
|
+
/**
|
|
708
|
+
* Get the volume bounds in world space.
|
|
709
|
+
*/
|
|
710
|
+
getVolumeBounds() {
|
|
711
|
+
return {
|
|
712
|
+
min: [...this._volumeBounds.min],
|
|
713
|
+
max: [...this._volumeBounds.max],
|
|
714
|
+
};
|
|
715
|
+
}
|
|
716
|
+
// ============================================================
|
|
717
|
+
// Viewport-Aware Resolution
|
|
718
|
+
// ============================================================
|
|
719
|
+
/**
 * Enable or disable viewport-aware resolution selection.
 *
 * When enabled, pan/zoom/rotation interactions are monitored and the fetch
 * region is constrained to the visible viewport area. This allows higher
 * resolution within the same `maxPixels` budget when zoomed in.
 *
 * @param enabled - Whether to enable viewport-aware resolution
 */
setViewportAware(enabled) {
    // No-op when the flag is unchanged (avoids redundant hook/unhook cycles).
    if (enabled === this._viewportAwareEnabled)
        return;
    this._viewportAwareEnabled = enabled;
    if (enabled) {
        // Hook viewport events on all attached NVs
        for (const [nv, state] of this._attachedNiivues) {
            this._hookViewportEvents(nv, state);
        }
        // Compute initial viewport bounds and trigger refetch
        this._recomputeViewportBounds();
    }
    else {
        // Unhook viewport events on all attached NVs
        for (const [nv, state] of this._attachedNiivues) {
            this._unhookViewportEvents(nv, state);
        }
        // Clear viewport bounds and refetch at full volume
        this._viewportBounds3D = null;
        this._viewportBoundsPerSlab.clear();
        // Cancel any pending debounced viewport recomputation so it cannot
        // fire after the feature is disabled.
        if (this._viewportUpdateTimeout) {
            clearTimeout(this._viewportUpdateTimeout);
            this._viewportUpdateTimeout = null;
        }
        // Recompute resolution without viewport constraint
        const selection = selectResolution(this.multiscales, this.maxPixels, this._clipPlanes, this._volumeBounds);
        if (selection.levelIndex !== this.targetLevelIndex) {
            this.targetLevelIndex = selection.levelIndex;
            this.populateVolume(true, "viewportChanged");
        }
        // Also reload slabs without viewport constraint
        this._reloadAllSlabs("viewportChanged");
    }
}
|
|
762
|
+
/**
|
|
763
|
+
* Get whether viewport-aware resolution selection is enabled.
|
|
764
|
+
*/
|
|
765
|
+
get viewportAware() {
|
|
766
|
+
return this._viewportAwareEnabled;
|
|
767
|
+
}
|
|
768
|
+
/**
|
|
769
|
+
* Get the current 3D viewport bounds (null if viewport-aware is disabled
|
|
770
|
+
* or no viewport constraint is active).
|
|
771
|
+
*/
|
|
772
|
+
getViewportBounds() {
|
|
773
|
+
if (!this._viewportBounds3D)
|
|
774
|
+
return null;
|
|
775
|
+
return {
|
|
776
|
+
min: [...this._viewportBounds3D.min],
|
|
777
|
+
max: [...this._viewportBounds3D.max],
|
|
778
|
+
};
|
|
779
|
+
}
|
|
780
|
+
/**
|
|
781
|
+
* Hook viewport events (onMouseUp, onZoom3DChange, wheel) on a NV instance.
|
|
782
|
+
*/
|
|
783
|
+
_hookViewportEvents(nv, state) {
|
|
784
|
+
// Save and chain onMouseUp (fires at end of any mouse/touch interaction)
|
|
785
|
+
state.previousOnMouseUp = nv.onMouseUp;
|
|
786
|
+
nv.onMouseUp = (data) => {
|
|
787
|
+
if (state.previousOnMouseUp) {
|
|
788
|
+
state.previousOnMouseUp(data);
|
|
789
|
+
}
|
|
790
|
+
this._handleViewportInteractionEnd(nv);
|
|
791
|
+
};
|
|
792
|
+
// Save and chain onZoom3DChange (fires when volScaleMultiplier changes)
|
|
793
|
+
state.previousOnZoom3DChange = nv.onZoom3DChange;
|
|
794
|
+
nv.onZoom3DChange = (zoom) => {
|
|
795
|
+
if (state.previousOnZoom3DChange) {
|
|
796
|
+
state.previousOnZoom3DChange(zoom);
|
|
797
|
+
}
|
|
798
|
+
this._handleViewportInteractionEnd(nv);
|
|
799
|
+
};
|
|
800
|
+
// Add wheel event listener on the canvas for scroll-wheel zoom detection
|
|
801
|
+
const controller = new AbortController();
|
|
802
|
+
state.viewportAbortController = controller;
|
|
803
|
+
if (nv.canvas) {
|
|
804
|
+
nv.canvas.addEventListener("wheel", () => {
|
|
805
|
+
this._handleViewportInteractionEnd(nv);
|
|
806
|
+
}, { signal: controller.signal, passive: true });
|
|
807
|
+
}
|
|
808
|
+
}
|
|
809
|
+
/**
|
|
810
|
+
* Unhook viewport events from a NV instance.
|
|
811
|
+
*/
|
|
812
|
+
_unhookViewportEvents(nv, state) {
|
|
813
|
+
// Restore onMouseUp
|
|
814
|
+
if (state.previousOnMouseUp !== undefined) {
|
|
815
|
+
nv.onMouseUp = state.previousOnMouseUp;
|
|
816
|
+
state.previousOnMouseUp = undefined;
|
|
817
|
+
}
|
|
818
|
+
// Restore onZoom3DChange
|
|
819
|
+
if (state.previousOnZoom3DChange !== undefined) {
|
|
820
|
+
nv.onZoom3DChange = state.previousOnZoom3DChange;
|
|
821
|
+
state.previousOnZoom3DChange = undefined;
|
|
822
|
+
}
|
|
823
|
+
// Remove wheel event listener
|
|
824
|
+
if (state.viewportAbortController) {
|
|
825
|
+
state.viewportAbortController.abort();
|
|
826
|
+
state.viewportAbortController = undefined;
|
|
827
|
+
}
|
|
828
|
+
}
|
|
829
|
+
// ============================================================
|
|
830
|
+
// 3D Zoom Override
|
|
831
|
+
// ============================================================
|
|
832
|
+
/**
 * Install a capturing-phase wheel listener on the NV canvas that overrides
 * NiiVue's hardcoded 3D render zoom clamp ([0.5, 2.0]).
 *
 * The listener intercepts scroll events over 3D render tiles and applies
 * zoom via `nv.setScale()` (which has no internal clamp), using the
 * configurable `_min3DZoom` / `_max3DZoom` bounds instead.
 *
 * Clip-plane scrolling is preserved: when a clip plane is active
 * (depth < 1.8), the event passes through to NiiVue's native handler.
 */
_hookZoomOverride(nv, state) {
    // No canvas yet (e.g. NV not attached to the DOM) — nothing to hook.
    if (!nv.canvas)
        return;
    // AbortController lets _unhookZoomOverride remove the listener later.
    const controller = new AbortController();
    state.zoomOverrideAbortController = controller;
    nv.canvas.addEventListener("wheel", (e) => {
        // Convert mouse position to DPR-scaled canvas coordinates
        const rect = nv.canvas.getBoundingClientRect();
        const dpr = nv.uiData.dpr ?? 1;
        const x = (e.clientX - rect.left) * dpr;
        const y = (e.clientY - rect.top) * dpr;
        // Only intercept if mouse is over a 3D render tile
        if (nv.inRenderTile(x, y) < 0)
            return;
        // Preserve clip-plane scrolling: when a clip plane is active
        // (depth < 1.8), let NiiVue handle the event normally.
        const clips = nv.scene.clipPlaneDepthAziElevs;
        const activeIdx = nv.uiData.activeClipPlaneIndex;
        if (nv.volumes.length > 0 &&
            clips?.[activeIdx]?.[0] !== undefined &&
            clips[activeIdx][0] < 1.8) {
            return;
        }
        // Prevent NiiVue's clamped handler from running.
        // NiiVue registers its listener in the bubbling phase, so our
        // capturing-phase listener fires first. stopImmediatePropagation
        // ensures no other same-element listeners fire either.
        e.stopImmediatePropagation();
        e.preventDefault();
        // Compute new zoom (same ×1.1 / ×0.9 per step as NiiVue).
        // Round to 2 decimal places (NiiVue rounds to 1, which causes the
        // zoom to get stuck at small values like 0.5 where ×0.9 rounds back).
        const zoomDir = e.deltaY < 0 ? 1 : -1;
        const current = nv.scene.volScaleMultiplier;
        let newZoom = current * (zoomDir > 0 ? 1.1 : 0.9);
        newZoom = Math.round(newZoom * 100) / 100;
        // Clamp to the configurable bounds instead of NiiVue's [0.5, 2.0].
        newZoom = Math.max(this._min3DZoom, Math.min(this._max3DZoom, newZoom));
        nv.setScale(newZoom);
        // Notify the viewport-aware system. Since we stopped propagation,
        // the passive wheel listener from _hookViewportEvents won't fire,
        // so we call this directly.
        this._handleViewportInteractionEnd(nv);
    }, { capture: true, signal: controller.signal });
}
|
|
887
|
+
/**
|
|
888
|
+
* Remove the 3D zoom override wheel listener from a NV instance.
|
|
889
|
+
*/
|
|
890
|
+
_unhookZoomOverride(_nv, state) {
|
|
891
|
+
if (state.zoomOverrideAbortController) {
|
|
892
|
+
state.zoomOverrideAbortController.abort();
|
|
893
|
+
state.zoomOverrideAbortController = undefined;
|
|
894
|
+
}
|
|
895
|
+
}
|
|
896
|
+
/**
|
|
897
|
+
* Called at the end of any viewport interaction (mouse up, touch end,
|
|
898
|
+
* zoom change, scroll wheel). Debounces the viewport bounds recomputation.
|
|
899
|
+
*/
|
|
900
|
+
_handleViewportInteractionEnd(_nv) {
|
|
901
|
+
if (!this._viewportAwareEnabled)
|
|
902
|
+
return;
|
|
903
|
+
// Debounce: clear any pending update and schedule a new one
|
|
904
|
+
if (this._viewportUpdateTimeout) {
|
|
905
|
+
clearTimeout(this._viewportUpdateTimeout);
|
|
906
|
+
}
|
|
907
|
+
this._viewportUpdateTimeout = setTimeout(() => {
|
|
908
|
+
this._viewportUpdateTimeout = null;
|
|
909
|
+
this._recomputeViewportBounds();
|
|
910
|
+
}, OMEZarrNVImage.VIEWPORT_DEBOUNCE_MS);
|
|
911
|
+
}
|
|
912
|
+
/**
 * Recompute viewport bounds from all attached NV instances and trigger
 * resolution reselection if bounds changed significantly.
 */
_recomputeViewportBounds() {
    if (!this._viewportAwareEnabled)
        return;
    // Compute separate viewport bounds for:
    // - 3D volume: union of all RENDER/MULTIPLANAR NV viewport bounds
    // - Per-slab: each slab type gets its own NV's viewport bounds
    let new3DBounds = null;
    const newSlabBounds = new Map();
    for (const [nv, state] of this._attachedNiivues) {
        if (state.currentSliceType === SLICE_TYPE.RENDER ||
            state.currentSliceType === SLICE_TYPE.MULTIPLANAR) {
            // 3D render mode: compute from orthographic frustum
            const nvBounds = computeViewportBounds3D(nv, this._volumeBounds);
            if (!new3DBounds) {
                new3DBounds = nvBounds;
            }
            else {
                // Union of multiple 3D views (axis-aligned min/max per component)
                new3DBounds = {
                    min: [
                        Math.min(new3DBounds.min[0], nvBounds.min[0]),
                        Math.min(new3DBounds.min[1], nvBounds.min[1]),
                        Math.min(new3DBounds.min[2], nvBounds.min[2]),
                    ],
                    max: [
                        Math.max(new3DBounds.max[0], nvBounds.max[0]),
                        Math.max(new3DBounds.max[1], nvBounds.max[1]),
                        Math.max(new3DBounds.max[2], nvBounds.max[2]),
                    ],
                };
            }
        }
        else if (this._isSlabSliceType(state.currentSliceType)) {
            // 2D slice mode: compute from pan/zoom
            const sliceType = state.currentSliceType;
            const slabState = this._slabBuffers.get(sliceType);
            const normScale = slabState?.normalizationScale ?? 1.0;
            const nvBounds = computeViewportBounds2D(nv, state.currentSliceType, this._volumeBounds, normScale);
            newSlabBounds.set(sliceType, nvBounds);
        }
    }
    // Check if 3D bounds changed.
    // `!a !== !b` detects a null ↔ non-null transition; otherwise both
    // present and compared with tolerance via boundsApproxEqual.
    const bounds3DChanged = !new3DBounds !== !this._viewportBounds3D ||
        (new3DBounds &&
            this._viewportBounds3D &&
            !boundsApproxEqual(new3DBounds, this._viewportBounds3D));
    // Check if any slab bounds changed
    let slabBoundsChanged = false;
    for (const [sliceType, newBounds] of newSlabBounds) {
        const oldBounds = this._viewportBoundsPerSlab.get(sliceType) ?? null;
        if (!newBounds !== !oldBounds ||
            (newBounds && oldBounds && !boundsApproxEqual(newBounds, oldBounds))) {
            slabBoundsChanged = true;
            break;
        }
    }
    // Nothing moved beyond tolerance — skip the expensive refetch path.
    if (!bounds3DChanged && !slabBoundsChanged)
        return;
    // Update stored bounds
    this._viewportBounds3D = new3DBounds;
    for (const [sliceType, bounds] of newSlabBounds) {
        this._viewportBoundsPerSlab.set(sliceType, bounds);
    }
    // Recompute 3D resolution selection with new 3D viewport bounds
    if (bounds3DChanged) {
        const selection = selectResolution(this.multiscales, this.maxPixels, this._clipPlanes, this._volumeBounds, this._viewportBounds3D ?? undefined);
        if (selection.levelIndex !== this.targetLevelIndex) {
            this.targetLevelIndex = selection.levelIndex;
            this.populateVolume(true, "viewportChanged");
        }
    }
    // Reload slabs with new per-slab viewport bounds
    if (slabBoundsChanged) {
        this._reloadAllSlabs("viewportChanged");
    }
}
|
|
992
|
+
/**
 * Reload all active slabs (for all slice types that have buffers).
 *
 * @param trigger - Reason string forwarded to the debounced slab reload.
 */
_reloadAllSlabs(trigger) {
    for (const [sliceType, slabState] of this._slabBuffers) {
        // Find the world coordinate for this slab from any attached NV in this mode
        for (const [nv, attachedState] of this._attachedNiivues) {
            if (this._isSlabSliceType(attachedState.currentSliceType) &&
                attachedState.currentSliceType === sliceType) {
                const crosshairPos = nv.scene?.crosshairPos;
                // Skip NVs with no crosshair / no loaded volume and keep
                // looking for another NV in the same slice mode.
                if (!crosshairPos || nv.volumes.length === 0)
                    continue;
                try {
                    const mm = nv.frac2mm([
                        crosshairPos[0],
                        crosshairPos[1],
                        crosshairPos[2],
                    ]);
                    // frac2mm returns values in the slab NVImage's mm space, which
                    // is normalized (world * normalizationScale). Convert back to
                    // physical world coordinates for worldToPixel and other callers.
                    const ns = slabState.normalizationScale;
                    const worldCoord = [
                        mm[0] / ns,
                        mm[1] / ns,
                        mm[2] / ns,
                    ];
                    this._debouncedSlabReload(sliceType, worldCoord, trigger);
                }
                catch {
                    // Can't convert coordinates yet (best-effort: skip this slab)
                }
                // Only the first usable NV per slice type drives the reload.
                break;
            }
        }
    }
}
|
|
1029
|
+
/**
|
|
1030
|
+
* Get whether the image is currently loading.
|
|
1031
|
+
*/
|
|
1032
|
+
getIsLoading() {
|
|
1033
|
+
return this.isLoading;
|
|
1034
|
+
}
|
|
1035
|
+
/**
|
|
1036
|
+
* Wait for all pending fetches to complete.
|
|
1037
|
+
*/
|
|
1038
|
+
async waitForIdle() {
|
|
1039
|
+
await this.coalescer.onIdle();
|
|
1040
|
+
}
|
|
1041
|
+
// ============================================================
|
|
1042
|
+
// OMERO Metadata (Visualization Parameters)
|
|
1043
|
+
// ============================================================
|
|
1044
|
+
/**
|
|
1045
|
+
* Get OMERO metadata (if available).
|
|
1046
|
+
*
|
|
1047
|
+
* Returns the existing OMERO metadata from the OME-Zarr file,
|
|
1048
|
+
* or the computed OMERO metadata if none was present in the file.
|
|
1049
|
+
*
|
|
1050
|
+
* OMERO metadata includes per-channel visualization parameters:
|
|
1051
|
+
* - window.min/max: The actual data range
|
|
1052
|
+
* - window.start/end: The display window (based on quantiles)
|
|
1053
|
+
* - color: Hex color for the channel
|
|
1054
|
+
* - label: Channel name
|
|
1055
|
+
*
|
|
1056
|
+
* @returns OMERO metadata or undefined if not yet loaded/computed
|
|
1057
|
+
*/
|
|
1058
|
+
getOmero() {
|
|
1059
|
+
return this._omero;
|
|
1060
|
+
}
|
|
1061
|
+
/**
|
|
1062
|
+
* Get the active channel index used for OMERO window selection.
|
|
1063
|
+
*
|
|
1064
|
+
* For multi-channel images, this determines which channel's
|
|
1065
|
+
* cal_min/cal_max values are applied to the NiiVue display.
|
|
1066
|
+
*
|
|
1067
|
+
* @returns Current active channel index (0-based)
|
|
1068
|
+
*/
|
|
1069
|
+
getActiveChannel() {
|
|
1070
|
+
return this._activeChannel;
|
|
1071
|
+
}
|
|
1072
|
+
/**
|
|
1073
|
+
* Set the active channel for OMERO window selection.
|
|
1074
|
+
*
|
|
1075
|
+
* For multi-channel images, this determines which channel's
|
|
1076
|
+
* window (cal_min/cal_max) values are applied to the NiiVue display.
|
|
1077
|
+
*
|
|
1078
|
+
* Changing the active channel immediately updates the display intensity
|
|
1079
|
+
* range and refreshes the NiiVue rendering.
|
|
1080
|
+
*
|
|
1081
|
+
* @param index - Channel index (0-based)
|
|
1082
|
+
* @throws Error if no OMERO metadata is available
|
|
1083
|
+
* @throws Error if index is out of range
|
|
1084
|
+
*
|
|
1085
|
+
* @example
|
|
1086
|
+
* ```typescript
|
|
1087
|
+
* // Get number of channels
|
|
1088
|
+
* const omero = image.getOmero();
|
|
1089
|
+
* if (omero) {
|
|
1090
|
+
* console.log(`${omero.channels.length} channels available`);
|
|
1091
|
+
* // Switch to channel 1
|
|
1092
|
+
* image.setActiveChannel(1);
|
|
1093
|
+
* }
|
|
1094
|
+
* ```
|
|
1095
|
+
*/
|
|
1096
|
+
setActiveChannel(index) {
|
|
1097
|
+
if (!this._omero?.channels?.length) {
|
|
1098
|
+
throw new Error("No OMERO metadata available");
|
|
1099
|
+
}
|
|
1100
|
+
if (index < 0 || index >= this._omero.channels.length) {
|
|
1101
|
+
throw new Error(`Invalid channel index: ${index} (have ${this._omero.channels.length} channels)`);
|
|
1102
|
+
}
|
|
1103
|
+
this._activeChannel = index;
|
|
1104
|
+
this.applyOmeroToHeader();
|
|
1105
|
+
this.niivue.updateGLVolume();
|
|
1106
|
+
this._widenCalRangeIfNeeded(this);
|
|
1107
|
+
}
|
|
1108
|
+
// ============================================================
|
|
1109
|
+
// Multi-NV / Slab Buffer Management
|
|
1110
|
+
// ============================================================
|
|
1111
|
+
/**
 * Attach a Niivue instance for slice-type-aware rendering.
 *
 * The image auto-detects the NV's current slice type and hooks into
 * `onOptsChange` to track mode changes and `onLocationChange` to track
 * crosshair/slice position changes.
 *
 * When the NV is in a 2D slice mode (Axial, Coronal, Sagittal), the image
 * loads a slab (one chunk thick in the orthogonal direction) at the current
 * slice position, using a 2D pixel budget for resolution selection.
 *
 * @param nv - The Niivue instance to attach
 */
attachNiivue(nv) {
    if (this._attachedNiivues.has(nv))
        return; // Already attached
    // Per-NV bookkeeping: slice type plus the NV's original callbacks so
    // detachNiivue can restore them.
    const state = {
        nv,
        currentSliceType: this._detectSliceType(nv),
        previousOnLocationChange: nv.onLocationChange,
        previousOnOptsChange: nv
            .onOptsChange,
    };
    // Hook onOptsChange to detect slice type changes
    nv.onOptsChange = (propertyName, newValue, oldValue) => {
        // Chain to previous handler
        if (state.previousOnOptsChange) {
            state.previousOnOptsChange(propertyName, newValue, oldValue);
        }
        if (propertyName === "sliceType") {
            this._handleSliceTypeChange(nv, newValue);
        }
    };
    // Hook onLocationChange to detect slice position changes
    nv.onLocationChange = (location) => {
        // Chain to previous handler
        if (state.previousOnLocationChange) {
            state.previousOnLocationChange(location);
        }
        this._handleLocationChange(nv, location);
    };
    this._attachedNiivues.set(nv, state);
    // Hook viewport events if viewport-aware mode is already enabled
    if (this._viewportAwareEnabled) {
        this._hookViewportEvents(nv, state);
    }
    // Override NiiVue's hardcoded 3D zoom clamp (always-on)
    this._hookZoomOverride(nv, state);
    // If the NV is already in a 2D slice mode, set up the slab buffer
    const sliceType = state.currentSliceType;
    if (this._isSlabSliceType(sliceType)) {
        this._ensureSlabForNiivue(nv, sliceType);
    }
}
|
|
1165
|
+
/**
|
|
1166
|
+
* Detach a Niivue instance, restoring its original callbacks.
|
|
1167
|
+
*
|
|
1168
|
+
* @param nv - The Niivue instance to detach
|
|
1169
|
+
*/
|
|
1170
|
+
detachNiivue(nv) {
|
|
1171
|
+
const state = this._attachedNiivues.get(nv);
|
|
1172
|
+
if (!state)
|
|
1173
|
+
return;
|
|
1174
|
+
// Unhook viewport events if active
|
|
1175
|
+
this._unhookViewportEvents(nv, state);
|
|
1176
|
+
// Unhook 3D zoom override
|
|
1177
|
+
this._unhookZoomOverride(nv, state);
|
|
1178
|
+
// Restore original callbacks
|
|
1179
|
+
nv.onLocationChange = state.previousOnLocationChange ?? (() => { });
|
|
1180
|
+
nv.onOptsChange =
|
|
1181
|
+
(state.previousOnOptsChange ?? (() => { }));
|
|
1182
|
+
this._attachedNiivues.delete(nv);
|
|
1183
|
+
}
|
|
1184
|
+
/**
|
|
1185
|
+
* Get the slab buffer state for a given slice type, if it exists.
|
|
1186
|
+
* Useful for testing and inspection.
|
|
1187
|
+
*
|
|
1188
|
+
* @param sliceType - The slice type to query
|
|
1189
|
+
* @returns The slab buffer state, or undefined if not yet created
|
|
1190
|
+
*/
|
|
1191
|
+
getSlabBufferState(sliceType) {
|
|
1192
|
+
return this._slabBuffers.get(sliceType);
|
|
1193
|
+
}
|
|
1194
|
+
/**
|
|
1195
|
+
* Get all attached Niivue instances.
|
|
1196
|
+
*/
|
|
1197
|
+
getAttachedNiivues() {
|
|
1198
|
+
return Array.from(this._attachedNiivues.keys());
|
|
1199
|
+
}
|
|
1200
|
+
// ---- Private slab helpers ----
|
|
1201
|
+
/**
|
|
1202
|
+
* Detect the current slice type of a Niivue instance.
|
|
1203
|
+
*/
|
|
1204
|
+
_detectSliceType(nv) {
|
|
1205
|
+
// Access the opts.sliceType via the scene data or fall back to checking
|
|
1206
|
+
// the convenience properties. Niivue stores the current sliceType in opts.
|
|
1207
|
+
// We can read it from the NV instance's internal opts.
|
|
1208
|
+
const opts = nv.opts;
|
|
1209
|
+
if (opts && typeof opts.sliceType === "number") {
|
|
1210
|
+
return opts.sliceType;
|
|
1211
|
+
}
|
|
1212
|
+
// Default to Render
|
|
1213
|
+
return SLICE_TYPE.RENDER;
|
|
1214
|
+
}
|
|
1215
|
+
/**
|
|
1216
|
+
* Check if a slice type is one of the 2D slab types.
|
|
1217
|
+
*/
|
|
1218
|
+
_isSlabSliceType(st) {
|
|
1219
|
+
return (st === SLICE_TYPE.AXIAL ||
|
|
1220
|
+
st === SLICE_TYPE.CORONAL ||
|
|
1221
|
+
st === SLICE_TYPE.SAGITTAL);
|
|
1222
|
+
}
|
|
1223
|
+
/**
|
|
1224
|
+
* Get the orthogonal axis index for a slab slice type.
|
|
1225
|
+
* Returns index in [z, y, x] order:
|
|
1226
|
+
* - Axial: slicing through Z → orthogonal axis = 0 (Z)
|
|
1227
|
+
* - Coronal: slicing through Y → orthogonal axis = 1 (Y)
|
|
1228
|
+
* - Sagittal: slicing through X → orthogonal axis = 2 (X)
|
|
1229
|
+
*/
|
|
1230
|
+
_getOrthogonalAxis(sliceType) {
|
|
1231
|
+
switch (sliceType) {
|
|
1232
|
+
case SLICE_TYPE.AXIAL:
|
|
1233
|
+
return 0; // Z
|
|
1234
|
+
case SLICE_TYPE.CORONAL:
|
|
1235
|
+
return 1; // Y
|
|
1236
|
+
case SLICE_TYPE.SAGITTAL:
|
|
1237
|
+
return 2; // X
|
|
1238
|
+
}
|
|
1239
|
+
}
|
|
1240
|
+
/**
|
|
1241
|
+
* Handle a slice type change on an attached Niivue instance.
|
|
1242
|
+
*/
|
|
1243
|
+
_handleSliceTypeChange(nv, newSliceType) {
|
|
1244
|
+
const state = this._attachedNiivues.get(nv);
|
|
1245
|
+
if (!state)
|
|
1246
|
+
return;
|
|
1247
|
+
const oldSliceType = state.currentSliceType;
|
|
1248
|
+
state.currentSliceType = newSliceType;
|
|
1249
|
+
if (oldSliceType === newSliceType)
|
|
1250
|
+
return;
|
|
1251
|
+
if (this._isSlabSliceType(newSliceType)) {
|
|
1252
|
+
// Switching TO a 2D slab mode: swap in the slab NVImage
|
|
1253
|
+
this._ensureSlabForNiivue(nv, newSliceType);
|
|
1254
|
+
}
|
|
1255
|
+
else {
|
|
1256
|
+
// Switching TO Render or Multiplanar mode: swap back to the main (3D) NVImage
|
|
1257
|
+
this._swapVolumeInNiivue(nv, this);
|
|
1258
|
+
}
|
|
1259
|
+
}
|
|
1260
|
+
/**
 * Handle location (crosshair) change on an attached Niivue instance.
 * Checks if the current slice position has moved outside the loaded slab.
 *
 * @param nv - The Niivue instance whose crosshair moved
 * @param _location - Location payload from NiiVue (unused; position is read
 *   from nv.scene.crosshairPos instead)
 */
_handleLocationChange(nv, _location) {
    const state = this._attachedNiivues.get(nv);
    // Only relevant in 2D slab modes.
    if (!state || !this._isSlabSliceType(state.currentSliceType))
        return;
    const sliceType = state.currentSliceType;
    const slabState = this._slabBuffers.get(sliceType);
    if (!slabState || slabState.slabStart < 0)
        return; // Slab not yet created or loaded
    // Get the current crosshair position in fractional coordinates [0..1]
    const crosshairPos = nv.scene?.crosshairPos;
    if (!crosshairPos || nv.volumes.length === 0)
        return;
    let worldCoord;
    try {
        const mm = nv.frac2mm([
            crosshairPos[0],
            crosshairPos[1],
            crosshairPos[2],
        ]);
        // frac2mm returns values in the slab NVImage's normalized mm space
        // (world * normalizationScale). Convert back to physical world.
        const ns = slabState.normalizationScale;
        worldCoord = [mm[0] / ns, mm[1] / ns, mm[2] / ns];
    }
    catch {
        return; // Can't convert coordinates yet
    }
    // Convert world to pixel at the slab's current resolution level
    const ngffImage = this.multiscales.images[slabState.levelIndex];
    const pixelCoord = worldToPixel(worldCoord, ngffImage);
    // Check the orthogonal axis (the axis the slab is one chunk thick in)
    const orthAxis = this._getOrthogonalAxis(sliceType);
    const pixelPos = pixelCoord[orthAxis];
    // Is the pixel position outside the currently loaded slab?
    // slabEnd is exclusive, hence >=.
    if (pixelPos < slabState.slabStart || pixelPos >= slabState.slabEnd) {
        // Need to reload the slab for the new position
        this._debouncedSlabReload(sliceType, worldCoord);
    }
}
|
|
1303
|
+
/**
|
|
1304
|
+
* Debounced slab reload to avoid excessive reloading during scrolling.
|
|
1305
|
+
*/
|
|
1306
|
+
_debouncedSlabReload(sliceType, worldCoord, trigger = "sliceChanged") {
|
|
1307
|
+
// Clear any pending reload for this slice type
|
|
1308
|
+
const existing = this._slabReloadTimeouts.get(sliceType);
|
|
1309
|
+
if (existing)
|
|
1310
|
+
clearTimeout(existing);
|
|
1311
|
+
const timeout = setTimeout(() => {
|
|
1312
|
+
this._slabReloadTimeouts.delete(sliceType);
|
|
1313
|
+
void this._loadSlab(sliceType, worldCoord, trigger);
|
|
1314
|
+
}, 100); // Short debounce for slice scrolling (faster than clip plane debounce)
|
|
1315
|
+
this._slabReloadTimeouts.set(sliceType, timeout);
|
|
1316
|
+
}
|
|
1317
|
+
/**
|
|
1318
|
+
* Ensure a slab buffer exists and is loaded for the given NV + slice type.
|
|
1319
|
+
* If needed, creates the slab buffer and triggers an initial load.
|
|
1320
|
+
*/
|
|
1321
|
+
_ensureSlabForNiivue(nv, sliceType) {
|
|
1322
|
+
let slabState = this._slabBuffers.get(sliceType);
|
|
1323
|
+
if (!slabState) {
|
|
1324
|
+
// Lazily create the slab buffer
|
|
1325
|
+
slabState = this._createSlabBuffer(sliceType);
|
|
1326
|
+
this._slabBuffers.set(sliceType, slabState);
|
|
1327
|
+
}
|
|
1328
|
+
// Swap the slab's NVImage into this NV instance
|
|
1329
|
+
this._swapVolumeInNiivue(nv, slabState.nvImage);
|
|
1330
|
+
// Get the current crosshair position and load the slab.
|
|
1331
|
+
// Use the volume bounds center as a fallback if crosshair isn't available yet.
|
|
1332
|
+
let worldCoord;
|
|
1333
|
+
try {
|
|
1334
|
+
const crosshairPos = nv.scene?.crosshairPos;
|
|
1335
|
+
if (crosshairPos && nv.volumes.length > 0) {
|
|
1336
|
+
const mm = nv.frac2mm([
|
|
1337
|
+
crosshairPos[0],
|
|
1338
|
+
crosshairPos[1],
|
|
1339
|
+
crosshairPos[2],
|
|
1340
|
+
]);
|
|
1341
|
+
worldCoord = [mm[0], mm[1], mm[2]];
|
|
1342
|
+
}
|
|
1343
|
+
else {
|
|
1344
|
+
// Fall back to volume center
|
|
1345
|
+
worldCoord = [
|
|
1346
|
+
(this._volumeBounds.min[0] + this._volumeBounds.max[0]) / 2,
|
|
1347
|
+
(this._volumeBounds.min[1] + this._volumeBounds.max[1]) / 2,
|
|
1348
|
+
(this._volumeBounds.min[2] + this._volumeBounds.max[2]) / 2,
|
|
1349
|
+
];
|
|
1350
|
+
}
|
|
1351
|
+
}
|
|
1352
|
+
catch {
|
|
1353
|
+
// Fall back to volume center if frac2mm fails
|
|
1354
|
+
worldCoord = [
|
|
1355
|
+
(this._volumeBounds.min[0] + this._volumeBounds.max[0]) / 2,
|
|
1356
|
+
(this._volumeBounds.min[1] + this._volumeBounds.max[1]) / 2,
|
|
1357
|
+
(this._volumeBounds.min[2] + this._volumeBounds.max[2]) / 2,
|
|
1358
|
+
];
|
|
1359
|
+
}
|
|
1360
|
+
void this._loadSlab(sliceType, worldCoord, "initial").catch((err) => {
|
|
1361
|
+
console.error(`[fidnii] Error loading slab for ${SLICE_TYPE[sliceType]}:`, err);
|
|
1362
|
+
});
|
|
1363
|
+
}
|
|
1364
|
+
/**
|
|
1365
|
+
* Create a new slab buffer state for a slice type.
|
|
1366
|
+
*/
|
|
1367
|
+
_createSlabBuffer(sliceType) {
|
|
1368
|
+
const bufferManager = new BufferManager(this.maxPixels, this.dtype);
|
|
1369
|
+
const nvImage = new NVImage();
|
|
1370
|
+
// Initialize with placeholder NIfTI header (same as main image setup)
|
|
1371
|
+
const hdr = new NIFTI1();
|
|
1372
|
+
nvImage.hdr = hdr;
|
|
1373
|
+
hdr.dims = [3, 1, 1, 1, 1, 1, 1, 1];
|
|
1374
|
+
hdr.datatypeCode = getNiftiDataType(this.dtype);
|
|
1375
|
+
hdr.numBitsPerVoxel = getBytesPerPixel(this.dtype) * 8;
|
|
1376
|
+
hdr.pixDims = [1, 1, 1, 1, 0, 0, 0, 0];
|
|
1377
|
+
hdr.affine = [
|
|
1378
|
+
[1, 0, 0, 0],
|
|
1379
|
+
[0, 1, 0, 0],
|
|
1380
|
+
[0, 0, 1, 0],
|
|
1381
|
+
[0, 0, 0, 1],
|
|
1382
|
+
];
|
|
1383
|
+
hdr.sform_code = 1;
|
|
1384
|
+
nvImage.name = `${this.name ?? "OME-Zarr"} [${SLICE_TYPE[sliceType]}]`;
|
|
1385
|
+
nvImage.img = bufferManager.resize([1, 1, 1]);
|
|
1386
|
+
nvImage._colormap = "gray";
|
|
1387
|
+
nvImage._opacity = 1.0;
|
|
1388
|
+
// Select initial resolution using 2D pixel budget
|
|
1389
|
+
const orthAxis = this._getOrthogonalAxis(sliceType);
|
|
1390
|
+
const selection = select2DResolution(this.multiscales, this.maxPixels, this._clipPlanes, this._volumeBounds, orthAxis);
|
|
1391
|
+
return {
|
|
1392
|
+
nvImage,
|
|
1393
|
+
bufferManager,
|
|
1394
|
+
levelIndex: this.multiscales.images.length - 1, // Start at lowest
|
|
1395
|
+
targetLevelIndex: selection.levelIndex,
|
|
1396
|
+
slabStart: -1,
|
|
1397
|
+
slabEnd: -1,
|
|
1398
|
+
isLoading: false,
|
|
1399
|
+
dtype: this.dtype,
|
|
1400
|
+
normalizationScale: 1.0, // Updated on first slab load
|
|
1401
|
+
pendingReload: null,
|
|
1402
|
+
};
|
|
1403
|
+
}
|
|
1404
|
+
/**
|
|
1405
|
+
* Swap the NVImage in a Niivue instance's volume list.
|
|
1406
|
+
* Removes any existing volumes from this OMEZarrNVImage and adds the target.
|
|
1407
|
+
*/
|
|
1408
|
+
_swapVolumeInNiivue(nv, targetVolume) {
|
|
1409
|
+
// Find and remove any volumes we own (the main image or any slab NVImages)
|
|
1410
|
+
const ourVolumes = new Set([this]);
|
|
1411
|
+
for (const slab of this._slabBuffers.values()) {
|
|
1412
|
+
ourVolumes.add(slab.nvImage);
|
|
1413
|
+
}
|
|
1414
|
+
// Remove our volumes from nv (in reverse to avoid index shifting issues)
|
|
1415
|
+
const toRemove = nv.volumes.filter((v) => ourVolumes.has(v));
|
|
1416
|
+
for (const vol of toRemove) {
|
|
1417
|
+
try {
|
|
1418
|
+
nv.removeVolume(vol);
|
|
1419
|
+
}
|
|
1420
|
+
catch {
|
|
1421
|
+
// Ignore errors during removal (volume may not be fully initialized)
|
|
1422
|
+
}
|
|
1423
|
+
}
|
|
1424
|
+
// Add the target volume if not already present
|
|
1425
|
+
if (!nv.volumes.includes(targetVolume)) {
|
|
1426
|
+
try {
|
|
1427
|
+
nv.addVolume(targetVolume);
|
|
1428
|
+
}
|
|
1429
|
+
catch (err) {
|
|
1430
|
+
console.warn("[fidnii] Failed to add volume to NV:", err);
|
|
1431
|
+
return;
|
|
1432
|
+
}
|
|
1433
|
+
}
|
|
1434
|
+
try {
|
|
1435
|
+
nv.updateGLVolume();
|
|
1436
|
+
this._widenCalRangeIfNeeded(targetVolume);
|
|
1437
|
+
}
|
|
1438
|
+
catch {
|
|
1439
|
+
// May fail if GL context not ready
|
|
1440
|
+
}
|
|
1441
|
+
}
|
|
1442
|
+
/**
 * Load a slab for a 2D slice type at the given world position.
 *
 * The slab is one chunk thick in the orthogonal direction and uses
 * the full in-plane extent (respecting clip planes).
 *
 * Loading follows a progressive strategy: preview (lowest res) then target.
 * For viewport-triggered reloads, progressive rendering is skipped and
 * only the target level is loaded (the user already sees the previous
 * resolution, so a single jump is smoother).
 *
 * If a load is already in progress, the request is queued (latest-wins)
 * and automatically drained when the current load finishes.
 *
 * @param sliceType - 2D slab slice type being loaded
 * @param worldCoord - World position the slab must contain
 * @param trigger - What initiated the load ("initial", "sliceChanged",
 *   "viewportChanged", ...); "viewportChanged" skips progressive levels
 */
async _loadSlab(sliceType, worldCoord, trigger) {
    const slabState = this._slabBuffers.get(sliceType);
    if (!slabState)
        return;
    if (slabState.isLoading) {
        // Queue this request (latest wins) — auto-drained when current load finishes
        slabState.pendingReload = { worldCoord, trigger };
        // Abort the in-flight progressive load so it finishes faster
        const controller = this._slabAbortControllers.get(sliceType);
        if (controller)
            controller.abort();
        return;
    }
    // Mark busy BEFORE any await so concurrent calls take the queue path above.
    slabState.isLoading = true;
    slabState.pendingReload = null;
    // Create an AbortController for this load so it can be cancelled if a
    // newer request arrives while we're still fetching intermediate levels.
    const abortController = new AbortController();
    this._slabAbortControllers.set(sliceType, abortController);
    this._emitEvent("slabLoadingStart", {
        sliceType,
        levelIndex: slabState.targetLevelIndex,
        trigger,
    });
    try {
        const orthAxis = this._getOrthogonalAxis(sliceType);
        // Recompute target resolution using 2D pixel budget with per-slab viewport bounds
        const slabViewportBounds = this._viewportBoundsPerSlab.get(sliceType) ??
            undefined;
        const selection = select2DResolution(this.multiscales, this.maxPixels, this._clipPlanes, this._volumeBounds, orthAxis, slabViewportBounds);
        slabState.targetLevelIndex = selection.levelIndex;
        const numLevels = this.multiscales.images.length;
        const lowestLevel = numLevels - 1;
        // For viewport-triggered reloads, skip progressive rendering — jump
        // straight to the target level. The user already sees the previous
        // resolution, so a single update is smoother than replaying the full
        // progressive sequence which causes visual flicker during rapid
        // zoom/pan interactions.
        const skipProgressive = trigger === "viewportChanged";
        const startLevel = skipProgressive
            ? slabState.targetLevelIndex
            : lowestLevel;
        // Walk from coarse to fine (higher index = lower resolution).
        for (let level = startLevel; level >= slabState.targetLevelIndex; level--) {
            // Check if this load has been superseded by a newer request
            if (abortController.signal.aborted)
                break;
            await this._loadSlabAtLevel(slabState, sliceType, worldCoord, level, orthAxis, trigger);
            // Check again after the async fetch completes
            if (abortController.signal.aborted)
                break;
            // Only record the level as current once its data has landed.
            slabState.levelIndex = level;
            // Yield to the browser so the current level is actually painted before
            // we start fetching the next (higher-resolution) level.
            if (level > slabState.targetLevelIndex) {
                await new Promise((resolve) => requestAnimationFrame(() => resolve()));
            }
        }
    }
    finally {
        // Always clear the busy flag and emit completion, even on abort/error.
        slabState.isLoading = false;
        this._emitEvent("slabLoadingComplete", {
            sliceType,
            levelIndex: slabState.levelIndex,
            slabStart: slabState.slabStart,
            slabEnd: slabState.slabEnd,
            trigger,
        });
        // Auto-drain: if a newer request was queued while we were loading,
        // start it now (like populateVolume's handlePendingPopulateRequest).
        this._handlePendingSlabReload(sliceType);
    }
}
|
|
1528
|
+
/**
|
|
1529
|
+
* Process any pending slab reload request after the current load completes.
|
|
1530
|
+
* Mirrors populateVolume's handlePendingPopulateRequest pattern.
|
|
1531
|
+
*/
|
|
1532
|
+
_handlePendingSlabReload(sliceType) {
|
|
1533
|
+
const slabState = this._slabBuffers.get(sliceType);
|
|
1534
|
+
if (!slabState)
|
|
1535
|
+
return;
|
|
1536
|
+
const pending = slabState.pendingReload;
|
|
1537
|
+
if (pending) {
|
|
1538
|
+
slabState.pendingReload = null;
|
|
1539
|
+
void this._loadSlab(sliceType, pending.worldCoord, pending.trigger);
|
|
1540
|
+
}
|
|
1541
|
+
}
|
|
1542
|
+
/**
 * Load slab data at a specific resolution level.
 *
 * Computes a chunk-aligned, one-chunk-thick region along the orthogonal
 * axis containing worldCoord, fetches it, copies it into the slab buffer,
 * rewrites the slab NVImage header for the new region, and refreshes every
 * attached Niivue instance currently showing this slice type.
 *
 * @param slabState - Slab buffer state being (re)populated
 * @param sliceType - Slice type this slab serves (used for logging/lookup)
 * @param worldCoord - World position the slab must contain
 * @param levelIndex - Multiscale resolution level to fetch
 * @param orthAxis - Axis orthogonal to the slice plane (index into [z,y,x])
 */
async _loadSlabAtLevel(slabState, sliceType, worldCoord, levelIndex, orthAxis, _trigger) {
    const ngffImage = this.multiscales.images[levelIndex];
    const chunkShape = getChunkShape(ngffImage);
    const volumeShape = getVolumeShape(ngffImage);
    // Convert world position to pixel position at this level
    const pixelCoord = worldToPixel(worldCoord, ngffImage);
    const orthPixel = pixelCoord[orthAxis];
    // Find the chunk-aligned slab in the orthogonal axis: one chunk thick,
    // clamped to the volume extent.
    const chunkSize = chunkShape[orthAxis];
    const slabStart = Math.max(0, Math.floor(orthPixel / chunkSize) * chunkSize);
    const slabEnd = Math.min(slabStart + chunkSize, volumeShape[orthAxis]);
    // Get the full in-plane region (respecting clip planes only).
    // Viewport bounds are intentionally NOT passed here — they are used only
    // for resolution selection (in _loadSlab → select2DResolution) so that a
    // higher-res level is chosen when zoomed in. The fetch region always
    // covers the full in-plane extent so the slab fills the entire viewport.
    const pixelRegion = clipPlanesToPixelRegion(this._clipPlanes, this._volumeBounds, ngffImage);
    const alignedRegion = alignToChunks(pixelRegion, ngffImage);
    // Override the orthogonal axis with our slab extent (coords are [z, y, x])
    const fetchStart = [
        alignedRegion.chunkAlignedStart[0],
        alignedRegion.chunkAlignedStart[1],
        alignedRegion.chunkAlignedStart[2],
    ];
    const fetchEnd = [
        alignedRegion.chunkAlignedEnd[0],
        alignedRegion.chunkAlignedEnd[1],
        alignedRegion.chunkAlignedEnd[2],
    ];
    fetchStart[orthAxis] = slabStart;
    fetchEnd[orthAxis] = slabEnd;
    const fetchedShape = [
        fetchEnd[0] - fetchStart[0],
        fetchEnd[1] - fetchStart[1],
        fetchEnd[2] - fetchStart[2],
    ];
    // Fetch the data (coalescer dedupes/merges overlapping in-flight requests)
    const fetchRegion = { start: fetchStart, end: fetchEnd };
    const result = await this.coalescer.fetchRegion(ngffImage, levelIndex, fetchRegion, `slab-${SLICE_TYPE[sliceType]}-${levelIndex}`);
    // Resize buffer and copy data
    const targetData = slabState.bufferManager.resize(fetchedShape);
    targetData.set(result.data);
    slabState.nvImage.img = slabState.bufferManager.getTypedArray();
    // Update slab position tracking
    slabState.slabStart = slabStart;
    slabState.slabEnd = slabEnd;
    // Update the NVImage header for this slab region
    this._updateSlabHeader(slabState.nvImage, ngffImage, fetchStart, fetchEnd, fetchedShape);
    // Apply OMERO metadata if available
    if (this._omero) {
        this._applyOmeroToSlabHeader(slabState.nvImage);
    }
    // Reset global_min so NiiVue recalculates intensity ranges
    slabState.nvImage.global_min = undefined;
    // Compute the normalization scale used by _updateSlabHeader so we can
    // convert the world coordinate into the slab's normalized mm space.
    const scale = ngffImage.scale;
    const maxVoxelSize = Math.max(scale.x ?? scale.X ?? 1, scale.y ?? scale.Y ?? 1, scale.z ?? scale.Z ?? 1);
    const normalizationScale = maxVoxelSize > 0 ? 1.0 / maxVoxelSize : 1.0;
    slabState.normalizationScale = normalizationScale;
    const normalizedMM = [
        worldCoord[0] * normalizationScale,
        worldCoord[1] * normalizationScale,
        worldCoord[2] * normalizationScale,
    ];
    // Refresh all NV instances using this slice type
    for (const [attachedNv, attachedState] of this._attachedNiivues) {
        if (this._isSlabSliceType(attachedState.currentSliceType) &&
            attachedState.currentSliceType === sliceType) {
            // Ensure this NV has the slab volume
            if (attachedNv.volumes.includes(slabState.nvImage)) {
                attachedNv.updateGLVolume();
                // Widen the display window if actual data exceeds the OMERO range.
                // Must run after updateGLVolume() which computes global_min/global_max.
                this._widenCalRangeIfNeeded(slabState.nvImage);
                // Position the crosshair at the correct slice within this slab.
                // Without this, NiiVue defaults to the center of the slab which
                // corresponds to different physical positions at each resolution level.
                const frac = attachedNv.mm2frac(normalizedMM);
                // Clamp to [0,1] — when viewport-aware mode constrains the slab to
                // a subregion, the crosshair world position may be outside the slab's
                // spatial extent, causing mm2frac to return out-of-range values.
                frac[0] = Math.max(0, Math.min(1, frac[0]));
                frac[1] = Math.max(0, Math.min(1, frac[1]));
                frac[2] = Math.max(0, Math.min(1, frac[2]));
                attachedNv.scene.crosshairPos = frac;
                attachedNv.drawScene();
            }
        }
    }
}
|
|
1636
|
+
/**
 * Update a slab NVImage's NIfTI header for a newly fetched region.
 *
 * Sets dims/pixDims/affine so the slab is positioned correctly in space,
 * then recomputes the RAS orientation. Voxel sizes and the affine are
 * uniformly rescaled ("normalized") so the largest voxel dimension is ~1mm.
 *
 * @param nvImage - Slab NVImage whose header is rewritten (no-op if hdr missing)
 * @param ngffImage - Multiscale level the region was fetched from
 * @param fetchStart - Region start in pixel coords, [z, y, x] order
 * @param _fetchEnd - Unused; shape is taken from fetchedShape instead
 * @param fetchedShape - Region shape, [z, y, x] order
 */
_updateSlabHeader(nvImage, ngffImage, fetchStart, _fetchEnd, fetchedShape) {
    if (!nvImage.hdr)
        return;
    // Physical voxel sizes; metadata may use lower- or upper-case axis keys.
    const scale = ngffImage.scale;
    const sx = scale.x ?? scale.X ?? 1;
    const sy = scale.y ?? scale.Y ?? 1;
    const sz = scale.z ?? scale.Z ?? 1;
    // NiiVue's 2D slice renderer has precision issues when voxel sizes are
    // very small (e.g. OME-Zarr datasets in meters where pixDims ~ 2e-5).
    // Since the slab NVImage is rendered independently in its own Niivue
    // instance, we can normalize coordinates to ~1mm voxels without affecting
    // the 3D render. We scale uniformly to preserve aspect ratio.
    const maxVoxelSize = Math.max(sx, sy, sz);
    const normalizationScale = maxVoxelSize > 0 ? 1.0 / maxVoxelSize : 1.0;
    const nsx = sx * normalizationScale;
    const nsy = sy * normalizationScale;
    const nsz = sz * normalizationScale;
    nvImage.hdr.pixDims = [1, nsx, nsy, nsz, 0, 0, 0, 0];
    // NIfTI dims: [ndim, x, y, z, t, ...] — note fetchedShape is [z, y, x],
    // so the order is reversed here.
    nvImage.hdr.dims = [
        3,
        fetchedShape[2],
        fetchedShape[1],
        fetchedShape[0],
        1,
        1,
        1,
        1,
    ];
    // Build affine with offset for region start, then normalize
    const affine = createAffineFromNgffImage(ngffImage);
    // Adjust translation for region offset (fetchStart is [z, y, x])
    affine[12] += fetchStart[2] * sx; // x offset
    affine[13] += fetchStart[1] * sy; // y offset
    affine[14] += fetchStart[0] * sz; // z offset
    // Apply normalization to the entire affine (scale columns + translation)
    for (let i = 0; i < 15; i++) {
        affine[i] *= normalizationScale;
    }
    // affine[15] stays 1
    const srows = affineToNiftiSrows(affine);
    nvImage.hdr.affine = [
        srows.srow_x,
        srows.srow_y,
        srows.srow_z,
        [0, 0, 0, 1],
    ];
    // sform_code 1: affine maps to a continuous scanner-anatomical space.
    nvImage.hdr.sform_code = 1;
    nvImage.calculateRAS();
}
|
|
1689
|
+
/**
|
|
1690
|
+
* Apply OMERO metadata to a slab NVImage header.
|
|
1691
|
+
*/
|
|
1692
|
+
_applyOmeroToSlabHeader(nvImage) {
|
|
1693
|
+
if (!nvImage.hdr || !this._omero?.channels?.length)
|
|
1694
|
+
return;
|
|
1695
|
+
const channelIndex = Math.min(this._activeChannel, this._omero.channels.length - 1);
|
|
1696
|
+
const channel = this._omero.channels[channelIndex];
|
|
1697
|
+
const window = channel?.window;
|
|
1698
|
+
if (window) {
|
|
1699
|
+
const calMin = window.start ?? window.min;
|
|
1700
|
+
const calMax = window.end ?? window.max;
|
|
1701
|
+
if (calMin !== undefined)
|
|
1702
|
+
nvImage.hdr.cal_min = calMin;
|
|
1703
|
+
if (calMax !== undefined)
|
|
1704
|
+
nvImage.hdr.cal_max = calMax;
|
|
1705
|
+
}
|
|
1706
|
+
}
|
|
1707
|
+
/**
|
|
1708
|
+
* Widen the display intensity range if the actual data exceeds the current
|
|
1709
|
+
* cal_min/cal_max window (typically set from OMERO metadata).
|
|
1710
|
+
*
|
|
1711
|
+
* OMERO window settings may have been computed at a lower resolution where
|
|
1712
|
+
* downsampling averaged out extreme voxels. At higher resolutions, individual
|
|
1713
|
+
* bright/dark voxels can exceed the OMERO range, causing clipping artifacts
|
|
1714
|
+
* (e.g., "banding" where bright structures clip to solid white).
|
|
1715
|
+
*
|
|
1716
|
+
* Widens cal_min/cal_max to global_min/global_max (actual data extremes at
|
|
1717
|
+
* the current resolution level) so no data is clipped. The hdr.cal_min/
|
|
1718
|
+
* cal_max values are NOT modified — they preserve the original OMERO values
|
|
1719
|
+
* for reuse on subsequent slab reloads.
|
|
1720
|
+
*
|
|
1721
|
+
* Must be called AFTER updateGLVolume() so that calMinMax() has computed
|
|
1722
|
+
* global_min/global_max from the actual slab data.
|
|
1723
|
+
*
|
|
1724
|
+
* @returns true if the display range was widened
|
|
1725
|
+
*/
|
|
1726
|
+
_widenCalRangeIfNeeded(nvImage) {
|
|
1727
|
+
if (nvImage.global_min === undefined || nvImage.global_max === undefined) {
|
|
1728
|
+
return false;
|
|
1729
|
+
}
|
|
1730
|
+
let widened = false;
|
|
1731
|
+
// Widen the runtime display range (cal_min/cal_max) to encompass the
|
|
1732
|
+
// actual data extremes (global_min/global_max) at this resolution level.
|
|
1733
|
+
// The hdr values are NOT modified so the original OMERO window is
|
|
1734
|
+
// preserved for next reload.
|
|
1735
|
+
if (nvImage.cal_max !== undefined &&
|
|
1736
|
+
nvImage.global_max > nvImage.cal_max) {
|
|
1737
|
+
nvImage.cal_max = nvImage.global_max;
|
|
1738
|
+
widened = true;
|
|
1739
|
+
}
|
|
1740
|
+
if (nvImage.cal_min !== undefined &&
|
|
1741
|
+
nvImage.global_min < nvImage.cal_min) {
|
|
1742
|
+
nvImage.cal_min = nvImage.global_min;
|
|
1743
|
+
widened = true;
|
|
1744
|
+
}
|
|
1745
|
+
return widened;
|
|
1746
|
+
}
|
|
1747
|
+
// ============================================================
|
|
1748
|
+
// Event System (Browser-native EventTarget API)
|
|
1749
|
+
// ============================================================
|
|
1750
|
+
/**
|
|
1751
|
+
* Add a type-safe event listener for OMEZarrNVImage events.
|
|
1752
|
+
*
|
|
1753
|
+
* @param type - Event type name
|
|
1754
|
+
* @param listener - Event listener function
|
|
1755
|
+
* @param options - Standard addEventListener options (once, signal, etc.)
|
|
1756
|
+
*
|
|
1757
|
+
* @example
|
|
1758
|
+
* ```typescript
|
|
1759
|
+
* image.addEventListener('resolutionChange', (event) => {
|
|
1760
|
+
* console.log('New level:', event.detail.currentLevel);
|
|
1761
|
+
* });
|
|
1762
|
+
*
|
|
1763
|
+
* // One-time listener
|
|
1764
|
+
* image.addEventListener('loadingComplete', handler, { once: true });
|
|
1765
|
+
*
|
|
1766
|
+
* // With AbortController
|
|
1767
|
+
* const controller = new AbortController();
|
|
1768
|
+
* image.addEventListener('loadingStart', handler, { signal: controller.signal });
|
|
1769
|
+
* controller.abort(); // removes the listener
|
|
1770
|
+
* ```
|
|
1771
|
+
*/
|
|
1772
|
+
addEventListener(type, listener, options) {
|
|
1773
|
+
this._eventTarget.addEventListener(type, listener, options);
|
|
1774
|
+
}
|
|
1775
|
+
/**
|
|
1776
|
+
* Remove a type-safe event listener for OMEZarrNVImage events.
|
|
1777
|
+
*
|
|
1778
|
+
* @param type - Event type name
|
|
1779
|
+
* @param listener - Event listener function to remove
|
|
1780
|
+
* @param options - Standard removeEventListener options
|
|
1781
|
+
*/
|
|
1782
|
+
removeEventListener(type, listener, options) {
|
|
1783
|
+
this._eventTarget.removeEventListener(type, listener, options);
|
|
1784
|
+
}
|
|
1785
|
+
/**
|
|
1786
|
+
* Internal helper to emit events.
|
|
1787
|
+
* Catches and logs any errors from event listeners to prevent breaking execution.
|
|
1788
|
+
*/
|
|
1789
|
+
_emitEvent(eventName, detail) {
|
|
1790
|
+
try {
|
|
1791
|
+
const event = new OMEZarrNVImageEvent(eventName, detail);
|
|
1792
|
+
this._eventTarget.dispatchEvent(event);
|
|
1793
|
+
}
|
|
1794
|
+
catch (error) {
|
|
1795
|
+
console.error(`Error in ${eventName} event listener:`, error);
|
|
1796
|
+
}
|
|
1797
|
+
}
|
|
1798
|
+
}
|
|
1799
|
+
//# sourceMappingURL=OMEZarrNVImage.js.map
|