@aics/vole-core 3.15.4 → 3.15.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -1
- package/es/Channel.js +5 -5
- package/es/ContourPass.js +98 -0
- package/es/FusedChannelData.js +5 -5
- package/es/Line3d.js +11 -4
- package/es/PickVolume.js +8 -7
- package/es/RenderToBuffer.js +8 -2
- package/es/ThreeJsPanel.js +30 -7
- package/es/View3d.js +9 -6
- package/es/VolumeDrawable.js +20 -0
- package/es/constants/volumeRayMarchPickShader.js +1 -1
- package/es/types/Channel.d.ts +3 -3
- package/es/types/ContourPass.d.ts +38 -0
- package/es/types/PickVolume.d.ts +1 -0
- package/es/types/RenderToBuffer.d.ts +5 -1
- package/es/types/ThreeJsPanel.d.ts +8 -2
- package/es/types/View3d.d.ts +0 -2
- package/es/types/VolumeDrawable.d.ts +2 -0
- package/es/types/loaders/zarr_utils/ChunkPrefetchIterator.d.ts +1 -1
- package/es/types/types.d.ts +1 -0
- package/es/types/workers/VolumeLoaderContext.d.ts +3 -1
- package/es/workers/VolumeLoaderContext.js +6 -2
- package/package.json +1 -1
package/README.md
CHANGED
@@ -34,7 +34,6 @@ const loaderContext = new VolumeLoaderContext(CACHE_MAX_SIZE, CONCURRENCY_LIMIT,
 
 // create the viewer. it will try to fill the parent element.
 const view3D = new View3d(el);
-view3D.loaderContext = loaderContext;
 
 // ensure the loader worker is ready
 await loaderContext.onOpen();
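The net effect on the README's quick-start is simply that the loader context is no longer assigned onto the viewer; the snippet otherwise still creates the View3d and opens the context directly, roughly (a sketch of just the affected lines, not the full README):

const view3D = new View3d(el);
// (the `view3D.loaderContext = loaderContext;` line is gone)
await loaderContext.onOpen();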
package/es/Channel.js
CHANGED
@@ -14,7 +14,7 @@ export default class Channel {
 };
 this.rawMin = 0;
 this.rawMax = 255;
-this.
+this.frame = 0;
 
 // on gpu
 this.dataTexture = new DataTexture(new Uint8Array(), 0, 0);
@@ -148,7 +148,7 @@
 
 // give the channel fresh data and initialize from that data
 // data is formatted as a texture atlas where each tile is a z slice of the volume
-setFromAtlas(bitsArray, w, h, dtype, rawMin, rawMax, subregionSize,
+setFromAtlas(bitsArray, w, h, dtype, rawMin, rawMax, subregionSize, frame = 0) {
 this.dtype = dtype;
 this.imgData = {
 data: bitsArray,
@@ -158,7 +158,7 @@
 this.rebuildDataTexture(this.imgData.data, w, h);
 this.loaded = true;
 this.histogram = new Histogram(bitsArray);
-this.
+this.frame = frame;
 
 // reuse old lut but auto-remap it to new data range
 this.setRawDataRange(rawMin, rawMax);
@@ -195,11 +195,11 @@
 }
 
 // give the channel fresh volume data and initialize from that data
-setFromVolumeData(bitsArray, vx, vy, vz, ax, ay, rawMin, rawMax, dtype,
+setFromVolumeData(bitsArray, vx, vy, vz, ax, ay, rawMin, rawMax, dtype, frame = 0) {
 this.dims = [vx, vy, vz];
 this.volumeData = bitsArray;
 this.dtype = dtype;
-this.
+this.frame = frame;
 // TODO FIXME performance hit for shuffling the data and storing 2 versions of it (could do this in worker at least?)
 this.packToAtlas(vx, vy, vz, ax, ay);
 this.loaded = true;
package/es/ContourPass.js
ADDED
@@ -0,0 +1,98 @@
import { Color, DataTexture, FloatType, RedIntegerFormat, RGBAFormat, Uniform, UnsignedIntType } from "three";
import RenderToBuffer, { RenderPassType } from "./RenderToBuffer";
/* babel-plugin-inline-import './constants/shaders/contour.frag' */
const contourFragShader = "precision highp float;\nprecision highp int;\nprecision highp usampler2D;\nprecision highp sampler3D;\n\n/**\n * LUT mapping from the segmentation ID (raw pixel value) to the\n * global ID (index in data buffers like `featureData` and `outlierData`).\n * \n * For a given local pixel ID `localId`, the global ID is given by:\n * `localIdToGlobalId[localId - localIdOffset] - 1`.\n*/\nuniform usampler2D localIdToGlobalId;\nuniform uint localIdOffset;\nuniform bool useGlobalIdLookup;\n/* Pick buffer. Used to determine IDs. */\nuniform sampler2D pickBuffer;\n\nuniform int highlightedId;\nuniform int outlineThickness;\nuniform float outlineAlpha;\nuniform vec3 outlineColor;\nuniform float devicePixelRatio;\n\nconst uint BACKGROUND_ID = 0u;\nconst uint MISSING_DATA_ID = 0xFFFFFFFFu;\nconst int ID_OFFSET = 1;\n\nuvec4 getUintFromTex(usampler2D tex, int index) {\n int width = textureSize(tex, 0).x;\n ivec2 featurePos = ivec2(index % width, index / width);\n return uvec4(texelFetch(tex, featurePos, 0));\n}\n\nuint getId(ivec2 uv) {\n float rawId = texelFetch(pickBuffer, uv, 0).g;\n if (rawId == 0.0) {\n return BACKGROUND_ID;\n }\n int localId = int(rawId) - int(localIdOffset);\n if (!useGlobalIdLookup) {\n return uint(localId + ID_OFFSET);\n }\n uvec4 c = getUintFromTex(localIdToGlobalId, localId);\n // Note: IDs are offset by `ID_OFFSET` (`=1`) to reserve `0` for local IDs\n // that don't have associated data in the global lookup. `ID_OFFSET` MUST be\n // subtracted from the ID when accessing data buffers.\n uint globalId = c.r;\n if (globalId == 0u) {\n return MISSING_DATA_ID;\n }\n return globalId;\n}\n\nbool isEdge(ivec2 uv, int id, int thickness) {\n float wStep = 1.0;\n float hStep = 1.0;\n float thicknessFloat = float(thickness);\n // sample around the pixel to see if we are on an edge\n int R = int(getId(uv + ivec2(thicknessFloat * wStep, 0))) - ID_OFFSET;\n int L = int(getId(uv + ivec2(-thicknessFloat * wStep, 0))) - ID_OFFSET;\n int T = int(getId(uv + ivec2(0, thicknessFloat * hStep))) - ID_OFFSET;\n int B = int(getId(uv + ivec2(0, -thicknessFloat * hStep))) - ID_OFFSET;\n // if any neighbors are not id then this is an edge\n return id != -1 && (R != id || L != id || T != id || B != id);\n}\n\nvoid main(void) {\n ivec2 vUv = ivec2(int(gl_FragCoord.x / devicePixelRatio), int(gl_FragCoord.y / devicePixelRatio));\n\n uint rawId = getId(vUv);\n int id = int(rawId) - ID_OFFSET;\n\n if (id == highlightedId && isEdge(vUv, id, outlineThickness)) {\n gl_FragColor = vec4(outlineColor, outlineAlpha);\n } else {\n gl_FragColor = vec4(0, 0, 0, 0.0);\n }\n}";
import { clamp } from "three/src/math/MathUtils";
const makeDefaultUniforms = () => {
const pickBufferTex = new DataTexture(new Float32Array([1, 0, 0, 0]), 1, 1, RGBAFormat, FloatType);
const localIdToGlobalId = new DataTexture(new Uint32Array([0]), 1, 1, RedIntegerFormat, UnsignedIntType);
localIdToGlobalId.needsUpdate = true;
return {
pickBuffer: new Uniform(pickBufferTex),
highlightedId: new Uniform(94),
outlineThickness: new Uniform(2.0),
outlineColor: new Uniform(new Color(1, 0, 1)),
outlineAlpha: new Uniform(1.0),
useGlobalIdLookup: new Uniform(false),
localIdToGlobalId: new Uniform(localIdToGlobalId),
localIdOffset: new Uniform(0),
devicePixelRatio: new Uniform(1.0)
};
};
export default class ContourPass {
constructor() {
this.pass = new RenderToBuffer(contourFragShader, makeDefaultUniforms(), RenderPassType.TRANSPARENT);
this.frameToGlobalIdLookup = null;
this.frame = 0;
}
setOutlineColor(color, alpha = 1.0) {
this.pass.material.uniforms.outlineColor.value = color;
this.pass.material.uniforms.outlineAlpha.value = clamp(alpha, 0, 1);
}
setOutlineThickness(thickness) {
this.pass.material.uniforms.outlineThickness.value = Math.floor(thickness);
}
syncGlobalIdLookup() {
const uniforms = this.pass.material.uniforms;
const globalIdLookupInfo = this.frameToGlobalIdLookup?.get(this.frame);
if (!globalIdLookupInfo) {
uniforms.useGlobalIdLookup.value = false;
return;
}
uniforms.useGlobalIdLookup.value = true;
uniforms.localIdToGlobalId.value = globalIdLookupInfo.texture;
uniforms.localIdOffset.value = globalIdLookupInfo.minSegId;
}

/**
* Sets a frame-dependent lookup for global IDs. Set to a non-null value if
* the `highlightedId` represents a global ID instead of a local (pixel) ID.
* @param frameToGlobalIdLookup A map from a frame number to a lookup object,
* containing a texture and an offset value; see `ColorizeFeature` for more
* details. If `null`, the pass will not use a global ID lookup.
*/
setGlobalIdLookup(frameToGlobalIdLookup) {
if (this.frameToGlobalIdLookup !== frameToGlobalIdLookup) {
this.frameToGlobalIdLookup = frameToGlobalIdLookup;
this.syncGlobalIdLookup();
}
}

/**
* Sets the current frame number. If a global ID lookup has been set
* (`setGlobalIdLookup`), this must be updated on every frame.
*/
setFrame(frame) {
if (this.frame !== frame) {
this.frame = frame;
this.syncGlobalIdLookup();
}
}

/**
* Sets the current ID that should be highlighted with a contour.
* @param id The ID to highlight. If a global ID lookup has been set
* (`setGlobalIdLookup`), this should be a global ID.
*/
setHighlightedId(id) {
this.pass.material.uniforms.highlightedId.value = id;
}

/**
* Renders the contour as a transparent pass on the specified target.
* @param renderer The WebGL renderer to render with.
* @param target The render target to render to.
* @param pickBuffer The pick buffer containing the pixel IDs to highlight,
* e.g. `PickVolume.getPickBuffer()`.
*/
render(renderer, target, pickBuffer) {
// Setup uniforms
const uniforms = this.pass.material.uniforms;
uniforms.devicePixelRatio.value = window.devicePixelRatio;
uniforms.pickBuffer.value = pickBuffer.texture;
const startingAutoClearState = renderer.autoClear;
renderer.autoClear = false;
this.pass.render(renderer, target ?? undefined);
renderer.autoClear = startingAutoClearState;
}
}
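For orientation, a minimal sketch of driving the new pass directly, using only the methods defined above; `renderer`, `target`, `pickVolume`, `frameToGlobalIdLookup`, and `currentFrame` are assumed to exist elsewhere (e.g. a PickVolume instance supplying the pick buffer), and `Color` comes from three:

const contours = new ContourPass();
contours.setOutlineColor(new Color(1, 0, 1), 1.0); // magenta outline, fully opaque
contours.setOutlineThickness(2);
contours.setHighlightedId(42); // ID to outline (a global ID if a lookup is set)
contours.setGlobalIdLookup(frameToGlobalIdLookup); // optional, e.g. from a ColorizeFeature
contours.setFrame(currentFrame); // keep in sync with the picked channel's frame
// composite the outline over an existing render target, reading IDs from the pick buffer
contours.render(renderer, target, pickVolume.getPickBuffer());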
package/es/FusedChannelData.js
CHANGED
@@ -1,13 +1,13 @@
 import { Color, DataTexture, RedFormat, UnsignedByteType, ClampToEdgeWrapping, Scene, OrthographicCamera, WebGLRenderTarget, RGBAFormat, ShaderMaterial, Mesh, PlaneGeometry, OneFactor, CustomBlending, MaxEquation, LinearFilter, Vector2 } from "three";
 import { renderToBufferVertShader } from "./constants/basicShaders.js";
 /* babel-plugin-inline-import './constants/shaders/fuseUI.frag' */
-
const fuseShaderSrcUI = "precision highp float;\nprecision highp int;\nprecision highp usampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\nuniform uint highlightedId;\n\n// src texture is the raw volume intensity data\nuniform usampler2D srcTexture;\n\nvoid main()
+
const fuseShaderSrcUI = "precision highp float;\nprecision highp int;\nprecision highp usampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\nuniform uint highlightedId;\n\n// src texture is the raw volume intensity data\nuniform usampler2D srcTexture;\n\nvoid main() {\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n uint intensity = texelFetch(srcTexture, vUv, 0).r;\n float ilookup = float(float(intensity) - lutMinMax.x) / float(lutMinMax.y - lutMinMax.x);\n // apply lut to intensity:\n vec4 pix = texture(lutSampler, vec2(ilookup, 0.5));\n gl_FragColor = vec4(pix.xyz * pix.w, pix.w);\n}\n";
 /* babel-plugin-inline-import './constants/shaders/fuseF.frag' */
const fuseShaderSrcF = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\n\n// src texture is the raw volume intensity data\nuniform sampler2D srcTexture;\n\nvoid main()\n{\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n\n // load from channel\n float intensity = texelFetch(srcTexture, vUv, 0).r;\n\n float ilookup = float(float(intensity) - lutMinMax.x) / float(lutMinMax.y - lutMinMax.x);\n // apply lut to intensity:\n vec4 pix = texture(lutSampler, vec2(ilookup, 0.5));\n gl_FragColor = vec4(pix.xyz*pix.w, pix.w);\n}\n";
 /* babel-plugin-inline-import './constants/shaders/fuseI.frag' */
const fuseShaderSrcI = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\n\n// src texture is the raw volume intensity data\nuniform isampler2D srcTexture;\n\nvoid main()\n{\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n int intensity = texelFetch(srcTexture, vUv, 0).r;\n float ilookup = float(float(intensity) - lutMinMax.x) / float(lutMinMax.y - lutMinMax.x);\n // apply lut to intensity:\n vec4 pix = texture(lutSampler, vec2(ilookup, 0.5));\n gl_FragColor = vec4(pix.xyz*pix.w, pix.w);\n}\n";
 /* babel-plugin-inline-import './constants/shaders/colorizeUI.frag' */
-
const colorizeSrcUI = "precision highp float;\nprecision highp int;\nprecision highp usampler2D;\nprecision highp sampler3D;\n\nuniform sampler2D featureData;\n/** Min and max feature values that define the endpoints of the color map. Values\n * outside the range will be clamped to the nearest endpoint.\n */\nuniform float featureColorRampMin;\nuniform float featureColorRampMax;\nuniform sampler2D colorRamp;\nuniform usampler2D inRangeIds;\nuniform usampler2D outlierData;\n\n/**\n * LUT mapping from the segmentation ID (raw pixel value) to the\n * global ID (index in data buffers like `featureData` and `outlierData`).\n * \n * For a given segmentation ID `segId`, the global ID is given by:\n * `segIdToGlobalId[segId - segIdOffset] - 1`.\n*/\nuniform usampler2D segIdToGlobalId;\nuniform uint segIdOffset;\n\nuniform vec3 outlineColor;\n\n/** MUST be synchronized with the DrawMode enum in ColorizeCanvas! */\nconst uint DRAW_MODE_HIDE = 0u;\nconst uint DRAW_MODE_COLOR = 1u;\nconst uint BACKGROUND_ID = 0u;\nconst uint MISSING_DATA_ID = 0xFFFFFFFFu;\n\nuniform vec3 outlierColor;\nuniform uint outlierDrawMode;\nuniform vec3 outOfRangeColor;\nuniform uint outOfRangeDrawMode;\n\nuniform uint highlightedId;\n\nuniform bool useRepeatingCategoricalColors;\n\n// src texture is the raw volume intensity data\nuniform usampler2D srcTexture;\n\nvec4 getFloatFromTex(sampler2D tex, int index) {\n int width = textureSize(tex, 0).x;\n ivec2 featurePos = ivec2(index % width, index / width);\n return texelFetch(tex, featurePos, 0);\n}\nuvec4 getUintFromTex(usampler2D tex, int index) {\n int width = textureSize(tex, 0).x;\n ivec2 featurePos = ivec2(index % width, index / width);\n return texelFetch(tex, featurePos, 0);\n}\nuint getId(ivec2 uv) {\n uint rawId = texelFetch(srcTexture, uv, 0).r;\n if (rawId == 0u) {\n return BACKGROUND_ID;\n }\n uvec4 c = getUintFromTex(segIdToGlobalId, int(rawId - segIdOffset));\n // Note: IDs are offset by `1` to reserve `0` for segmentations that don't\n // have associated data. 
`1` MUST be subtracted from the ID when accessing\n // data buffers.\n uint globalId = c.r;\n if (globalId == 0u) {\n return MISSING_DATA_ID;\n }\n return globalId;\n}\n\nvec4 getColorRamp(float val) {\n float width = float(textureSize(colorRamp, 0).x);\n float range = (width - 1.0) / width;\n float adjustedVal = (0.5 / width) + (val * range);\n return texture(colorRamp, vec2(adjustedVal, 0.5));\n}\n\nvec4 getCategoricalColor(float featureValue) {\n float width = float(textureSize(colorRamp, 0).x);\n float modValue = mod(featureValue, width);\n // The categorical texture uses no interpolation, so when sampling, `modValue`\n // is rounded to the nearest integer.\n return getColorRamp(modValue / (width - 1.0));\n}\n\nvec4 getColorFromDrawMode(uint drawMode, vec3 defaultColor) {\n const uint DRAW_MODE_HIDE = 0u;\n vec3 backgroundColor = vec3(0.0, 0.0, 0.0);\n if (drawMode == DRAW_MODE_HIDE) {\n return vec4(backgroundColor, 0.0);\n } else {\n return vec4(defaultColor, 1.0);\n }\n}\n\nfloat getFeatureVal(uint id) {\n // Data buffer starts at 0, non-background segmentation IDs start at 1\n return getFloatFromTex(featureData, int(id) - 1).r;\n}\nuint getOutlierVal(uint id) {\n // Data buffer starts at 0, non-background segmentation IDs start at 1\n return getUintFromTex(outlierData, int(id) - 1).r;\n}\nbool getIsInRange(uint id) {\n return getUintFromTex(inRangeIds, int(id) - 1).r == 1u;\n}\nbool getIsOutlier(float featureVal, uint outlierVal) {\n return isinf(featureVal) || outlierVal != 0u;\n}\n\nvec4 getObjectColor(ivec2 sUv, float opacity) {\n // Get the segmentation id at this pixel\n uint id = getId(sUv);\n\n // A segmentation id of 0 represents background\n if (id == BACKGROUND_ID) {\n return vec4(0, 0, 0, 0);\n }\n\n // color the highlighted object. Note, `highlightedId` is a 0-based index\n // (global ID w/o offset), while `id` is a 1-based index.\n if (id - 1u == highlightedId) {\n
+
const colorizeSrcUI = "precision highp float;\nprecision highp int;\nprecision highp usampler2D;\nprecision highp sampler3D;\n\nuniform sampler2D featureData;\n/** Min and max feature values that define the endpoints of the color map. Values\n * outside the range will be clamped to the nearest endpoint.\n */\nuniform float featureColorRampMin;\nuniform float featureColorRampMax;\nuniform sampler2D colorRamp;\nuniform usampler2D inRangeIds;\nuniform usampler2D outlierData;\n\n// TODO: Rename to `localId` for consistency\n/**\n * LUT mapping from the segmentation ID (raw pixel value) to the\n * global ID (index in data buffers like `featureData` and `outlierData`).\n * \n * For a given segmentation ID `segId`, the global ID is given by:\n * `segIdToGlobalId[segId - segIdOffset] - 1`.\n*/\nuniform usampler2D segIdToGlobalId;\nuniform uint segIdOffset;\n\nuniform vec3 outlineColor;\n\n/** MUST be synchronized with the DrawMode enum in ColorizeCanvas! */\nconst uint DRAW_MODE_HIDE = 0u;\nconst uint DRAW_MODE_COLOR = 1u;\nconst uint BACKGROUND_ID = 0u;\nconst uint MISSING_DATA_ID = 0xFFFFFFFFu;\n\nuniform vec3 outlierColor;\nuniform uint outlierDrawMode;\nuniform vec3 outOfRangeColor;\nuniform uint outOfRangeDrawMode;\n\nuniform uint highlightedId;\n\nuniform bool useRepeatingCategoricalColors;\n\n// src texture is the raw volume intensity data\nuniform usampler2D srcTexture;\n\nvec4 getFloatFromTex(sampler2D tex, int index) {\n int width = textureSize(tex, 0).x;\n ivec2 featurePos = ivec2(index % width, index / width);\n return texelFetch(tex, featurePos, 0);\n}\nuvec4 getUintFromTex(usampler2D tex, int index) {\n int width = textureSize(tex, 0).x;\n ivec2 featurePos = ivec2(index % width, index / width);\n return texelFetch(tex, featurePos, 0);\n}\nuint getId(ivec2 uv) {\n uint rawId = texelFetch(srcTexture, uv, 0).r;\n if (rawId == 0u) {\n return BACKGROUND_ID;\n }\n uvec4 c = getUintFromTex(segIdToGlobalId, int(rawId - segIdOffset));\n // Note: IDs are offset by `1` to reserve `0` for segmentations that don't\n // have associated data. 
`1` MUST be subtracted from the ID when accessing\n // data buffers.\n uint globalId = c.r;\n if (globalId == 0u) {\n return MISSING_DATA_ID;\n }\n return globalId;\n}\n\nvec4 getColorRamp(float val) {\n float width = float(textureSize(colorRamp, 0).x);\n float range = (width - 1.0) / width;\n float adjustedVal = (0.5 / width) + (val * range);\n return texture(colorRamp, vec2(adjustedVal, 0.5));\n}\n\nvec4 getCategoricalColor(float featureValue) {\n float width = float(textureSize(colorRamp, 0).x);\n float modValue = mod(featureValue, width);\n // The categorical texture uses no interpolation, so when sampling, `modValue`\n // is rounded to the nearest integer.\n return getColorRamp(modValue / (width - 1.0));\n}\n\nvec4 getColorFromDrawMode(uint drawMode, vec3 defaultColor) {\n const uint DRAW_MODE_HIDE = 0u;\n vec3 backgroundColor = vec3(0.0, 0.0, 0.0);\n if (drawMode == DRAW_MODE_HIDE) {\n return vec4(backgroundColor, 0.0);\n } else {\n return vec4(defaultColor, 1.0);\n }\n}\n\nfloat getFeatureVal(uint id) {\n // Data buffer starts at 0, non-background segmentation IDs start at 1\n return getFloatFromTex(featureData, int(id) - 1).r;\n}\nuint getOutlierVal(uint id) {\n // Data buffer starts at 0, non-background segmentation IDs start at 1\n return getUintFromTex(outlierData, int(id) - 1).r;\n}\nbool getIsInRange(uint id) {\n return getUintFromTex(inRangeIds, int(id) - 1).r == 1u;\n}\nbool getIsOutlier(float featureVal, uint outlierVal) {\n return isinf(featureVal) || outlierVal != 0u;\n}\n\nvec4 getObjectColor(ivec2 sUv, float opacity) {\n // Get the segmentation id at this pixel\n uint id = getId(sUv);\n\n // A segmentation id of 0 represents background\n if (id == BACKGROUND_ID) {\n return vec4(0, 0, 0, 0);\n }\n\n // color the highlighted object. Note, `highlightedId` is a 0-based index\n // (global ID w/o offset), while `id` is a 1-based index.\n // if (id - 1u == highlightedId) {\n // return vec4(outlineColor, 1.0);\n // }\n\n float featureVal = getFeatureVal(id);\n uint outlierVal = getOutlierVal(id);\n float normFeatureVal = (featureVal - featureColorRampMin) / (featureColorRampMax - featureColorRampMin);\n\n // Use the selected draw mode to handle out of range and outlier values;\n // otherwise color with the color ramp as usual.\n bool isInRange = getIsInRange(id);\n bool isOutlier = getIsOutlier(featureVal, outlierVal);\n bool isMissingData = (id == MISSING_DATA_ID);\n\n // Features outside the filtered/thresholded range will all be treated the same (use `outOfRangeDrawColor`).\n // Features inside the range can either be outliers or standard values, and are colored accordingly.\n vec4 color;\n if (isMissingData) { \n // TODO: Add color controls for missing data\n color = getColorFromDrawMode(outlierDrawMode, outlierColor);\n } else if (isInRange) {\n if (isOutlier) {\n color = getColorFromDrawMode(outlierDrawMode, outlierColor);\n } else if (useRepeatingCategoricalColors) {\n color = getCategoricalColor(featureVal);\n } else {\n color = getColorRamp(normFeatureVal);\n }\n } else {\n color = getColorFromDrawMode(outOfRangeDrawMode, outOfRangeColor);\n }\n color.a *= opacity;\n return color;\n}\n\nvoid main() {\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n gl_FragColor = getObjectColor(vUv, 1.0);\n}\n";
 // This is the owner of the fused RGBA volume texture atlas, and the mask texture atlas.
 // This module is responsible for updating the fused texture, given the read-only volume channel data.
 export default class FusedChannelData {
@@ -229,10 +229,10 @@ export default class FusedChannelData {
 mat.uniforms.outlierDrawMode.value = feature.outlierDrawMode;
 mat.uniforms.outOfRangeDrawMode.value = feature.outOfRangeDrawMode;
 mat.uniforms.hideOutOfRange.value = feature.hideOutOfRange;
-const
-let globalIdLookupInfo = feature.frameToGlobalIdLookup.get(
+const frame = channels[chIndex].frame;
+let globalIdLookupInfo = feature.frameToGlobalIdLookup.get(frame);
 if (!globalIdLookupInfo) {
-console.warn(`FusedChannelData.gpuFuse: No global ID lookup info for
+console.warn(`FusedChannelData.gpuFuse: No global ID lookup info for frame ${frame} in channel ${chIndex}. A default lookup will be used, which may cause visual artifacts.`);
 globalIdLookupInfo = {
 texture: new DataTexture(Uint32Array[0]),
 minSegId: 1
package/es/Line3d.js
CHANGED
@@ -1,6 +1,6 @@
 import { Group, Vector3 } from "three";
 import { LineMaterial } from "three/addons/lines/LineMaterial";
-import {
+import { MESH_NO_PICK_OCCLUSION_LAYER, OVERLAY_LAYER } from "./ThreeJsPanel";
 import { LineSegments2 } from "three/addons/lines/LineSegments2";
 import { LineSegmentsGeometry } from "three/addons/lines/LineSegmentsGeometry";
 const DEFAULT_VERTEX_BUFFER_SIZE = 1020;
@@ -20,11 +20,11 @@ export default class Line3d {
 worldUnits: false
 });
 this.lineMesh = new LineSegments2(geometry, material);
-
+
+// Lines need to write depth information so they interact with the volume
+// (so the lines appear to fade into the volume if they intersect), but
+// lines shouldn't interact with the pick buffer, otherwise strange visual
+// artifacts can occur where contours are drawn around lines. This layer
+// (MESH_NO_PICK_OCCLUSION_LAYER) does not occlude/interact with the pick
+// buffer but still writes depth information for the volume.
+this.lineMesh.layers.set(MESH_NO_PICK_OCCLUSION_LAYER);
 this.lineMesh.frustumCulled = false;
 this.meshPivot = new Group();
 this.meshPivot.add(this.lineMesh);
-this.meshPivot.layers.set(
+this.meshPivot.layers.set(MESH_NO_PICK_OCCLUSION_LAYER);
 this.scale = new Vector3(1, 1, 1);
 this.flipAxes = new Vector3(1, 1, 1);
 }
@@ -108,7 +115,7 @@
 * volume, ignoring depth.
 */
 setRenderAsOverlay(renderAsOverlay) {
-this.lineMesh.layers.set(renderAsOverlay ? OVERLAY_LAYER :
+this.lineMesh.layers.set(renderAsOverlay ? OVERLAY_LAYER : MESH_NO_PICK_OCCLUSION_LAYER);
 this.lineMesh.material.depthTest = !renderAsOverlay;
 this.lineMesh.material.depthTest = !renderAsOverlay;
 this.lineMesh.material.needsUpdate = true;
package/es/PickVolume.js
CHANGED
@@ -1,6 +1,7 @@
 import { BoxGeometry, Color, DataTexture, FloatType, Group, Matrix4, Mesh, NearestFilter, RGBAFormat, Scene, ShaderMaterial, Vector2, WebGLRenderTarget } from "three";
 import { pickVertexShaderSrc, pickFragmentShaderSrc, pickShaderUniforms } from "./constants/volumeRayMarchPickShader.js";
 import { VolumeRenderSettings, SettingsFlags } from "./VolumeRenderSettings.js";
+import { VOLUME_LAYER } from "./ThreeJsPanel.js";
 export default class PickVolume {
 needRedraw = false;
 channelToPick = 0;
@@ -43,6 +44,9 @@
 setChannelToPick(channel) {
 this.channelToPick = channel;
 }
+getChannelToPick() {
+return this.channelToPick;
+}
 getPickBuffer() {
 return this.pickBuffer;
 }
@@ -185,7 +189,6 @@
 }
 this.needRedraw = false;
 this.setUniform("iResolution", this.settings.resolution);
-this.setUniform("textureRes", this.settings.resolution);
 const depthTex = depthTexture ?? this.emptyPositionTex;
 this.setUniform("textureDepth", depthTex);
 this.setUniform("usingPositionTexture", depthTex.isDepthTexture ? 0 : 1);
@@ -194,18 +197,16 @@
 
 // this.channelData.gpuFuse(renderer);
 
-
-
-
-// TODO TODO TODO FIXME
-this.setUniform("textureAtlas", this.volume.getChannel(this.channelToPick).dataTexture);
+const channelTex = this.volume.getChannel(this.channelToPick).dataTexture;
+this.setUniform("textureAtlas", channelTex);
+this.setUniform("textureRes", new Vector2(channelTex.image.width, channelTex.image.height));
 this.geometryTransformNode.updateMatrixWorld(true);
 const mvm = new Matrix4();
 mvm.multiplyMatrices(camera.matrixWorldInverse, this.geometryMesh.matrixWorld);
 mvm.invert();
 this.setUniform("inverseModelViewMatrix", mvm);
 this.setUniform("inverseProjMatrix", camera.projectionMatrixInverse);
-
+
 // draw into pick buffer...
 camera.layers.set(VOLUME_LAYER);
 renderer.setRenderTarget(this.pickBuffer);
package/es/RenderToBuffer.js
CHANGED
@@ -1,18 +1,24 @@
 import { Mesh, OrthographicCamera, PlaneGeometry, Scene, ShaderMaterial } from "three";
 import { renderToBufferVertShader } from "./constants/basicShaders.js";
+export let RenderPassType = /*#__PURE__*/function (RenderPassType) {
+RenderPassType[RenderPassType["OPAQUE"] = 0] = "OPAQUE";
+RenderPassType[RenderPassType["TRANSPARENT"] = 1] = "TRANSPARENT";
+return RenderPassType;
+}({});
 
 /**
 * Helper for render passes that just require a fragment shader: accepts a fragment shader and its
 * uniforms, and handles the ceremony of rendering a fullscreen quad with a simple vertex shader.
 */
 export default class RenderToBuffer {
-constructor(fragmentSrc, uniforms) {
+constructor(fragmentSrc, uniforms, passType = RenderPassType.OPAQUE) {
 this.scene = new Scene();
 this.geometry = new PlaneGeometry(2, 2);
 this.material = new ShaderMaterial({
 vertexShader: renderToBufferVertShader,
 fragmentShader: fragmentSrc,
-uniforms
+uniforms,
+transparent: passType === RenderPassType.TRANSPARENT
 });
 this.material.depthWrite = false;
 this.material.depthTest = false;
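A small sketch of what the new constructor parameter changes in practice; `fragSrc` and `uniforms` are placeholders. Passing `RenderPassType.TRANSPARENT` only marks the underlying ShaderMaterial as transparent, which is how the new ContourPass builds its pass:

const opaquePass = new RenderToBuffer(fragSrc, uniforms); // default, behaves as before
const blendedPass = new RenderToBuffer(fragSrc, uniforms, RenderPassType.TRANSPARENT);
blendedPass.render(renderer, target); // omit `target` to draw to the canvas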
package/es/ThreeJsPanel.js
CHANGED
@@ -9,7 +9,9 @@ import RenderToBuffer from "./RenderToBuffer.js";
 import { copyImageFragShader } from "./constants/basicShaders.js";
 export const VOLUME_LAYER = 0;
 export const MESH_LAYER = 1;
-
+/** Meshes that do not occlude picking/contour behavior. */
+export const MESH_NO_PICK_OCCLUSION_LAYER = 2;
+export const OVERLAY_LAYER = 3;
 const DEFAULT_PERSPECTIVE_CAMERA_DISTANCE = 5.0;
 const DEFAULT_PERSPECTIVE_CAMERA_NEAR = 0.1;
 const DEFAULT_PERSPECTIVE_CAMERA_FAR = 20.0;
@@ -48,6 +50,8 @@ export class ThreeJsPanel {
 this.timestepIndicatorElement = document.createElement("div");
 this.showTimestepIndicator = false;
 this.animateFuncs = [];
+this.postMeshRenderFuncs = [];
+this.overlayRenderFuncs = [];
 
 // are we in a constant render loop or not?
 this.inRenderLoop = false;
@@ -545,28 +549,47 @@ export class ThreeJsPanel {
 
 // RENDERING
 // Step 1: Render meshes, e.g. isosurfaces, separately to a render target. (Meshes are all on
-//
+// layer 1.) This is necessary to access the depth buffer.
 this.camera.layers.set(MESH_LAYER);
 this.renderer.setRenderTarget(this.meshRenderTarget);
 this.renderer.render(this.scene, this.camera);
 
-// Step 2
+// Step 2. Render any passes that have to happen after the meshes are
+// rendered but before volume rendering (e.g. pick buffer).
+this.postMeshRenderFuncs.forEach(func => {
+func(this.renderer, this.camera, this.meshRenderTarget.depthTexture);
+});
+
+// Step 3: Render meshes that do not interact with the pick buffer. This
+// must happen after the pick buffer is rendered so picking isn't occluded
+// by them, but before the volume renders so that volumes can still depth
+// test against the lines.
+this.renderer.autoClear = false;
+this.camera.layers.set(MESH_NO_PICK_OCCLUSION_LAYER);
+this.renderer.setRenderTarget(this.meshRenderTarget);
+this.renderer.render(this.scene, this.camera);
+
+// Step 4: Render the mesh render target out to the screen.
 this.meshRenderToBuffer.material.uniforms.image.value = this.meshRenderTarget.texture;
 this.meshRenderToBuffer.render(this.renderer);
 
-// Step
-this.renderer.autoClear = false;
+// Step 5: Render volumes, which can now depth test against the meshes.
 this.camera.layers.set(VOLUME_LAYER);
 this.renderer.setRenderTarget(null);
 this.renderer.render(this.scene, this.camera);
 
-// Step
+// Step 6: Render lines and other objects that must render over volumes and meshes.
 this.camera.layers.set(OVERLAY_LAYER);
 this.renderer.setRenderTarget(null);
 this.renderer.render(this.scene, this.camera);
+
+// Step 7: Render overlay passes (e.g. contours) and update the pick buffer.
+this.overlayRenderFuncs.forEach(func => {
+func(this.renderer, this.camera, this.meshRenderTarget.depthTexture);
+});
 this.renderer.autoClear = true;
 
-//
+// Step 8: Render axis helper and other overlays.
 if (this.showAxis) {
 this.renderer.autoClear = false;
 this.renderer.render(this.axisHelperScene, this.axisCamera);
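The two new hook arrays let callers slot extra passes into the fixed order above (meshes → post-mesh passes → non-occluding meshes → volumes → overlay layer → overlay passes). A hedged sketch of registering callbacks, mirroring what View3d does below; `panel` and `drawable` are placeholders, and the callback shape matches the `AnimateFunction` type added to ThreeJsPanel.d.ts:

// runs after the mesh pass, with the mesh depth texture available (e.g. filling the pick buffer)
panel.postMeshRenderFuncs.push((renderer, camera, depthTexture) => drawable.fillPickBuffer(renderer, camera, depthTexture));
// runs after volumes and the overlay layer (e.g. drawing contour outlines)
panel.overlayRenderFuncs.push((renderer) => drawable.drawContours(renderer));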
package/es/View3d.js
CHANGED
@@ -19,10 +19,6 @@ const allGlobalLoadingOptions = {
 * @class
 */
 export class View3d {
-// TODO because View3d is basically a top level entrypoint for Vol-E,
-// maybe it should create the VolumeLoaderContext with options passed in.
-// (instead of having the loaderContext created externally)
-
 /**
 * @param {Object} options Optional options.
 * @param {boolean} options.useWebGL2 Default true
@@ -143,6 +139,8 @@
 if (this.image) {
 this.canvas3d.removeControlHandlers();
 this.canvas3d.animateFuncs = [];
+this.canvas3d.postMeshRenderFuncs = [];
+this.canvas3d.overlayRenderFuncs = [];
 this.scene.remove(this.image.sceneRoot);
 }
 return this.image;
@@ -344,7 +342,12 @@
 this.canvas3d.setControlHandlers(this.onStartControls.bind(this), this.onChangeControls.bind(this), this.onEndControls.bind(this));
 this.canvas3d.animateFuncs.push(this.preRender.bind(this));
 this.canvas3d.animateFuncs.push(img.onAnimate.bind(img));
-
+// NOTE: `fillPickBuffer` MUST run after mesh rendering occurs. This is
+// because the pick buffer needs to access the `meshRenderTarget`'s depth
+// texture, but during a resize, the texture is disposed of and not
+// recreated until the next render.
+this.canvas3d.postMeshRenderFuncs.push(img.fillPickBuffer.bind(img));
+this.canvas3d.overlayRenderFuncs.push(img.drawContours.bind(img));
 this.updatePerspectiveScaleBar(img.volume);
 this.updateTimestepIndicator(img.volume);
 
@@ -961,7 +964,7 @@
 });
 // when multiple prefetch frames arrive at once, should we slow down how quickly we load them?
 prefetch.addInput(allGlobalLoadingOptions, "throttleArrivingChannelData").on("change", event => {
-
+loader?.getContext?.().setThrottleChannelData(event.value);
 });
 return pane;
 }
package/es/VolumeDrawable.js
CHANGED
@@ -7,6 +7,7 @@ import { LUT_ARRAY_LENGTH } from "./Lut.js";
 import { RenderMode } from "./types.js";
 import Atlas2DSlice from "./Atlas2DSlice.js";
 import { VolumeRenderSettings, SettingsFlags, Axis } from "./VolumeRenderSettings.js";
+import ContourPass from "./ContourPass.js";
 
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 
@@ -80,6 +81,7 @@ export default class VolumeDrawable {
 if (this.pickRendering) {
 this.pickRendering = new PickVolume(this.volume, this.settings);
 }
+this.contourRendering = new ContourPass();
 
 // draw meshes first, and volume last, for blending and depth test reasons with raymarch
 this.meshVolume = new MeshVolume(this.volume);
@@ -103,6 +105,7 @@
 getPickBuffer() {
 return this.pickRendering?.getPickBuffer();
 }
+
 /**
 * Updates whether a channel's data must be loaded for rendering,
 * based on if its volume or isosurface is enabled, or whether it is needed for masking.
@@ -414,6 +417,12 @@
 fillPickBuffer(renderer, camera, depthTexture) {
 this.pickRendering?.doRender(renderer, camera, depthTexture);
 }
+drawContours(renderer) {
+if (!this.pickRendering || !this.contourRendering) {
+return;
+}
+this.contourRendering.render(renderer, renderer.getRenderTarget(), this.pickRendering.getPickBuffer());
+}
 getViewMode() {
 return this.viewMode;
 }
@@ -424,6 +433,7 @@
 return this.meshVolume.hasIsosurface(channel);
 }
 setSelectedID(channelIndex, id) {
+this.contourRendering.setHighlightedId(id);
 if (this.fusion.length > 0) {
 // TODO does it make sense to do this for a particular channel?
 if (id !== this.fusion[channelIndex].selectedID) {
@@ -440,6 +450,13 @@
 this.volumeRendering.updateActiveChannels(this.fusion, this.volume.channels);
 // pickRendering only really works with one channel so we don't need to call
 // its updateActiveChannels method
+if (this.pickRendering) {
+const pickChannel = this.pickRendering.getChannelToPick();
+const channelData = this.volume.channels[pickChannel];
+if (channelData) {
+this.contourRendering.setFrame(channelData.frame);
+}
+}
 }
 setRenderUpdateListener(callback) {
 this.renderUpdateListener = callback;
@@ -610,8 +627,11 @@
 // TODO only one channel can ever have this?
 if (!featureInfo) {
 this.fusion[channelIndex].feature = undefined;
+this.contourRendering.setGlobalIdLookup(null);
 } else {
 this.fusion[channelIndex].feature = featureInfo;
+this.contourRendering.setOutlineColor(featureInfo.outlineColor, featureInfo.outlineAlpha);
+this.contourRendering.setGlobalIdLookup(featureInfo.frameToGlobalIdLookup);
 }
 this.volumeRendering.updateSettings(this.settings, SettingsFlags.MATERIAL);
 this.pickRendering?.updateSettings(this.settings, SettingsFlags.MATERIAL);
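Taken together, these changes wire the ContourPass into the existing picking path. A rough sketch of the resulting flow, using only methods visible in this diff (`drawable`, `segChannel`, `pickedId`, and `renderer` are placeholders):

drawable.enablePicking(true, segChannel); // picking (and therefore contouring) follows one channel
drawable.setSelectedID(segChannel, pickedId); // also forwards the ID to contourRendering.setHighlightedId
// later, during ThreeJsPanel's overlay step, drawContours composites the outline
// using the pick buffer that fillPickBuffer produced earlier in the frame:
drawable.drawContours(renderer);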
package/es/constants/volumeRayMarchPickShader.js
CHANGED
@@ -2,7 +2,7 @@ import { Vector2, Vector3, Matrix4, Texture } from "three";
 /* babel-plugin-inline-import './shaders/raymarch.vert' */
const rayMarchVertexShader = "// switch on high precision floats\n#ifdef GL_ES\nprecision highp float;\n#endif\n\nvarying vec3 pObj;\n\nvoid main() {\n pObj = position;\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n}\n";
 /* babel-plugin-inline-import './shaders/volumePick.frag' */
-
const rayMarchFragmentShader = "\n#ifdef GL_ES\nprecision highp float;\nprecision highp usampler2D;\n#endif\n\n#define M_PI 3.14159265358979323846\n\nuniform vec2 iResolution;\nuniform vec2 textureRes;\n\n//uniform float maskAlpha;\nuniform
+
const rayMarchFragmentShader = "\n#ifdef GL_ES\nprecision highp float;\nprecision highp usampler2D;\n#endif\n\n#define M_PI 3.14159265358979323846\n\nuniform vec2 iResolution;\nuniform vec2 textureRes;\n\n//uniform float maskAlpha;\nuniform uvec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform float CLIP_NEAR;\nuniform vec3 AABB_CLIP_MAX;\nuniform float CLIP_FAR;\n// one raw channel atlas that has segmentation data\nuniform usampler2D textureAtlas;\n//uniform sampler2D textureAtlasMask;\nuniform sampler2D textureDepth;\nuniform int usingPositionTexture;\nuniform int BREAK_STEPS;\nuniform float SLICES;\nuniform float isOrtho;\nuniform float orthoThickness;\nuniform float orthoScale;\nuniform int maxProject;\nuniform vec3 flipVolume;\nuniform vec3 volumeScale;\n\n// view space to axis-aligned volume box\nuniform mat4 inverseModelViewMatrix;\nuniform mat4 inverseProjMatrix;\n\nvarying vec3 pObj;\n\nfloat powf(float a, float b) {\n return pow(a,b);\n}\n\nfloat rand(vec2 co) {\n float threadId = gl_FragCoord.x/(gl_FragCoord.y + 1.0);\n float bigVal = threadId*1299721.0/911.0;\n vec2 smallVal = vec2(threadId*7927.0/577.0, threadId*104743.0/1039.0);\n return fract(sin(dot(co, smallVal)) * bigVal);\n}\n\n// get the uv offset into the atlas for the given z slice\n// ATLAS_DIMS is the number of z slices across the atlas texture\nvec2 offsetFrontBack(uint a) {\n uint ax = ATLAS_DIMS.x;\n vec2 tiles = vec2(1.0f/float(ATLAS_DIMS.x), 1.0f/float(ATLAS_DIMS.y));\n vec2 os = vec2(float(a % ax), float(a / ax)) * tiles;\n return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) * tiles);\n}\n\nuint sampleAtlasNearest(usampler2D tex, vec4 pos) {\n uint bounds = uint(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n pos[1] >= 0.0 && pos[1] <= 1.0 &&\n pos[2] >= 0.0 && pos[2] <= 1.0 );\n float nSlices = float(SLICES);\n\n // ascii art of a texture atlas:\n // +------------------+\n // | 0 | 1 | 2 | 3 |\n // +------------------+\n // | 4 | 5 | 6 | 7 | \n // +------------------+\n // | 8 | 9 |10 |11 |\n // +------------------+\n // |12 |13 |14 |15 |\n // +------------------+\n // Each tile is one z-slice of the 3D texture, which has been flattened\n // into an atlased 2D texture.\n\n // pos.xy is 0-1 range. Apply the xy flip here and then divide by number of tiles in x and y to normalize\n // to a single tile. 
This results in a uv coordinate that's in the correct X and Y position but only for\n // the first tile (z slice) of the atlas texture, z=0.\n vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / vec2(float(ATLAS_DIMS.x), float(ATLAS_DIMS.y));\n \n // Next, offset the UV coordinate so we are sampling in the correct Z slice.\n // Round z to the nearest (floor) slice\n float z = min(floor(pos.z * nSlices), nSlices-1.0);\n // flip z coordinate if needed\n if (flipVolume.z == -1.0) {\n z = nSlices - z - 1.0;\n }\n\n // calculate the offset to the z slice in the atlas texture\n vec2 o = offsetFrontBack(uint(z)) + loc0;\n //uint voxelColor = texture2D(tex, o).x;\n uint voxelColor = texelFetch(tex, ivec2(o * textureRes), 0).x;\n\n // Apply mask\n // float voxelMask = texture2D(textureAtlasMask, o).x;\n // voxelMask = mix(voxelMask, 1.0, maskAlpha);\n // voxelColor.rgb *= voxelMask;\n\n return bounds*voxelColor;\n}\n\nbool intersectBox(in vec3 r_o, in vec3 r_d, in vec3 boxMin, in vec3 boxMax,\n out float tnear, out float tfar) {\n // compute intersection of ray with all six bbox planes\n vec3 invR = vec3(1.0,1.0,1.0) / r_d;\n vec3 tbot = invR * (boxMin - r_o);\n vec3 ttop = invR * (boxMax - r_o);\n\n // re-order intersections to find smallest and largest on each axis\n vec3 tmin = min(ttop, tbot);\n vec3 tmax = max(ttop, tbot);\n\n // find the largest tmin and the smallest tmax\n float largest_tmin = max(max(tmin.x, tmin.y), tmin.z);\n float smallest_tmax = min(min(tmax.x, tmax.y), tmax.z);\n\n tnear = largest_tmin;\n tfar = smallest_tmax;\n\n // use >= here?\n return(smallest_tmax > largest_tmin);\n}\n\nvec4 integrateVolume(vec4 eye_o,vec4 eye_d,\n float tnear, float tfar,\n float clipNear, float clipFar,\n usampler2D textureAtlas\n ) {\n uint C = 0u;\n // march along ray from front to back, accumulating color\n\n // estimate step length\n const int maxSteps = 512;\n // modify the 3 components of eye_d by volume scale\n float scaledSteps = float(BREAK_STEPS) * length((eye_d.xyz/volumeScale));\n float csteps = clamp(float(scaledSteps), 1.0, float(maxSteps));\n float invstep = (tfar-tnear)/csteps;\n // Removed random ray dither to prevent artifacting\n float r = 0.0; // (SLICES==1.0) ? 0.0 : rand(eye_d.xy);\n // if ortho and clipped, make step size smaller so we still get same number of steps\n float tstep = invstep*orthoThickness;\n float tfarsurf = r*tstep;\n float overflow = mod((tfarsurf - tfar),tstep); // random dithering offset\n float t = tnear + overflow;\n t += r*tstep; // random dithering offset\n float tdist = 0.0;\n int numSteps = 0;\n vec4 pos, col;\n for (int i = 0; i < maxSteps; i++) {\n pos = eye_o + eye_d*t;\n // !!! assume box bounds are -0.5 .. 0.5. 
pos = (pos-min)/(max-min)\n // scaling is handled by model transform and already accounted for before we get here.\n // AABB clip is independent of this and is only used to determine tnear and tfar.\n pos.xyz = (pos.xyz-(-0.5))/((0.5)-(-0.5)); //0.5 * (pos + 1.0); // map position from [boxMin, boxMax] to [0, 1] coordinates\n\n uint col = sampleAtlasNearest(textureAtlas, pos);\n\n // FOR INTERSECTION / PICKING, the FIRST nonzero intensity terminates the raymarch\n\n if (maxProject != 0) {\n C = max(col, C);\n } else {\n if (col > 0u) {\n C = col;\n break;\n }\n }\n t += tstep;\n numSteps = i;\n\n if (t > tfar || t > tnear+clipFar ) break;\n }\n\n return vec4(float(C));\n}\n\nvoid main() {\n gl_FragColor = vec4(0.0);\n vec2 vUv = gl_FragCoord.xy/iResolution.xy;\n\n vec3 eyeRay_o, eyeRay_d;\n\n if (isOrtho == 0.0) {\n // for perspective rays:\n // world space camera coordinates\n // transform to object space\n eyeRay_o = (inverseModelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0)).xyz;\n eyeRay_d = normalize(pObj - eyeRay_o);\n } else {\n // for ortho rays:\n float zDist = 2.0;\n eyeRay_d = (inverseModelViewMatrix*vec4(0.0, 0.0, -zDist, 0.0)).xyz;\n vec4 ray_o = vec4(2.0*vUv - 1.0, 1.0, 1.0);\n ray_o.xy *= orthoScale;\n ray_o.x *= iResolution.x/iResolution.y;\n eyeRay_o = (inverseModelViewMatrix*ray_o).xyz;\n }\n\n // -0.5..0.5 is full box. AABB_CLIP lets us clip to a box shaped ROI to look at\n // I am applying it here at the earliest point so that the ray march does\n // not waste steps. For general shaped ROI, this has to be handled more\n // generally (obviously)\n vec3 boxMin = AABB_CLIP_MIN;\n vec3 boxMax = AABB_CLIP_MAX;\n\n float tnear, tfar;\n bool hit = intersectBox(eyeRay_o, eyeRay_d, boxMin, boxMax, tnear, tfar);\n\n if (!hit) {\n // return background color if ray misses the cube\n // is this safe to do when there is other geometry / gObjects drawn?\n gl_FragColor = vec4(0.0); //C1;//vec4(0.0);\n return;\n }\n\n float clipNear = 0.0;//-(dot(eyeRay_o.xyz, eyeNorm) + dNear) / dot(eyeRay_d.xyz, eyeNorm);\n float clipFar = 10000.0;//-(dot(eyeRay_o.xyz,-eyeNorm) + dFar ) / dot(eyeRay_d.xyz,-eyeNorm);\n\n // Sample the depth/position texture\n // If this is a depth texture, the r component is a depth value. If this is a position texture,\n // the xyz components are a view space position and w is 1.0 iff there's a mesh at this fragment.\n vec4 meshPosSample = texture2D(textureDepth, vUv);\n // Note: we make a different check for whether a mesh is present with depth vs. position textures.\n // Here's the check for depth textures:\n bool hasDepthValue = usingPositionTexture == 0 && meshPosSample.r < 1.0;\n\n // If there's a depth-contributing mesh at this fragment, we may need to terminate the ray early\n if (hasDepthValue || (usingPositionTexture == 1 && meshPosSample.a > 0.0)) {\n if (hasDepthValue) {\n // We're working with a depth value, so we need to convert back to view space position\n // Get a projection space position from depth and uv, and unproject back to view space\n vec4 meshProj = vec4(vUv * 2.0 - 1.0, meshPosSample.r * 2.0 - 1.0, 1.0);\n vec4 meshView = inverseProjMatrix * meshProj;\n meshPosSample = vec4(meshView.xyz / meshView.w, 1.0);\n }\n // Transform the mesh position to object space\n vec4 meshObj = inverseModelViewMatrix * meshPosSample;\n\n // Derive a t value for the mesh intersection\n // NOTE: divides by 0 when `eyeRay_d.z` is 0. 
Could be mitigated by picking another component\n // to derive with when z is 0, but I found this was rare enough in practice to be acceptable.\n float tMesh = (meshObj.z - eyeRay_o.z) / eyeRay_d.z;\n if (tMesh < tfar) {\n clipFar = tMesh - tnear;\n }\n }\n\n vec4 C = integrateVolume(vec4(eyeRay_o,1.0), vec4(eyeRay_d,0.0),\n tnear, tfar, //intersections of box\n clipNear, clipFar,\n textureAtlas);\n\n gl_FragColor = C;\n return;\n}\n";
 export const pickVertexShaderSrc = rayMarchVertexShader;
 export const pickFragmentShaderSrc = rayMarchFragmentShader;
 export const pickShaderUniforms = () => {
package/es/types/Channel.d.ts
CHANGED
@@ -25,7 +25,7 @@ export default class Channel {
 lutTexture: DataTexture;
 rawMin: number;
 rawMax: number;
-
+frame: number;
 constructor(name: string);
 combineLuts(rgbColor: [number, number, number] | number, out?: Uint8Array): Uint8Array;
 setRawDataRange(min: number, max: number): void;
@@ -34,9 +34,9 @@
 normalizeRaw(val: number): number;
 getIntensityFromAtlas(x: number, y: number, z: number): number;
 private rebuildDataTexture;
-setFromAtlas(bitsArray: TypedArray<NumberType>, w: number, h: number, dtype: NumberType, rawMin: number, rawMax: number, subregionSize: Vector3,
+setFromAtlas(bitsArray: TypedArray<NumberType>, w: number, h: number, dtype: NumberType, rawMin: number, rawMax: number, subregionSize: Vector3, frame?: number): void;
 private unpackFromAtlas;
-setFromVolumeData(bitsArray: TypedArray<NumberType>, vx: number, vy: number, vz: number, ax: number, ay: number, rawMin: number, rawMax: number, dtype: NumberType,
+setFromVolumeData(bitsArray: TypedArray<NumberType>, vx: number, vy: number, vz: number, ax: number, ay: number, rawMin: number, rawMax: number, dtype: NumberType, frame?: number): void;
 private packToAtlas;
 setLut(lut: Lut): void;
 setColorPalette(palette: Uint8Array): void;
package/es/types/ContourPass.d.ts
ADDED
@@ -0,0 +1,38 @@
import { Color, WebGLRenderer, WebGLRenderTarget } from "three";
import { ColorizeFeature } from "./types";
export default class ContourPass {
private pass;
private frameToGlobalIdLookup;
private frame;
constructor();
setOutlineColor(color: Color, alpha?: number): void;
setOutlineThickness(thickness: number): void;
private syncGlobalIdLookup;
/**
* Sets a frame-dependent lookup for global IDs. Set to a non-null value if
* the `highlightedId` represents a global ID instead of a local (pixel) ID.
* @param frameToGlobalIdLookup A map from a frame number to a lookup object,
* containing a texture and an offset value; see `ColorizeFeature` for more
* details. If `null`, the pass will not use a global ID lookup.
*/
setGlobalIdLookup(frameToGlobalIdLookup: ColorizeFeature["frameToGlobalIdLookup"] | null): void;
/**
* Sets the current frame number. If a global ID lookup has been set
* (`setGlobalIdLookup`), this must be updated on every frame.
*/
setFrame(frame: number): void;
/**
* Sets the current ID that should be highlighted with a contour.
* @param id The ID to highlight. If a global ID lookup has been set
* (`setGlobalIdLookup`), this should be a global ID.
*/
setHighlightedId(id: number): void;
/**
* Renders the contour as a transparent pass on the specified target.
* @param renderer The WebGL renderer to render with.
* @param target The render target to render to.
* @param pickBuffer The pick buffer containing the pixel IDs to highlight,
* e.g. `PickVolume.getPickBuffer()`.
*/
render(renderer: WebGLRenderer, target: WebGLRenderTarget | null, pickBuffer: WebGLRenderTarget): void;
}
package/es/types/PickVolume.d.ts
CHANGED
@@ -24,6 +24,7 @@ export default class PickVolume implements VolumeRenderImpl {
 */
 constructor(volume: Volume, settings?: VolumeRenderSettings);
 setChannelToPick(channel: number): void;
+getChannelToPick(): number;
 getPickBuffer(): WebGLRenderTarget;
 updateVolumeDimensions(): void;
 viewpointMoved(): void;
package/es/types/RenderToBuffer.d.ts
CHANGED
@@ -1,4 +1,8 @@
 import { IUniform, Mesh, OrthographicCamera, PlaneGeometry, WebGLRenderer, Scene, ShaderMaterial, WebGLRenderTarget } from "three";
+export declare enum RenderPassType {
+OPAQUE = 0,
+TRANSPARENT = 1
+}
 /**
 * Helper for render passes that just require a fragment shader: accepts a fragment shader and its
 * uniforms, and handles the ceremony of rendering a fullscreen quad with a simple vertex shader.
@@ -11,7 +15,7 @@ export default class RenderToBuffer {
 camera: OrthographicCamera;
 constructor(fragmentSrc: string, uniforms: {
 [key: string]: IUniform;
-});
+}, passType?: RenderPassType);
 /** Renders this pass to `target` using `renderer`, or to the canvas if no `target` is given. */
 render(renderer: WebGLRenderer, target?: WebGLRenderTarget): void;
 }
package/es/types/ThreeJsPanel.d.ts
CHANGED
@@ -3,7 +3,9 @@ import TrackballControls from "./TrackballControls.js";
 import { ViewportCorner } from "./types.js";
 export declare const VOLUME_LAYER = 0;
 export declare const MESH_LAYER = 1;
-
+/** Meshes that do not occlude picking/contour behavior. */
+export declare const MESH_NO_PICK_OCCLUSION_LAYER = 2;
+export declare const OVERLAY_LAYER = 3;
 export type CameraState = {
 position: [number, number, number];
 up: [number, number, number];
@@ -13,13 +15,16 @@ export type CameraState = {
 /** The scale value for the orthographic camera controls; undefined for perspective cameras. */
 orthoScale?: number;
 };
+type AnimateFunction = (renderer: WebGLRenderer, camera: PerspectiveCamera | OrthographicCamera, depthTexture?: DepthTexture | null) => void;
 export declare class ThreeJsPanel {
 containerdiv: HTMLDivElement;
 private canvas;
 scene: Scene;
 private meshRenderTarget;
 private meshRenderToBuffer;
-animateFuncs:
+animateFuncs: AnimateFunction[];
+postMeshRenderFuncs: AnimateFunction[];
+overlayRenderFuncs: AnimateFunction[];
 private inRenderLoop;
 private requestedRender;
 hasWebGL2: boolean;
@@ -109,3 +114,4 @@
 setControlHandlers(onstart: EventListener<Event, "start", TrackballControls>, onchange: EventListener<Event, "change", TrackballControls>, onend: EventListener<Event, "end", TrackballControls>): void;
 hitTest(offsetX: number, offsetY: number, pickBuffer: WebGLRenderTarget | undefined): number;
 }
+export {};
package/es/types/View3d.d.ts
CHANGED
@@ -5,7 +5,6 @@ import { Light } from "./Light.js";
 import Volume from "./Volume.js";
 import { type ColorizeFeature, type VolumeChannelDisplayOptions, type VolumeDisplayOptions, ViewportCorner, RenderMode } from "./types.js";
 import { PerChannelCallback } from "./loaders/IVolumeLoader.js";
-import VolumeLoaderContext from "./workers/VolumeLoaderContext.js";
 import Line3d from "./Line3d.js";
 export declare const RENDERMODE_RAYMARCH = RenderMode.RAYMARCH;
 export declare const RENDERMODE_PATHTRACE = RenderMode.PATHTRACE;
@@ -17,7 +16,6 @@ export interface View3dOptions {
 * @class
 */
 export declare class View3d {
-loaderContext?: VolumeLoaderContext;
 private canvas3d;
 private scene;
 private backgroundColor;
package/es/types/VolumeDrawable.d.ts
CHANGED
@@ -39,6 +39,7 @@ export default class VolumeDrawable {
 private childObjects;
 private volumeRendering;
 private pickRendering?;
+private contourRendering;
 private renderMode;
 private renderUpdateListener?;
 constructor(volume: Volume, options: VolumeDisplayOptions);
@@ -71,6 +72,7 @@ export default class VolumeDrawable {
 onAnimate(renderer: WebGLRenderer, camera: PerspectiveCamera | OrthographicCamera, depthTexture?: DepthTexture | Texture | null): void;
 enablePicking(enabled: boolean, channelIndex: number): void;
 fillPickBuffer(renderer: WebGLRenderer, camera: PerspectiveCamera | OrthographicCamera, depthTexture?: DepthTexture | Texture | null): void;
+drawContours(renderer: WebGLRenderer): void;
 getViewMode(): Axis;
 getIsovalue(channel: number): number | undefined;
 hasIsosurface(channel: number): boolean;
package/es/types/types.d.ts
CHANGED
package/es/types/workers/VolumeLoaderContext.d.ts
CHANGED
@@ -78,11 +78,13 @@ declare class VolumeLoaderContext {
 declare class WorkerLoader extends ThreadableVolumeLoader {
 private loaderId;
 private workerHandle;
+private context;
 private currentLoadId;
 private currentLoadCallback;
 private currentMetadataUpdateCallback;
-constructor(loaderId: number, workerHandle: SharedLoadWorkerHandle);
+constructor(loaderId: number, workerHandle: SharedLoadWorkerHandle, context: VolumeLoaderContext);
 private getLoaderId;
+getContext(): VolumeLoaderContext;
 /** Close and permanently invalidate this loader. */
 close(): void;
 /**
package/es/workers/VolumeLoaderContext.js
CHANGED
@@ -178,7 +178,7 @@ class VolumeLoaderContext {
 if (loaderId === undefined) {
 throw new Error("Failed to create loader");
 }
-const loader = new WorkerLoader(loaderId, this.workerHandle);
+const loader = new WorkerLoader(loaderId, this.workerHandle, this);
 this.loaders.set(loaderId, loader);
 return loader;
 }
@@ -196,10 +196,11 @@ class WorkerLoader extends ThreadableVolumeLoader {
 currentLoadId = -1;
 currentLoadCallback = undefined;
 currentMetadataUpdateCallback = undefined;
-constructor(loaderId, workerHandle) {
+constructor(loaderId, workerHandle, context) {
 super();
 this.loaderId = loaderId;
 this.workerHandle = workerHandle;
+this.context = context;
 }
 getLoaderId() {
 if (this.loaderId === undefined || !this.workerHandle.isOpen) {
@@ -207,6 +208,9 @@
 }
 return this.loaderId;
 }
+getContext() {
+return this.context;
+}
 
 /** Close and permanently invalidate this loader. */
 close() {
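Net effect of the loader changes: a WorkerLoader now retains a reference to the VolumeLoaderContext that created it, so context-level settings can be reached from a loader instance, as View3d's debug pane does for prefetch throttling. A minimal sketch (the `setThrottleChannelData` setter is only shown being called in this diff, so its exact signature is an assumption):

// `loader` is a WorkerLoader handed out by a VolumeLoaderContext (creation API not shown here)
const ctx = loader.getContext();
ctx.setThrottleChannelData(true);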